
(-)a/arch/x86/Kconfig (+4 lines)
Lines 480-485 config SCHED_OMIT_FRAME_POINTER
 
 menuconfig PARAVIRT_GUEST
 	bool "Paravirtualized guest support"
+	depends on !IPIPE
 	---help---
 	  Say Y here to get to see options related to running Linux under
 	  various hypervisors.  This option alone does not add any kernel code.
Lines 531-536 source "arch/x86/lguest/Kconfig"
 
 config PARAVIRT
 	bool "Enable paravirtualization code"
+	depends on !IPIPE
 	---help---
 	  This changes the kernel so it can modify itself when it is run
 	  under a hypervisor, potentially improving performance significantly
Lines 750-755 config SCHED_MC
 
 source "kernel/Kconfig.preempt"
 
+source "kernel/ipipe/Kconfig"
+
 config X86_UP_APIC
 	bool "Local APIC support on uniprocessors"
 	depends on X86_32 && !SMP && !X86_32_NON_STANDARD
(-)a/arch/x86/include/asm/apic.h (+6 lines)
Lines 404-410 static inline u32 safe_apic_wait_icr_idle(void)
 }
 
 
+#ifdef CONFIG_IPIPE
+#define ack_APIC_irq() do { } while(0)
+static inline void __ack_APIC_irq(void)
+#else /* !CONFIG_IPIPE */
+#define __ack_APIC_irq() ack_APIC_irq()
 static inline void ack_APIC_irq(void)
+#endif /* CONFIG_IPIPE */
 {
 #ifdef CONFIG_X86_LOCAL_APIC
 	/*
(-)a/arch/x86/include/asm/apicdef.h (+4 lines)
Lines 143-148
 # define MAX_LOCAL_APIC 32768
 #endif
 
+#ifndef __ASSEMBLY__
 /*
  * All x86-64 systems are xAPIC compatible.
  * In the following, "apicid" is a physical APIC ID.
Lines 418-421 struct local_apic {
 #else
  #define BAD_APICID 0xFFFFu
 #endif
+
+#endif /* !__ASSEMBLY__ */
+
 #endif /* _ASM_X86_APICDEF_H */
(-)a/arch/x86/include/asm/entry_arch.h (+2 lines)
Lines 22-27 BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
 		 smp_invalidate_interrupt)
 BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
 		 smp_invalidate_interrupt)
+#ifndef CONFIG_IPIPE
 BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
 		 smp_invalidate_interrupt)
 BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
Lines 32-37 BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
 		 smp_invalidate_interrupt)
 BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
 		 smp_invalidate_interrupt)
+#endif /* !CONFIG_IPIPE */
 #endif
 
 BUILD_INTERRUPT(generic_interrupt, GENERIC_INTERRUPT_VECTOR)
(-)a/arch/x86/include/asm/hw_irq.h (+8 lines)
Lines 35-40 extern void spurious_interrupt(void);
 extern void thermal_interrupt(void);
 extern void reschedule_interrupt(void);
 extern void mce_self_interrupt(void);
+#ifdef CONFIG_IPIPE
+void ipipe_ipi0(void);
+void ipipe_ipi1(void);
+void ipipe_ipi2(void);
+void ipipe_ipi3(void);
+void ipipe_ipiX(void);
+#endif
 
 extern void invalidate_interrupt(void);
 extern void invalidate_interrupt0(void);
Lines 115-120 extern void smp_invalidate_interrupt(struct pt_regs *);
 #else
 extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
 #endif
+extern asmlinkage void smp_reboot_interrupt(void);
 #endif
 
 extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
(-)a/arch/x86/include/asm/i387.h (+3 lines)
Lines 289-299 static inline void __clear_fpu(struct task_struct *tsk)
 static inline void kernel_fpu_begin(void)
 {
 	struct thread_info *me = current_thread_info();
+	unsigned long flags;
 	preempt_disable();
+	local_irq_save_hw_cond(flags);
 	if (me->status & TS_USEDFPU)
 		__save_init_fpu(me->task);
 	else
 		clts();
+	local_irq_restore_hw_cond(flags);
 }
 
 static inline void kernel_fpu_end(void)
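
The i387.h hunk shows the conversion pattern that recurs throughout this patch: sections that must stay atomic with respect to every domain stop relying on the virtualized local_irq_* calls and instead mask hardware interrupts with the _hw helpers the patch introduces. A minimal sketch of the same pattern, assuming the I-pipe helpers from this patch are available; my_fpu_critical() is a hypothetical caller, not part of the patch:

	/* Sketch only: mirrors the kernel_fpu_begin()/kernel_fpu_end() change above. */
	static inline void my_fpu_critical(struct task_struct *tsk)
	{
		unsigned long flags;

		preempt_disable();
		local_irq_save_hw_cond(flags);	/* really mask hw IRQs, all domains */
		/* ... touch FPU state, as kernel_fpu_begin() does ... */
		local_irq_restore_hw_cond(flags);
		preempt_enable();
	}
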
(-)a/arch/x86/include/asm/i8259.h (-1 / +1 lines)
Lines 24-30 extern unsigned int cached_irq_mask;
 #define SLAVE_ICW4_DEFAULT	0x01
 #define PIC_ICW4_AEOI		2
 
-extern spinlock_t i8259A_lock;
+extern ipipe_spinlock_t i8259A_lock;
 
 extern void init_8259A(int auto_eoi);
 extern void enable_8259A_irq(unsigned int irq);
(-)a/arch/x86/include/asm/ipi.h (+5 lines)
Lines 68-73 __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest
 	 * to the APIC.
 	 */
 	unsigned int cfg;
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
 
 	/*
 	 * Wait for idle.
Lines 83-88 __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
 	native_apic_mem_write(APIC_ICR, cfg);
+
+	local_irq_restore_hw(flags);
 }
 
 /*
Line 0 Link Here
1
/*   -*- linux-c -*-
2
 *   arch/x86/include/asm/ipipe.h
3
 *
4
 *   Copyright (C) 2007 Philippe Gerum.
5
 *
6
 *   This program is free software; you can redistribute it and/or modify
7
 *   it under the terms of the GNU General Public License as published by
8
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
9
 *   USA; either version 2 of the License, or (at your option) any later
10
 *   version.
11
 *
12
 *   This program is distributed in the hope that it will be useful,
13
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
 *   GNU General Public License for more details.
16
 *
17
 *   You should have received a copy of the GNU General Public License
18
 *   along with this program; if not, write to the Free Software
19
 *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20
 */
21
22
#ifndef __X86_IPIPE_H
23
#define __X86_IPIPE_H
24
25
#ifdef CONFIG_IPIPE
26
27
#ifndef IPIPE_ARCH_STRING
28
#define IPIPE_ARCH_STRING	"2.6-03"
29
#define IPIPE_MAJOR_NUMBER	2
30
#define IPIPE_MINOR_NUMBER	6
31
#define IPIPE_PATCH_NUMBER	3
32
#endif
33
34
DECLARE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
35
36
DECLARE_PER_CPU(unsigned long, __ipipe_cr2);
37
38
static inline unsigned __ipipe_get_irq_vector(int irq)
39
{
40
#ifdef CONFIG_X86_IO_APIC
41
	unsigned __ipipe_get_ioapic_irq_vector(int irq);
42
	return __ipipe_get_ioapic_irq_vector(irq);
43
#elif defined(CONFIG_X86_LOCAL_APIC)
44
	return irq >= IPIPE_FIRST_APIC_IRQ && irq < IPIPE_NR_XIRQS ?
45
		ipipe_apic_irq_vector(irq) : irq + IRQ0_VECTOR;
46
#else
47
	return irq + IRQ0_VECTOR;
48
#endif
49
}
50
51
#ifdef CONFIG_X86_32
52
# include "ipipe_32.h"
53
#else
54
# include "ipipe_64.h"
55
#endif
56
57
/*
58
 * The logical processor id and the current Linux task are read from the PDA,
59
 * so this is always safe, regardless of the underlying stack.
60
 */
61
#define ipipe_processor_id()	raw_smp_processor_id()
62
#define ipipe_safe_current()	current
63
64
#define prepare_arch_switch(next)		\
65
do {						\
66
	ipipe_schedule_notify(current, next);	\
67
	local_irq_disable_hw();			\
68
} while(0)
69
70
#define task_hijacked(p)						\
71
	({ int x = __ipipe_root_domain_p;				\
72
	__clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status));	\
73
	if (x) local_irq_enable_hw(); !x; })
74
75
struct ipipe_domain;
76
77
struct ipipe_sysinfo {
78
79
	int ncpus;		/* Number of CPUs on board */
80
	u64 cpufreq;		/* CPU frequency (in Hz) */
81
82
	/* Arch-dependent block */
83
84
	struct {
85
		unsigned tmirq;	/* Timer tick IRQ */
86
		u64 tmfreq;	/* Timer frequency */
87
	} archdep;
88
};
89
90
/* Private interface -- Internal use only */
91
92
#define __ipipe_check_platform()	do { } while(0)
93
#define __ipipe_init_platform()		do { } while(0)
94
#define __ipipe_enable_irq(irq)		irq_to_desc(irq)->chip->enable(irq)
95
#define __ipipe_disable_irq(irq)	irq_to_desc(irq)->chip->disable(irq)
96
97
#ifdef CONFIG_SMP
98
void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd);
99
#else
100
#define __ipipe_hook_critical_ipi(ipd) do { } while(0)
101
#endif
102
103
#define __ipipe_disable_irqdesc(ipd, irq)	do { } while(0)
104
105
void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq);
106
107
void __ipipe_enable_pipeline(void);
108
109
void __ipipe_do_critical_sync(unsigned irq, void *cookie);
110
111
void __ipipe_serial_debug(const char *fmt, ...);
112
113
extern int __ipipe_tick_irq;
114
115
#ifdef CONFIG_X86_LOCAL_APIC
116
#define ipipe_update_tick_evtdev(evtdev)				\
117
	do {								\
118
		if (strcmp((evtdev)->name, "lapic") == 0)		\
119
			__ipipe_tick_irq =				\
120
				ipipe_apic_vector_irq(LOCAL_TIMER_VECTOR); \
121
		else							\
122
			__ipipe_tick_irq = 0;				\
123
	} while (0)
124
#else
125
#define ipipe_update_tick_evtdev(evtdev)				\
126
	__ipipe_tick_irq = 0
127
#endif
128
129
int __ipipe_check_lapic(void);
130
131
int __ipipe_check_tickdev(const char *devname);
132
133
#define __ipipe_syscall_watched_p(p, sc)	\
134
	(((p)->flags & PF_EVNOTIFY) || (unsigned long)sc >= NR_syscalls)
135
136
#define __ipipe_root_tick_p(regs)	((regs)->flags & X86_EFLAGS_IF)
137
138
#else /* !CONFIG_IPIPE */
139
140
#define ipipe_update_tick_evtdev(evtdev)	do { } while (0)
141
#define task_hijacked(p)			0
142
143
#endif /* CONFIG_IPIPE */
144
145
#if defined(CONFIG_SMP) && defined(CONFIG_IPIPE)
146
#define __ipipe_move_root_irq(irq)					\
147
	do {								\
148
		if (irq < NR_IRQS) {					\
149
			struct irq_chip *chip = irq_to_desc(irq)->chip;	\
150
			if (chip->move)					\
151
				chip->move(irq);			\
152
		}							\
153
	} while (0)
154
#else /* !(CONFIG_SMP && CONFIG_IPIPE) */
155
#define __ipipe_move_root_irq(irq)	do { } while (0)
156
#endif /* !(CONFIG_SMP && CONFIG_IPIPE) */
157
158
#endif	/* !__X86_IPIPE_H */
(-)a/arch/x86/include/asm/ipipe_32.h (+156 lines)
Line 0 Link Here
1
/*   -*- linux-c -*-
2
 *   arch/x86/include/asm/ipipe_32.h
3
 *
4
 *   Copyright (C) 2002-2005 Philippe Gerum.
5
 *
6
 *   This program is free software; you can redistribute it and/or modify
7
 *   it under the terms of the GNU General Public License as published by
8
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
9
 *   USA; either version 2 of the License, or (at your option) any later
10
 *   version.
11
 *
12
 *   This program is distributed in the hope that it will be useful,
13
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
 *   GNU General Public License for more details.
16
 *
17
 *   You should have received a copy of the GNU General Public License
18
 *   along with this program; if not, write to the Free Software
19
 *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20
 */
21
22
#ifndef __X86_IPIPE_32_H
23
#define __X86_IPIPE_32_H
24
25
#include <linux/cpumask.h>
26
#include <linux/list.h>
27
#include <linux/threads.h>
28
#include <linux/ipipe_percpu.h>
29
#include <asm/ptrace.h>
30
31
#define ipipe_read_tsc(t)  __asm__ __volatile__("rdtsc" : "=A" (t))
32
#define ipipe_cpu_freq() ({ unsigned long long __freq = cpu_has_tsc?(1000LL * cpu_khz):CLOCK_TICK_RATE; __freq; })
33
34
#define ipipe_tsc2ns(t) \
35
({ \
36
	unsigned long long delta = (t)*1000; \
37
	do_div(delta, cpu_khz/1000+1); \
38
	(unsigned long)delta; \
39
})
40
41
#define ipipe_tsc2us(t) \
42
({ \
43
    unsigned long long delta = (t); \
44
    do_div(delta, cpu_khz/1000+1); \
45
    (unsigned long)delta; \
46
})
47
48
/* Private interface -- Internal use only */
49
50
int __ipipe_handle_irq(struct pt_regs *regs);
51
52
static inline unsigned long __ipipe_ffnz(unsigned long ul)
53
{
54
      __asm__("bsrl %1, %0":"=r"(ul)
55
      :	"r"(ul));
56
	return ul;
57
}
58
59
struct irq_desc;
60
61
void __ipipe_ack_edge_irq(unsigned irq, struct irq_desc *desc);
62
63
void __ipipe_end_edge_irq(unsigned irq, struct irq_desc *desc);
64
65
static inline void __ipipe_call_root_xirq_handler(unsigned irq,
66
						  ipipe_irq_handler_t handler)
67
{
68
	struct pt_regs *regs = &__raw_get_cpu_var(__ipipe_tick_regs);
69
70
	regs->orig_ax = ~__ipipe_get_irq_vector(irq);
71
72
	__asm__ __volatile__("pushfl\n\t"
73
			     "pushl %%cs\n\t"
74
			     "pushl $__xirq_end\n\t"
75
			     "pushl %%eax\n\t"
76
			     "pushl %%gs\n\t"
77
			     "pushl %%fs\n\t"
78
			     "pushl %%es\n\t"
79
			     "pushl %%ds\n\t"
80
			     "pushl %%eax\n\t"
81
			     "pushl %%ebp\n\t"
82
			     "pushl %%edi\n\t"
83
			     "pushl %%esi\n\t"
84
			     "pushl %%edx\n\t"
85
			     "pushl %%ecx\n\t"
86
			     "pushl %%ebx\n\t"
87
			     "movl  %2,%%eax\n\t"
88
			     "call *%1\n\t"
89
			     "jmp ret_from_intr\n\t"
90
			     "__xirq_end: cli\n"
91
			     : /* no output */
92
			     : "a" (~irq), "r" (handler), "rm" (regs));
93
}
94
95
void irq_enter(void);
96
void irq_exit(void);
97
98
static inline void __ipipe_call_root_virq_handler(unsigned irq,
99
						  ipipe_irq_handler_t handler,
100
						  void *cookie)
101
{
102
	irq_enter();
103
	__asm__ __volatile__("pushfl\n\t"
104
			     "pushl %%cs\n\t"
105
			     "pushl $__virq_end\n\t"
106
			     "pushl $-1\n\t"
107
			     "pushl %%gs\n\t"
108
			     "pushl %%fs\n\t"
109
			     "pushl %%es\n\t"
110
			     "pushl %%ds\n\t"
111
			     "pushl %%eax\n\t"
112
			     "pushl %%ebp\n\t"
113
			     "pushl %%edi\n\t"
114
			     "pushl %%esi\n\t"
115
			     "pushl %%edx\n\t"
116
			     "pushl %%ecx\n\t"
117
			     "pushl %%ebx\n\t"
118
			     "pushl %2\n\t"
119
			     "pushl %%eax\n\t"
120
			     "call *%1\n\t"
121
			     "addl $8,%%esp\n"
122
			     : /* no output */
123
			     : "a" (irq), "r" (handler), "d" (cookie));
124
	irq_exit();
125
	__asm__ __volatile__("jmp ret_from_intr\n\t"
126
			     "__virq_end: cli\n"
127
			     : /* no output */
128
			     : /* no input */);
129
}
130
131
/*
132
 * When running handlers, enable hw interrupts for all domains but the
133
 * one heading the pipeline, so that IRQs can never be significantly
134
 * deferred for the latter.
135
 */
136
#define __ipipe_run_isr(ipd, irq)					\
137
do {									\
138
	if (!__ipipe_pipeline_head_p(ipd))				\
139
		local_irq_enable_hw();					\
140
	if (ipd == ipipe_root_domain) {					\
141
		if (likely(!ipipe_virtual_irq_p(irq)))			\
142
			__ipipe_call_root_xirq_handler(irq,		\
143
						       ipd->irqs[irq].handler); \
144
		else							\
145
			__ipipe_call_root_virq_handler(irq,		\
146
						       ipd->irqs[irq].handler, \
147
						       ipd->irqs[irq].cookie); \
148
	} else {							\
149
		__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
150
		ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);	\
151
		__set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
152
	}								\
153
	local_irq_disable_hw();						\
154
} while(0)
155
156
#endif	/* !__X86_IPIPE_32_H */
(-)a/arch/x86/include/asm/ipipe_64.h (+161 lines)
Line 0 Link Here
1
/*   -*- linux-c -*-
2
 *   arch/x86/include/asm/ipipe_64.h
3
 *
4
 *   Copyright (C) 2007 Philippe Gerum.
5
 *
6
 *   This program is free software; you can redistribute it and/or modify
7
 *   it under the terms of the GNU General Public License as published by
8
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
9
 *   USA; either version 2 of the License, or (at your option) any later
10
 *   version.
11
 *
12
 *   This program is distributed in the hope that it will be useful,
13
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
 *   GNU General Public License for more details.
16
 *
17
 *   You should have received a copy of the GNU General Public License
18
 *   along with this program; if not, write to the Free Software
19
 *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20
 */
21
22
#ifndef __X86_IPIPE_64_H
23
#define __X86_IPIPE_64_H
24
25
#include <asm/ptrace.h>
26
#include <asm/irq.h>
27
#include <linux/cpumask.h>
28
#include <linux/list.h>
29
#include <linux/ipipe_percpu.h>
30
#ifdef CONFIG_SMP
31
#include <asm/mpspec.h>
32
#include <linux/thread_info.h>
33
#endif
34
35
#define ipipe_read_tsc(t)  do {		\
36
	unsigned int __a,__d;			\
37
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
38
	(t) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
39
} while(0)
40
41
extern unsigned cpu_khz;
42
#define ipipe_cpu_freq() ({ unsigned long __freq = (1000UL * cpu_khz); __freq; })
43
#define ipipe_tsc2ns(t)	(((t) * 1000UL) / (ipipe_cpu_freq() / 1000000UL))
44
#define ipipe_tsc2us(t)	((t) / (ipipe_cpu_freq() / 1000000UL))
45
46
/* Private interface -- Internal use only */
47
48
int __ipipe_handle_irq(struct pt_regs *regs);
49
50
static inline unsigned long __ipipe_ffnz(unsigned long ul)
51
{
52
      __asm__("bsrq %1, %0":"=r"(ul)
53
	      :	"rm"(ul));
54
      return ul;
55
}
56
57
struct irq_desc;
58
59
void __ipipe_ack_edge_irq(unsigned irq, struct irq_desc *desc);
60
61
void __ipipe_end_edge_irq(unsigned irq, struct irq_desc *desc);
62
63
static inline void __ipipe_call_root_xirq_handler(unsigned irq,
64
						  void (*handler)(unsigned, void *))
65
{
66
	struct pt_regs *regs = &__raw_get_cpu_var(__ipipe_tick_regs);
67
68
	regs->orig_ax = ~__ipipe_get_irq_vector(irq);
69
70
	__asm__ __volatile__("movq  %%rsp, %%rax\n\t"
71
			     "pushq $0\n\t"
72
			     "pushq %%rax\n\t"
73
			     "pushfq\n\t"
74
			     "pushq %[kernel_cs]\n\t"
75
			     "pushq $__xirq_end\n\t"
76
			     "pushq %[vector]\n\t"
77
			     "subq  $9*8,%%rsp\n\t"
78
			     "movq  %%rdi,8*8(%%rsp)\n\t"
79
			     "movq  %%rsi,7*8(%%rsp)\n\t"
80
			     "movq  %%rdx,6*8(%%rsp)\n\t"
81
			     "movq  %%rcx,5*8(%%rsp)\n\t"
82
			     "movq  %%rax,4*8(%%rsp)\n\t"
83
			     "movq  %%r8,3*8(%%rsp)\n\t"
84
			     "movq  %%r9,2*8(%%rsp)\n\t"
85
			     "movq  %%r10,1*8(%%rsp)\n\t"
86
			     "movq  %%r11,(%%rsp)\n\t"
87
			     "call  *%[handler]\n\t"
88
			     "cli\n\t"
89
			     "jmp exit_intr\n\t"
90
			     "__xirq_end: cli\n"
91
			     : /* no output */
92
			     : [kernel_cs] "i" (__KERNEL_CS),
93
			       [vector] "rm" (regs->orig_ax),
94
			       [handler] "r" (handler), "D" (regs)
95
			     : "rax");
96
}
97
98
void irq_enter(void);
99
void irq_exit(void);
100
101
static inline void __ipipe_call_root_virq_handler(unsigned irq,
102
						  void (*handler)(unsigned, void *),
103
						  void *cookie)
104
{
105
	irq_enter();
106
	__asm__ __volatile__("movq  %%rsp, %%rax\n\t"
107
			     "pushq $0\n\t"
108
			     "pushq %%rax\n\t"
109
			     "pushfq\n\t"
110
			     "pushq %[kernel_cs]\n\t"
111
			     "pushq $__virq_end\n\t"
112
			     "pushq $-1\n\t"
113
			     "subq  $9*8,%%rsp\n\t"
114
			     "movq  %%rdi,8*8(%%rsp)\n\t"
115
			     "movq  %%rsi,7*8(%%rsp)\n\t"
116
			     "movq  %%rdx,6*8(%%rsp)\n\t"
117
			     "movq  %%rcx,5*8(%%rsp)\n\t"
118
			     "movq  %%rax,4*8(%%rsp)\n\t"
119
			     "movq  %%r8,3*8(%%rsp)\n\t"
120
			     "movq  %%r9,2*8(%%rsp)\n\t"
121
			     "movq  %%r10,1*8(%%rsp)\n\t"
122
			     "movq  %%r11,(%%rsp)\n\t"
123
			     "call  *%[handler]\n\t"
124
			     : /* no output */
125
			     : [kernel_cs] "i" (__KERNEL_CS),
126
			       [handler] "r" (handler), "D" (irq), "S" (cookie)
127
			     : "rax");
128
	irq_exit();
129
	__asm__ __volatile__("cli\n\t"
130
			     "jmp exit_intr\n\t"
131
			     "__virq_end: cli\n"
132
			     : /* no output */
133
			     : /* no input */);
134
}
135
136
/*
137
 * When running handlers, enable hw interrupts for all domains but the
138
 * one heading the pipeline, so that IRQs can never be significantly
139
 * deferred for the latter.
140
 */
141
#define __ipipe_run_isr(ipd, irq)					\
142
	do {								\
143
		if (!__ipipe_pipeline_head_p(ipd))			\
144
			local_irq_enable_hw();				\
145
		if (ipd == ipipe_root_domain) {				\
146
			if (likely(!ipipe_virtual_irq_p(irq))) 		\
147
				__ipipe_call_root_xirq_handler(		\
148
					irq, (ipd)->irqs[irq].handler);	\
149
			else						\
150
				__ipipe_call_root_virq_handler(		\
151
					irq, (ipd)->irqs[irq].handler,	\
152
					(ipd)->irqs[irq].cookie);	\
153
		} else {						\
154
			__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
155
			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
156
			__set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
157
		}							\
158
		local_irq_disable_hw();					\
159
	} while(0)
160
161
#endif	/* !__X86_IPIPE_64_H */
(-)a/arch/x86/include/asm/ipipe_base.h (+210 lines)
Line 0 Link Here
1
/*   -*- linux-c -*-
2
 *   arch/x86/include/asm/ipipe_base.h
3
 *
4
 *   Copyright (C) 2007-2009 Philippe Gerum.
5
 *
6
 *   This program is free software; you can redistribute it and/or modify
7
 *   it under the terms of the GNU General Public License as published by
8
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
9
 *   USA; either version 2 of the License, or (at your option) any later
10
 *   version.
11
 *
12
 *   This program is distributed in the hope that it will be useful,
13
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
 *   GNU General Public License for more details.
16
 *
17
 *   You should have received a copy of the GNU General Public License
18
 *   along with this program; if not, write to the Free Software
19
 *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20
 */
21
22
#ifndef __X86_IPIPE_BASE_H
23
#define __X86_IPIPE_BASE_H
24
25
#include <linux/threads.h>
26
#include <asm/apicdef.h>
27
#include <asm/irq_vectors.h>
28
29
#ifdef CONFIG_X86_32
30
#define IPIPE_NR_FAULTS		33 /* 32 from IDT + iret_error */
31
#else
32
#define IPIPE_NR_FAULTS		32
33
#endif
34
35
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
36
/*
37
 * System interrupts are mapped beyond the last defined external IRQ
38
 * number.
39
 */
40
#define IPIPE_NR_XIRQS		(NR_IRQS + 32)
41
#define IPIPE_FIRST_APIC_IRQ	NR_IRQS
42
#define IPIPE_SERVICE_VECTOR0	(INVALIDATE_TLB_VECTOR_END + 1)
43
#define IPIPE_SERVICE_IPI0	ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR0)
44
#define IPIPE_SERVICE_VECTOR1	(INVALIDATE_TLB_VECTOR_END + 2)
45
#define IPIPE_SERVICE_IPI1	ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR1)
46
#define IPIPE_SERVICE_VECTOR2	(INVALIDATE_TLB_VECTOR_END + 3)
47
#define IPIPE_SERVICE_IPI2	ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR2)
48
#define IPIPE_SERVICE_VECTOR3	(INVALIDATE_TLB_VECTOR_END + 4)
49
#define IPIPE_SERVICE_IPI3	ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR3)
50
#ifdef CONFIG_SMP
51
#define IPIPE_CRITICAL_VECTOR	(INVALIDATE_TLB_VECTOR_END + 5)
52
#define IPIPE_CRITICAL_IPI	ipipe_apic_vector_irq(IPIPE_CRITICAL_VECTOR)
53
#endif
54
#define ipipe_apic_irq_vector(irq)  ((irq) - IPIPE_FIRST_APIC_IRQ + FIRST_SYSTEM_VECTOR)
55
#define ipipe_apic_vector_irq(vec)  ((vec) - FIRST_SYSTEM_VECTOR + IPIPE_FIRST_APIC_IRQ)
56
#else /* !(CONFIG_X86_64 || CONFIG_X86_LOCAL_APIC) */
57
#define IPIPE_NR_XIRQS		NR_IRQS
58
#endif /* !(CONFIG_X86_64 || CONFIG_X86_LOCAL_APIC) */
59
60
/* Pseudo-vectors used for kernel events */
61
#define IPIPE_FIRST_EVENT	IPIPE_NR_FAULTS
62
#define IPIPE_EVENT_SYSCALL	(IPIPE_FIRST_EVENT)
63
#define IPIPE_EVENT_SCHEDULE	(IPIPE_FIRST_EVENT + 1)
64
#define IPIPE_EVENT_SIGWAKE	(IPIPE_FIRST_EVENT + 2)
65
#define IPIPE_EVENT_SETSCHED	(IPIPE_FIRST_EVENT + 3)
66
#define IPIPE_EVENT_INIT	(IPIPE_FIRST_EVENT + 4)
67
#define IPIPE_EVENT_EXIT	(IPIPE_FIRST_EVENT + 5)
68
#define IPIPE_EVENT_CLEANUP	(IPIPE_FIRST_EVENT + 6)
69
#define IPIPE_LAST_EVENT	IPIPE_EVENT_CLEANUP
70
#define IPIPE_NR_EVENTS		(IPIPE_LAST_EVENT + 1)
71
72
#define ex_do_divide_error			0
73
#define ex_do_debug				1
74
/* NMI not pipelined. */
75
#define ex_do_int3				3
76
#define ex_do_overflow				4
77
#define ex_do_bounds				5
78
#define ex_do_invalid_op			6
79
#define ex_do_device_not_available		7
80
/* Double fault not pipelined. */
81
#define ex_do_coprocessor_segment_overrun	9
82
#define ex_do_invalid_TSS			10
83
#define ex_do_segment_not_present		11
84
#define ex_do_stack_segment			12
85
#define ex_do_general_protection		13
86
#define ex_do_page_fault			14
87
#define ex_do_spurious_interrupt_bug		15
88
#define ex_do_coprocessor_error			16
89
#define ex_do_alignment_check			17
90
#define ex_machine_check_vector			18
91
#define ex_reserved				ex_machine_check_vector
92
#define ex_do_simd_coprocessor_error		19
93
#define ex_do_iret_error			32
94
95
#ifndef __ASSEMBLY__
96
97
#ifdef CONFIG_SMP
98
99
#include <asm/alternative.h>
100
101
#ifdef CONFIG_X86_32
102
#define GET_ROOT_STATUS_ADDR					\
103
	"pushfl; cli;"						\
104
	"movl %%fs:per_cpu__this_cpu_off, %%eax;"		\
105
	"lea per_cpu__ipipe_percpu_darray(%%eax), %%eax;"
106
#define PUT_ROOT_STATUS_ADDR	"popfl;"
107
#define TEST_AND_SET_ROOT_STATUS \
108
	"btsl $0,(%%eax);"
109
#define TEST_ROOT_STATUS \
110
	"btl $0,(%%eax);"
111
#define ROOT_TEST_CLOBBER_LIST  "eax"
112
#else /* CONFIG_X86_64 */
113
#define GET_ROOT_STATUS_ADDR					\
114
	"pushfq; cli;"						\
115
	"movq %%gs:per_cpu__this_cpu_off, %%rax;"		\
116
	"lea per_cpu__ipipe_percpu_darray(%%rax), %%rax;"
117
#define PUT_ROOT_STATUS_ADDR	"popfq;"
118
#define TEST_AND_SET_ROOT_STATUS \
119
	"btsl $0,(%%rax);"
120
#define TEST_ROOT_STATUS \
121
	"btl $0,(%%rax);"
122
#define ROOT_TEST_CLOBBER_LIST  "rax"
123
#endif /* CONFIG_X86_64 */
124
125
static inline void __ipipe_stall_root(void)
126
{
127
	__asm__ __volatile__(GET_ROOT_STATUS_ADDR
128
			     LOCK_PREFIX
129
			     TEST_AND_SET_ROOT_STATUS
130
			     PUT_ROOT_STATUS_ADDR
131
			     : : : ROOT_TEST_CLOBBER_LIST, "memory");
132
}
133
134
static inline unsigned long __ipipe_test_and_stall_root(void)
135
{
136
	int oldbit;
137
138
	__asm__ __volatile__(GET_ROOT_STATUS_ADDR
139
			     LOCK_PREFIX
140
			     TEST_AND_SET_ROOT_STATUS
141
			     "sbbl %0,%0;"
142
			     PUT_ROOT_STATUS_ADDR
143
			     :"=r" (oldbit)
144
			     : : ROOT_TEST_CLOBBER_LIST, "memory");
145
	return oldbit;
146
}
147
148
static inline unsigned long __ipipe_test_root(void)
149
{
150
	int oldbit;
151
152
	__asm__ __volatile__(GET_ROOT_STATUS_ADDR
153
			     TEST_ROOT_STATUS
154
			     "sbbl %0,%0;"
155
			     PUT_ROOT_STATUS_ADDR
156
			     :"=r" (oldbit)
157
			     : : ROOT_TEST_CLOBBER_LIST);
158
	return oldbit;
159
}
160
161
#else /* !CONFIG_SMP */
162
163
#if __GNUC__ >= 4
164
/* Alias to ipipe_root_cpudom_var(status) */
165
extern unsigned long __ipipe_root_status;
166
#else
167
extern unsigned long *const __ipipe_root_status_addr;
168
#define __ipipe_root_status	(*__ipipe_root_status_addr)
169
#endif
170
171
static inline void __ipipe_stall_root(void)
172
{
173
	volatile unsigned long *p = &__ipipe_root_status;
174
	__asm__ __volatile__("btsl $0,%0;"
175
			     :"+m" (*p) : : "memory");
176
}
177
178
static inline unsigned long __ipipe_test_and_stall_root(void)
179
{
180
	volatile unsigned long *p = &__ipipe_root_status;
181
	int oldbit;
182
183
	__asm__ __volatile__("btsl $0,%1;"
184
			     "sbbl %0,%0;"
185
			     :"=r" (oldbit), "+m" (*p)
186
			     : : "memory");
187
	return oldbit;
188
}
189
190
static inline unsigned long __ipipe_test_root(void)
191
{
192
	volatile unsigned long *p = &__ipipe_root_status;
193
	int oldbit;
194
195
	__asm__ __volatile__("btl $0,%1;"
196
			     "sbbl %0,%0;"
197
			     :"=r" (oldbit)
198
			     :"m" (*p));
199
	return oldbit;
200
}
201
202
#endif /* !CONFIG_SMP */
203
204
void __ipipe_halt_root(void);
205
206
void __ipipe_serial_debug(const char *fmt, ...);
207
208
#endif	/* !__ASSEMBLY__ */
209
210
#endif	/* !__X86_IPIPE_BASE_H */
(-)a/arch/x86/include/asm/irq_vectors.h (+10 lines)
Lines 91-100
 #define THRESHOLD_APIC_VECTOR		0xf9
 #define REBOOT_VECTOR			0xf8
 
+#ifdef CONFIG_IPIPE
+/* f0-f2 used for TLB flush, f3-f7 reserved for the I-pipe */
+#define INVALIDATE_TLB_VECTOR_END	0xf2
+#define INVALIDATE_TLB_VECTOR_START	0xf0
+#define NUM_INVALIDATE_TLB_VECTORS	3
+#else /* !CONFIG_IPIPE */
 /* f0-f7 used for spreading out TLB flushes: */
 #define INVALIDATE_TLB_VECTOR_END	0xf7
 #define INVALIDATE_TLB_VECTOR_START	0xf0
 #define NUM_INVALIDATE_TLB_VECTORS	   8
+#endif
 
 /*
  * Local APIC timer IRQ vector is on a different priority level,
Lines 120-125
  */
 #define MCE_SELF_VECTOR			0xeb
 
+/* I-pipe: Lowest number of vectors above */
+#define FIRST_SYSTEM_VECTOR		0xea
+
 /*
  * First APIC vector available to drivers: (vectors 0x30-0xee) we
  * start at 0x31(0x41) to spread out vectors evenly between priority
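
With CONFIG_IPIPE the TLB-flush range shrinks to f0-f2 so that f3-f7 can carry the pipeline's service IPIs, and FIRST_SYSTEM_VECTOR (0xea) marks the lowest vector that gets remapped onto the extended pseudo-IRQ space starting at NR_IRQS. The mapping itself is done by the ipipe_apic_irq_vector()/ipipe_apic_vector_irq() macros added in ipipe_base.h earlier in this patch. A standalone model of that arithmetic follows; the macro bodies are copied from the patch, while the NR_IRQS value and LOCAL_TIMER_VECTOR == 0xef are assumptions for illustration only:

	#include <assert.h>

	#define NR_IRQS			256	/* assumed value, for illustration */
	#define FIRST_SYSTEM_VECTOR	0xea	/* from irq_vectors.h above */
	#define LOCAL_TIMER_VECTOR	0xef	/* assumed, as in this kernel series */
	#define IPIPE_FIRST_APIC_IRQ	NR_IRQS

	/* Copied from ipipe_base.h in this patch: */
	#define ipipe_apic_irq_vector(irq)  ((irq) - IPIPE_FIRST_APIC_IRQ + FIRST_SYSTEM_VECTOR)
	#define ipipe_apic_vector_irq(vec)  ((vec) - FIRST_SYSTEM_VECTOR + IPIPE_FIRST_APIC_IRQ)

	int main(void)
	{
		int timer_irq = ipipe_apic_vector_irq(LOCAL_TIMER_VECTOR);
		assert(timer_irq == NR_IRQS + 5);		/* 0xef - 0xea above NR_IRQS */
		assert(ipipe_apic_irq_vector(timer_irq) == LOCAL_TIMER_VECTOR);
		return 0;
	}

This is exactly the translation ipipe.h relies on when it sets __ipipe_tick_irq to ipipe_apic_vector_irq(LOCAL_TIMER_VECTOR) for the "lapic" clock event device.
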
(-)a/arch/x86/include/asm/irqflags.h (-4 / +125 lines)
Lines 4-9
 #include <asm/processor-flags.h>
 
 #ifndef __ASSEMBLY__
+
+#include <linux/ipipe_base.h>
+#include <linux/ipipe_trace.h>
+
 /*
  * Interrupt control:
  */
Lines 12-17 static inline unsigned long native_save_fl(void)
 {
 	unsigned long flags;
 
+#ifdef CONFIG_IPIPE
+	flags = (!__ipipe_test_root()) << 9;
+	barrier();
+#else
 	/*
 	 * "=rm" is safe here, because "pop" adjusts the stack before
 	 * it evaluates its effective address -- this is part of the
Lines 22-52 static inline unsigned long native_save_fl(void)
 		     : "=rm" (flags)
 		     : /* no input */
 		     : "memory");
+#endif
 
 	return flags;
 }
 
 static inline void native_restore_fl(unsigned long flags)
 {
+#ifdef CONFIG_IPIPE
+	barrier();
+	__ipipe_restore_root(!(flags & X86_EFLAGS_IF));
+#else
 	asm volatile("push %0 ; popf"
 		     : /* no output */
 		     :"g" (flags)
 		     :"memory", "cc");
+#endif
 }
 
 static inline void native_irq_disable(void)
 {
+#ifdef CONFIG_IPIPE
+	ipipe_check_context(ipipe_root_domain);
+	__ipipe_stall_root();
+	barrier();
+#else
 	asm volatile("cli": : :"memory");
+#endif
 }
 
 static inline void native_irq_enable(void)
 {
+#ifdef CONFIG_IPIPE
+	barrier();
+	__ipipe_unstall_root();
+#else
 	asm volatile("sti": : :"memory");
+#endif
 }
 
 static inline void native_safe_halt(void)
 {
+#ifdef CONFIG_IPIPE
+	barrier();
+	__ipipe_halt_root();
+#else
 	asm volatile("sti; hlt": : :"memory");
+#endif
 }
 
 static inline void native_halt(void)
Lines 71-76 static inline void raw_local_irq_restore(unsigned long flags)
 	native_restore_fl(flags);
 }
 
+static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real)
+{
+	/*
+	 * Merge virtual and real interrupt mask bits into a single
+	 * (32bit) word.
+	 */
+	return (real & ~(1L << 31)) | ((virt != 0) << 31);
+}
+
+static inline int raw_demangle_irq_bits(unsigned long *x)
+{
+	int virt = (*x & (1L << 31)) != 0;
+	*x &= ~(1L << 31);
+	return virt;
+}
+
+#define local_irq_save_hw_notrace(x) \
+	__asm__ __volatile__("pushf ; pop %0 ; cli":"=g" (x): /* no input */ :"memory")
+#define local_irq_restore_hw_notrace(x) \
+	__asm__ __volatile__("push %0 ; popf": /* no output */ :"g" (x):"memory", "cc")
+
+#define local_save_flags_hw(x)	__asm__ __volatile__("pushf ; pop %0":"=g" (x): /* no input */)
+
+#define irqs_disabled_hw()		\
+    ({					\
+	unsigned long x;		\
+	local_save_flags_hw(x);		\
+	!((x) & X86_EFLAGS_IF);		\
+    })
+
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+#define local_irq_disable_hw() do {			\
+		if (!irqs_disabled_hw()) {		\
+			local_irq_disable_hw_notrace();	\
+			ipipe_trace_begin(0x80000000);	\
+		}					\
+	} while (0)
+#define local_irq_enable_hw() do {			\
+		if (irqs_disabled_hw()) {		\
+			ipipe_trace_end(0x80000000);	\
+			local_irq_enable_hw_notrace();	\
+		}					\
+	} while (0)
+#define local_irq_save_hw(x) do {			\
+		local_save_flags_hw(x);			\
+		if ((x) & X86_EFLAGS_IF) {		\
+			local_irq_disable_hw_notrace();	\
+			ipipe_trace_begin(0x80000001);	\
+		}					\
+	} while (0)
+#define local_irq_restore_hw(x) do {			\
+		if ((x) & X86_EFLAGS_IF)		\
+			ipipe_trace_end(0x80000001);	\
+		local_irq_restore_hw_notrace(x);	\
+	} while (0)
+#else /* !CONFIG_IPIPE_TRACE_IRQSOFF */
+#define local_irq_save_hw(x)		local_irq_save_hw_notrace(x)
+#define local_irq_restore_hw(x)		local_irq_restore_hw_notrace(x)
+#define local_irq_enable_hw()		local_irq_enable_hw_notrace()
+#define local_irq_disable_hw()		local_irq_disable_hw_notrace()
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#define local_irq_disable_hw_notrace()	__asm__ __volatile__("cli": : :"memory")
+#define local_irq_enable_hw_notrace()	__asm__ __volatile__("sti": : :"memory")
+
 static inline void raw_local_irq_disable(void)
 {
 	native_irq_disable();
Lines 104-119 static inline void halt(void)
  */
 static inline unsigned long __raw_local_irq_save(void)
 {
+#ifdef CONFIG_IPIPE
+	unsigned long flags = (!__ipipe_test_and_stall_root()) << 9;
+	barrier();
+#else
 	unsigned long flags = __raw_local_save_flags();
 
 	raw_local_irq_disable();
+#endif
 
 	return flags;
 }
 #else
 
-#define ENABLE_INTERRUPTS(x)	sti
-#define DISABLE_INTERRUPTS(x)	cli
+#ifdef CONFIG_IPIPE
+#ifdef CONFIG_X86_32
+#define DISABLE_INTERRUPTS(clobbers)	PER_CPU(ipipe_percpu_darray, %eax); btsl $0,(%eax); sti
+#define ENABLE_INTERRUPTS(clobbers)	call __ipipe_unstall_root
+#else /* CONFIG_X86_64 */
+/* Not worth virtualizing in x86_64 mode. */
+#define DISABLE_INTERRUPTS(clobbers)	cli
+#define ENABLE_INTERRUPTS(clobbers)	sti
+#endif /* CONFIG_X86_64 */
+#define ENABLE_INTERRUPTS_HW_COND	sti
+#define DISABLE_INTERRUPTS_HW_COND	cli
+#define DISABLE_INTERRUPTS_HW(clobbers)	cli
+#define ENABLE_INTERRUPTS_HW(clobbers)	sti
+#else /* !CONFIG_IPIPE */
+#define ENABLE_INTERRUPTS(x)		sti
+#define DISABLE_INTERRUPTS(x)		cli
+#define ENABLE_INTERRUPTS_HW_COND
+#define DISABLE_INTERRUPTS_HW_COND
+#define DISABLE_INTERRUPTS_HW(clobbers)	DISABLE_INTERRUPTS(clobbers)
+#define ENABLE_INTERRUPTS_HW(clobbers)	ENABLE_INTERRUPTS(clobbers)
+#endif /* !CONFIG_IPIPE */
 
 #ifdef CONFIG_X86_64
 #define SWAPGS	swapgs
Lines 156-163 static inline unsigned long __raw_local_irq_save(void)
 #define raw_local_save_flags(flags)				\
 	do { (flags) = __raw_local_save_flags(); } while (0)
 
-#define raw_local_irq_save(flags)				\
-	do { (flags) = __raw_local_irq_save(); } while (0)
+#define raw_local_irq_save(flags) do {			\
+		ipipe_check_context(ipipe_root_domain);	\
+		(flags) = __raw_local_irq_save();	\
+	} while (0)
 
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
Lines 30-40 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) Link Here
30
#endif
30
#endif
31
}
31
}
32
32
33
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
33
static inline void __switch_mm(struct mm_struct *prev, struct mm_struct *next,
34
			     struct task_struct *tsk)
34
			       struct task_struct *tsk)
35
{
35
{
36
	unsigned cpu = smp_processor_id();
36
	unsigned cpu = smp_processor_id();
37
37
38
#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
39
	WARN_ON_ONCE(!irqs_disabled_hw());
40
#endif
38
	if (likely(prev != next)) {
41
	if (likely(prev != next)) {
39
		/* stop flush ipis for the previous mm */
42
		/* stop flush ipis for the previous mm */
40
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
43
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
Lines 70-79 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, Link Here
70
#endif
73
#endif
71
}
74
}
72
75
76
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
77
			     struct task_struct *tsk)
78
{
79
	unsigned long flags;
80
	local_irq_save_hw_cond(flags);
81
	__switch_mm(prev, next, tsk);
82
	local_irq_restore_hw_cond(flags);
83
}
84
85
#define ipipe_mm_switch_protect(flags)	local_irq_save_hw_cond(flags)
86
#define ipipe_mm_switch_unprotect(flags) \
87
	local_irq_restore_hw_cond(flags)
88
73
#define activate_mm(prev, next)			\
89
#define activate_mm(prev, next)			\
74
do {						\
90
do {						\
75
	paravirt_activate_mm((prev), (next));	\
91
	paravirt_activate_mm((prev), (next));	\
76
	switch_mm((prev), (next), NULL);	\
92
	__switch_mm((prev), (next), NULL);	\
77
} while (0);
93
} while (0);
78
94
79
#ifdef CONFIG_X86_32
95
#ifdef CONFIG_X86_32
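
switch_mm() is split here: the body moves into __switch_mm(), which now expects hardware interrupts to already be off (the CONFIG_IPIPE_DEBUG_INTERNAL WARN_ON_ONCE checks exactly that), while the switch_mm() wrapper and the ipipe_mm_switch_protect()/ipipe_mm_switch_unprotect() macros supply the hardware-masked envelope for callers that do not hold it themselves. A hedged usage sketch of the split API; my_use_mm() is a hypothetical caller, not from the patch:

	/* Sketch: driving the split switch_mm() API introduced above. */
	static void my_use_mm(struct mm_struct *prev, struct mm_struct *next,
			      struct task_struct *tsk)
	{
		unsigned long flags;

		ipipe_mm_switch_protect(flags);		/* hw IRQs off, all domains */
		__switch_mm(prev, next, tsk);		/* body requires hw IRQs off */
		ipipe_mm_switch_unprotect(flags);
	}
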
(-)a/arch/x86/include/asm/nmi.h (-1 / +1 lines)
Lines 29-35 extern void setup_apic_nmi_watchdog(void *);
 extern void stop_apic_nmi_watchdog(void *);
 extern void disable_timer_nmi_watchdog(void);
 extern void enable_timer_nmi_watchdog(void);
-extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
+extern int (*nmi_watchdog_tick)(struct pt_regs *regs, unsigned reason);
 extern void cpu_nmi_set_wd_enabled(void);
 
 extern atomic_t nmi_active;
(-)a/arch/x86/include/asm/processor.h (+1 lines)
Lines 435-440 struct thread_struct {
 	unsigned short		ds;
 	unsigned short		fsindex;
 	unsigned short		gsindex;
+	unsigned long		rip;
 #endif
 #ifdef CONFIG_X86_32
 	unsigned long		ip;
(-)a/arch/x86/include/asm/system.h (-1 / +9 lines)
Lines 126-133 do { \
 #define switch_to(prev, next, last) \
 	asm volatile(SAVE_CONTEXT					  \
 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
+	     "movq $thread_return,%P[threadrip](%[prev])\n\t" /* save RIP */	  \
 	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
-	     "call __switch_to\n\t"					  \
+	     "pushq %P[threadrip](%[next])\n\t" /* restore RIP */	  \
+	     "jmp __switch_to\n\t"					  \
 	     ".globl thread_return\n"					  \
 	     "thread_return:\n\t"					  \
 	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
Lines 141-146 do { \
 	       __switch_canary_oparam					  \
 	     : [next] "S" (next), [prev] "D" (prev),			  \
 	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+	       [threadrip] "i" (offsetof(struct task_struct, thread.rip)), \
 	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
 	       [_tif_fork] "i" (_TIF_FORK),			  	  \
 	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
Lines 305-312 static inline void native_wbinvd(void)
 #else
 #define read_cr0()	(native_read_cr0())
 #define write_cr0(x)	(native_write_cr0(x))
+#ifdef CONFIG_IPIPE
+#define read_cr2()	__raw_get_cpu_var(__ipipe_cr2)
+#define write_cr2(x)	__raw_get_cpu_var(__ipipe_cr2) = (x)
+#else /* !CONFIG_IPIPE */
 #define read_cr2()	(native_read_cr2())
 #define write_cr2(x)	(native_write_cr2(x))
+#endif /* !CONFIG_IPIPE */
 #define read_cr3()	(native_read_cr3())
 #define write_cr3(x)	(native_write_cr3(x))
 #define read_cr4()	(native_read_cr4())
(-)a/arch/x86/include/asm/traps.h (-1 / +1 lines)
Lines 82-89 extern int panic_on_unrecovered_nmi;
 void math_error(void __user *);
 void math_emulate(struct math_emu_info *);
 #ifndef CONFIG_X86_32
-asmlinkage void smp_thermal_interrupt(void);
 asmlinkage void mce_threshold_interrupt(void);
 #endif
+asmlinkage void smp_thermal_interrupt(void);
 
 #endif /* _ASM_X86_TRAPS_H */
(-)a/arch/x86/kernel/Makefile (+1 lines)
Lines 85-90 obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_VM86)		+= vm86_32.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+obj-$(CONFIG_IPIPE)		+= ipipe.o
 
 obj-$(CONFIG_HPET_TIMER) 	+= hpet.o
 
(-)a/arch/x86/kernel/apic/apic.c (-10 / +14 lines)
Lines 446-452 static void lapic_timer_setup(enum clock_event_mode mode,
 	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
 		return;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
Lines 466-472 static void lapic_timer_setup(enum clock_event_mode mode,
 		break;
 	}
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 /*
Lines 982-988 void lapic_shutdown(void)
 	if (!cpu_has_apic && !apic_from_smp_config())
 		return;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 #ifdef CONFIG_X86_32
 	if (!enabled_via_apicbase)
Lines 992-998 void lapic_shutdown(void)
 		disable_local_APIC();
 
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 /*
Lines 1166-1171 static void __cpuinit lapic_setup_esr(void)
 			oldvalue, value);
 }
 
+int __ipipe_check_lapic(void)
+{
+	return !(lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY);
+}
 
 /**
  * setup_local_APIC - setup the local APIC
Lines 1229-1235 void __cpuinit setup_local_APIC(void)
 		value = apic_read(APIC_ISR + i*0x10);
 		for (j = 31; j >= 0; j--) {
 			if (value & (1<<j))
-				ack_APIC_irq();
+				__ack_APIC_irq();
 		}
 	}
 
Lines 1735-1741 void smp_spurious_interrupt(struct pt_regs *regs)
 	 */
 	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
 	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
-		ack_APIC_irq();
+		__ack_APIC_irq();
 
 	inc_irq_stat(irq_spurious_count);
 
Lines 2004-2016 static int lapic_suspend(struct sys_device *dev, pm_message_t state)
 		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
 #endif
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	disable_local_APIC();
 
 	if (intr_remapping_enabled)
 		disable_intr_remapping();
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 	return 0;
 }
 
Lines 2025-2031 static int lapic_resume(struct sys_device *dev)
 	if (!apic_pm_state.active)
 		return 0;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	if (intr_remapping_enabled) {
 		ioapic_entries = alloc_ioapic_entries();
 		if (!ioapic_entries) {
Lines 2091-2097 static int lapic_resume(struct sys_device *dev)
 		free_ioapic_entries(ioapic_entries);
 	}
 restore:
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return ret;
 }
(-)a/arch/x86/kernel/apic/apic_flat_64.c (-2 / +2 lines)
Lines 72-80 static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
(-)a/arch/x86/kernel/apic/io_apic.c (-9 / +118 lines)
Lines 75-82 Link Here
75
 */
75
 */
76
int sis_apic_bug = -1;
76
int sis_apic_bug = -1;
77
77
78
static DEFINE_SPINLOCK(ioapic_lock);
78
static IPIPE_DEFINE_SPINLOCK(ioapic_lock);
79
static DEFINE_SPINLOCK(vector_lock);
79
static IPIPE_DEFINE_SPINLOCK(vector_lock);
80
#ifdef CONFIG_IPIPE
81
unsigned long bugous_edge_irq_triggers[(NR_IRQS + BITS_PER_LONG - 1) / BITS_PER_LONG];
82
#endif
80
83
81
/*
84
/*
82
 * # of IRQ routing registers
85
 * # of IRQ routing registers
Lines 417-422 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned Link Here
417
	writel(value, &io_apic->data);
420
	writel(value, &io_apic->data);
418
}
421
}
419
422
423
#if !defined(CONFIG_IPIPE) || defined(CONFIG_SMP)
424
420
static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
425
static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
421
{
426
{
422
	struct irq_pin_list *entry;
427
	struct irq_pin_list *entry;
Lines 440-445 static bool io_apic_level_ack_pending(struct irq_cfg *cfg) Link Here
440
	return false;
445
	return false;
441
}
446
}
442
447
448
#endif /* !CONFIG_IPIPE || CONFIG_SMP */
449
443
union entry_union {
450
union entry_union {
444
	struct { u32 w1, w2; };
451
	struct { u32 w1, w2; };
445
	struct IO_APIC_route_entry entry;
452
	struct IO_APIC_route_entry entry;
Lines 615-620 static void mask_IO_APIC_irq_desc(struct irq_desc *desc) Link Here
615
	BUG_ON(!cfg);
622
	BUG_ON(!cfg);
616
623
617
	spin_lock_irqsave(&ioapic_lock, flags);
624
	spin_lock_irqsave(&ioapic_lock, flags);
625
 	ipipe_irq_lock(desc->irq);
618
	__mask_IO_APIC_irq(cfg);
626
	__mask_IO_APIC_irq(cfg);
619
	spin_unlock_irqrestore(&ioapic_lock, flags);
627
	spin_unlock_irqrestore(&ioapic_lock, flags);
620
}
628
}
Lines 625-631 static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) Link Here
625
	unsigned long flags;
633
	unsigned long flags;
626
634
627
	spin_lock_irqsave(&ioapic_lock, flags);
635
	spin_lock_irqsave(&ioapic_lock, flags);
636
#ifdef CONFIG_IPIPE
637
 	if (test_and_clear_bit(desc->irq, &bugous_edge_irq_triggers[0]))
638
		__unmask_and_level_IO_APIC_irq(cfg);
639
	else
640
#endif
628
	__unmask_IO_APIC_irq(cfg);
641
	__unmask_IO_APIC_irq(cfg);
642
	ipipe_irq_unlock(desc->irq);
629
	spin_unlock_irqrestore(&ioapic_lock, flags);
643
	spin_unlock_irqrestore(&ioapic_lock, flags);
630
}
644
}
631
645
Lines 2250-2255 static unsigned int startup_ioapic_irq(unsigned int irq) Link Here
2250
	}
2264
	}
2251
	cfg = irq_cfg(irq);
2265
	cfg = irq_cfg(irq);
2252
	__unmask_IO_APIC_irq(cfg);
2266
	__unmask_IO_APIC_irq(cfg);
2267
	ipipe_irq_unlock(irq);
2253
	spin_unlock_irqrestore(&ioapic_lock, flags);
2268
	spin_unlock_irqrestore(&ioapic_lock, flags);
2254
2269
2255
	return was_pending;
2270
	return was_pending;
Lines 2529-2551 static void irq_complete_move(struct irq_desc **descp) Link Here
2529
static inline void irq_complete_move(struct irq_desc **descp) {}
2544
static inline void irq_complete_move(struct irq_desc **descp) {}
2530
#endif
2545
#endif
2531
2546
2547
#if defined(CONFIG_IPIPE) && defined(CONFIG_SMP)
2548
2549
#ifdef CONFIG_INTR_REMAP
2550
static void eoi_ioapic_irq(struct irq_desc *desc);
2551
#else /* !CONFIG_INTR_REMAP */
2552
static inline void eoi_ioapic_irq(struct irq_desc *desc) {}
2553
#endif /* !CONFIG_INTR_REMAP */
2554
2555
static void move_apic_irq(unsigned int irq)
2556
{
2557
	struct irq_desc *desc = irq_to_desc(irq);
2558
	struct irq_cfg *cfg;
2559
2560
	if (desc->handle_irq == &handle_edge_irq) {
2561
		spin_lock(&desc->lock);
2562
		irq_complete_move(&desc);
2563
		move_native_irq(irq);
2564
		spin_unlock(&desc->lock);
2565
	} else if (desc->handle_irq == &handle_fasteoi_irq) {
2566
		spin_lock(&desc->lock);
2567
		irq_complete_move(&desc);
2568
		if (irq_remapped(irq))
2569
			eoi_ioapic_irq(desc);
2570
		if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
2571
			cfg = desc->chip_data;
2572
			if (!io_apic_level_ack_pending(cfg))
2573
				move_masked_irq(irq);
2574
			unmask_IO_APIC_irq_desc(desc);
2575
		}
2576
		spin_unlock(&desc->lock);
2577
	} else
2578
		WARN_ON_ONCE(1);
2579
}
2580
#endif /* CONFIG_IPIPE && CONFIG_SMP */
2581
2532
static void ack_apic_edge(unsigned int irq)
2582
static void ack_apic_edge(unsigned int irq)
2533
{
2583
{
2584
#ifndef CONFIG_IPIPE
2534
	struct irq_desc *desc = irq_to_desc(irq);
2585
	struct irq_desc *desc = irq_to_desc(irq);
2535
2586
2536
	irq_complete_move(&desc);
2587
	irq_complete_move(&desc);
2537
	move_native_irq(irq);
2588
	move_native_irq(irq);
2538
	ack_APIC_irq();
2589
#endif /* CONFIG_IPIPE */
2590
	__ack_APIC_irq();
2539
}
2591
}
2540
2592
2541
atomic_t irq_mis_count;
2593
atomic_t irq_mis_count;
2542
2594
2543
static void ack_apic_level(unsigned int irq)
2595
static void ack_apic_level(unsigned int irq)
2544
{
2596
{
2545
	struct irq_desc *desc = irq_to_desc(irq);
2546
	unsigned long v;
2597
	unsigned long v;
2547
	int i;
2598
	int i;
2548
	struct irq_cfg *cfg;
2599
	struct irq_cfg *cfg;
2600
#ifndef CONFIG_IPIPE
2601
	struct irq_desc *desc = irq_to_desc(irq);
2549
	int do_unmask_irq = 0;
2602
	int do_unmask_irq = 0;
2550
2603
2551
	irq_complete_move(&desc);
2604
	irq_complete_move(&desc);
Lines 2628-2633 static void ack_apic_level(unsigned int irq) Link Here
2628
		__unmask_and_level_IO_APIC_irq(cfg);
2681
		__unmask_and_level_IO_APIC_irq(cfg);
2629
		spin_unlock(&ioapic_lock);
2682
		spin_unlock(&ioapic_lock);
2630
	}
2683
	}
2684
#else /* CONFIG_IPIPE */
2685
	/*
2686
	 * Prevent low priority IRQs grabbed by high priority domains
2687
	 * from being delayed, waiting for a high priority interrupt
2688
	 * handler running in a low priority domain to complete.
2689
	 */
2690
	cfg = irq_cfg(irq);
2691
	i = cfg->vector;
2692
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2693
	spin_lock(&ioapic_lock);
2694
	if (unlikely(!(v & (1 << (i & 0x1f))))) {
2695
		/* IO-APIC erratum: see comment above. */
2696
		atomic_inc(&irq_mis_count);
2697
		__mask_and_edge_IO_APIC_irq(cfg);
2698
		set_bit(irq, &bugous_edge_irq_triggers[0]);
2699
	} else
2700
		__mask_IO_APIC_irq(cfg);
2701
	spin_unlock(&ioapic_lock);
2702
	__ack_APIC_irq();
2703
#endif /* CONFIG_IPIPE */
2631
}
2704
}
2632
2705
2633
#ifdef CONFIG_INTR_REMAP
2706
#ifdef CONFIG_INTR_REMAP
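A note on the CONFIG_IPIPE branch of ack_apic_level() above, which is hard to read in the interleaved diff: it masks the line at the IO-APIC, works around the known IO-APIC edge/level erratum by checking the corresponding TMR bit of the local APIC, and then issues the EOI immediately instead of the mainline fake-EOI-and-unmask-later sequence, so a pending interrupt for a higher-priority domain is never held behind a Linux-level handler. The sketch below restates that flow; the helper names are the ones visible in the hunk, except note_miscounted_edge(), which is a hypothetical stand-in for the irq_mis_count/bugous_edge_irq_triggers bookkeeping, and the extern declarations exist only to keep the sketch self-contained.

/* Condensed restatement of the CONFIG_IPIPE level-ack path above (sketch, not a drop-in). */
struct irq_cfg { unsigned int vector; };

extern struct irq_cfg *irq_cfg(unsigned int irq);
extern unsigned long apic_read(unsigned long reg);
extern void __mask_IO_APIC_irq(struct irq_cfg *cfg);
extern void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg);
extern void __ack_APIC_irq(void);
extern void note_miscounted_edge(unsigned int irq);	/* hypothetical stand-in */

#define APIC_TMR	0x180

static void ipipe_level_ack_sketch(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned int i = cfg->vector;
	unsigned long tmr = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	/* ioapic_lock is held around this block in the real code */
	if (!(tmr & (1UL << (i & 0x1f)))) {
		/* erratum: the local APIC latched an edge for a level-triggered IRQ */
		note_miscounted_edge(irq);
		__mask_and_edge_IO_APIC_irq(cfg);
	} else {
		__mask_IO_APIC_irq(cfg);	/* stays masked until the root domain ends the IRQ */
	}
	__ack_APIC_irq();			/* EOI the local APIC right away, never deferred */
}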
Lines 2656-2669 eoi_ioapic_irq(struct irq_desc *desc) Link Here
2656
2729
2657
static void ir_ack_apic_edge(unsigned int irq)
2730
static void ir_ack_apic_edge(unsigned int irq)
2658
{
2731
{
2659
	ack_APIC_irq();
2732
	__ack_APIC_irq();
2660
}
2733
}
2661
2734
2662
static void ir_ack_apic_level(unsigned int irq)
2735
static void ir_ack_apic_level(unsigned int irq)
2663
{
2736
{
2664
	struct irq_desc *desc = irq_to_desc(irq);
2737
	struct irq_desc *desc = irq_to_desc(irq);
2665
2738
2666
	ack_APIC_irq();
2739
	__ack_APIC_irq();
2667
	eoi_ioapic_irq(desc);
2740
	eoi_ioapic_irq(desc);
2668
}
2741
}
2669
#endif /* CONFIG_INTR_REMAP */
2742
#endif /* CONFIG_INTR_REMAP */
Lines 2677-2682 static struct irq_chip ioapic_chip __read_mostly = { Link Here
2677
	.eoi		= ack_apic_level,
2750
	.eoi		= ack_apic_level,
2678
#ifdef CONFIG_SMP
2751
#ifdef CONFIG_SMP
2679
	.set_affinity	= set_ioapic_affinity_irq,
2752
	.set_affinity	= set_ioapic_affinity_irq,
2753
#ifdef CONFIG_IPIPE
2754
	.move		= move_apic_irq,
2755
#endif
2680
#endif
2756
#endif
2681
	.retrigger	= ioapic_retrigger_irq,
2757
	.retrigger	= ioapic_retrigger_irq,
2682
};
2758
};
Lines 2691-2696 static struct irq_chip ir_ioapic_chip __read_mostly = { Link Here
2691
	.eoi		= ir_ack_apic_level,
2767
	.eoi		= ir_ack_apic_level,
2692
#ifdef CONFIG_SMP
2768
#ifdef CONFIG_SMP
2693
	.set_affinity	= set_ir_ioapic_affinity_irq,
2769
	.set_affinity	= set_ir_ioapic_affinity_irq,
2770
#ifdef CONFIG_IPIPE
2771
	.move		= move_apic_irq,
2772
#endif
2694
#endif
2773
#endif
2695
#endif
2774
#endif
2696
	.retrigger	= ioapic_retrigger_irq,
2775
	.retrigger	= ioapic_retrigger_irq,
Lines 2736-2758 static inline void init_IO_APIC_traps(void) Link Here
2736
2815
2737
static void mask_lapic_irq(unsigned int irq)
2816
static void mask_lapic_irq(unsigned int irq)
2738
{
2817
{
2739
	unsigned long v;
2818
	unsigned long v, flags;
2740
2819
2820
 	local_irq_save_hw_cond(flags);
2821
 	ipipe_irq_lock(irq);
2741
	v = apic_read(APIC_LVT0);
2822
	v = apic_read(APIC_LVT0);
2742
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2823
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2824
  	local_irq_restore_hw_cond(flags);
2743
}
2825
}
2744
2826
2745
static void unmask_lapic_irq(unsigned int irq)
2827
static void unmask_lapic_irq(unsigned int irq)
2746
{
2828
{
2747
	unsigned long v;
2829
	unsigned long v, flags;
2748
2830
2831
  	local_irq_save_hw_cond(flags);
2749
	v = apic_read(APIC_LVT0);
2832
	v = apic_read(APIC_LVT0);
2750
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2833
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2834
  	ipipe_irq_unlock(irq);
2835
  	local_irq_restore_hw_cond(flags);
2751
}
2836
}
2752
2837
2753
static void ack_lapic_irq(unsigned int irq)
2838
static void ack_lapic_irq(unsigned int irq)
2754
{
2839
{
2755
	ack_APIC_irq();
2840
	__ack_APIC_irq();
2756
}
2841
}
2757
2842
2758
static struct irq_chip lapic_chip __read_mostly = {
2843
static struct irq_chip lapic_chip __read_mostly = {
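mask_lapic_irq() and unmask_lapic_irq() above now bracket the LVT0 update with hard-IRQ protection and an ipipe_irq_lock()/ipipe_irq_unlock() pair. The point of the lock/unlock pair, as used throughout the patch, is that an interrupt arriving on a locked line is only logged and gets replayed once the line is unlocked. The self-contained user-space model below illustrates that defer-and-replay behaviour; the names (pipeline_post_irq and friends) are invented for the illustration and are not the kernel API.

#include <stdbool.h>
#include <stdio.h>

#define NR_LINES 4

static bool line_locked[NR_LINES];
static unsigned int pending[NR_LINES];

static void handle_irq(int irq) { printf("handled irq %d\n", irq); }

/* An incoming interrupt on a locked line is only logged. */
static void pipeline_post_irq(int irq)
{
	if (line_locked[irq])
		pending[irq]++;
	else
		handle_irq(irq);
}

static void pipeline_lock_irq(int irq) { line_locked[irq] = true; }

/* Unlocking replays whatever was logged while the line was locked. */
static void pipeline_unlock_irq(int irq)
{
	line_locked[irq] = false;
	while (pending[irq]) {
		pending[irq]--;
		handle_irq(irq);
	}
}

int main(void)
{
	pipeline_lock_irq(0);
	pipeline_post_irq(0);		/* deferred */
	pipeline_post_irq(0);		/* deferred */
	pipeline_unlock_irq(0);		/* replays both */
	return 0;
}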
Lines 2760-2765 static struct irq_chip lapic_chip __read_mostly = { Link Here
2760
	.mask		= mask_lapic_irq,
2845
	.mask		= mask_lapic_irq,
2761
	.unmask		= unmask_lapic_irq,
2846
	.unmask		= unmask_lapic_irq,
2762
	.ack		= ack_lapic_irq,
2847
	.ack		= ack_lapic_irq,
2848
#if defined(CONFIG_IPIPE) && defined(CONFIG_SMP)
2849
	.move		= move_apic_irq,
2850
#endif
2763
};
2851
};
2764
2852
2765
static void lapic_register_intr(int irq, struct irq_desc *desc)
2853
static void lapic_register_intr(int irq, struct irq_desc *desc)
Lines 3007-3012 static inline void __init check_timer(void) Link Here
3007
		    "...trying to set up timer as Virtual Wire IRQ...\n");
3095
		    "...trying to set up timer as Virtual Wire IRQ...\n");
3008
3096
3009
	lapic_register_intr(0, desc);
3097
	lapic_register_intr(0, desc);
3098
#if defined(CONFIG_IPIPE) && defined(CONFIG_X86_64)
3099
	irq_to_desc(0)->ipipe_ack = __ipipe_ack_edge_irq;
3100
	irq_to_desc(0)->ipipe_end = __ipipe_end_edge_irq;
3101
#endif
3010
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
3102
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
3011
	enable_8259A_irq(0);
3103
	enable_8259A_irq(0);
3012
3104
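The check_timer() hunk above also wires per-descriptor ipipe_ack/ipipe_end handlers for IRQ0 on x86-64, so the pipeline knows how to acknowledge and later end the timer line regardless of which domain consumes the tick. A minimal sketch of that per-descriptor hook idea follows; the structure and function names are invented for the illustration.

/* Sketch only: the real irq_desc/ipipe hooks differ in detail. */
struct pipelined_desc {
	void (*ack)(unsigned int irq);	/* acknowledge at the controller, e.g. __ipipe_ack_edge_irq */
	void (*end)(unsigned int irq);	/* re-enable once all domains are done, e.g. __ipipe_end_edge_irq */
};

static void deliver(struct pipelined_desc *desc, unsigned int irq,
		    void (*handler)(unsigned int))
{
	desc->ack(irq);		/* may run from a non-root domain */
	handler(irq);
	desc->end(irq);		/* completion is decoupled from the ack */
}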
Lines 3404-3409 static struct irq_chip msi_chip = { Link Here
3404
	.ack		= ack_apic_edge,
3496
	.ack		= ack_apic_edge,
3405
#ifdef CONFIG_SMP
3497
#ifdef CONFIG_SMP
3406
	.set_affinity	= set_msi_irq_affinity,
3498
	.set_affinity	= set_msi_irq_affinity,
3499
#ifdef CONFIG_IPIPE
3500
	.move		= move_apic_irq,
3501
#endif
3407
#endif
3502
#endif
3408
	.retrigger	= ioapic_retrigger_irq,
3503
	.retrigger	= ioapic_retrigger_irq,
3409
};
3504
};
Lines 3416-3421 static struct irq_chip msi_ir_chip = { Link Here
3416
	.ack		= ir_ack_apic_edge,
3511
	.ack		= ir_ack_apic_edge,
3417
#ifdef CONFIG_SMP
3512
#ifdef CONFIG_SMP
3418
	.set_affinity	= ir_set_msi_irq_affinity,
3513
	.set_affinity	= ir_set_msi_irq_affinity,
3514
#ifdef CONFIG_IPIPE
3515
	.move	= move_apic_irq,
3516
#endif
3419
#endif
3517
#endif
3420
#endif
3518
#endif
3421
	.retrigger	= ioapic_retrigger_irq,
3519
	.retrigger	= ioapic_retrigger_irq,
Lines 3704-3709 static struct irq_chip ht_irq_chip = { Link Here
3704
	.ack		= ack_apic_edge,
3802
	.ack		= ack_apic_edge,
3705
#ifdef CONFIG_SMP
3803
#ifdef CONFIG_SMP
3706
	.set_affinity	= set_ht_irq_affinity,
3804
	.set_affinity	= set_ht_irq_affinity,
3805
#ifdef CONFIG_IPIPE
3806
	.move	= move_apic_irq,
3807
#endif
3707
#endif
3808
#endif
3708
	.retrigger	= ioapic_retrigger_irq,
3809
	.retrigger	= ioapic_retrigger_irq,
3709
};
3810
};
Lines 4075-4080 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) Link Here
4075
	return 0;
4176
	return 0;
4076
}
4177
}
4077
4178
4179
#ifdef CONFIG_IPIPE
4180
unsigned __ipipe_get_ioapic_irq_vector(int irq)
4181
{
4182
	return irq >= IPIPE_FIRST_APIC_IRQ && irq < IPIPE_NR_XIRQS ?
4183
		ipipe_apic_irq_vector(irq) : irq_cfg(irq)->vector;
4184
}
4185
#endif /* CONFIG_IPIPE */
4186
4078
/*
4187
/*
4079
 * This function currently is only a helper for the i386 smp boot process where
4188
 * This function currently is only a helper for the i386 smp boot process where
4080
 * we need to reprogram the ioredtbls to cater for the cpus which have come online
4189
 * we need to reprogram the ioredtbls to cater for the cpus which have come online
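Two recurring additions close out the io_apic.c changes above: every APIC-backed irq_chip (ioapic_chip, ir_ioapic_chip, lapic_chip, and the MSI/HT chips) gains a .move = move_apic_irq callback under CONFIG_IPIPE, so affinity changes run as an explicit chip operation from the root domain instead of being folded into an ack path that may execute in another domain, and __ipipe_get_ioapic_irq_vector() lets the pipeline translate an IRQ number into the vector it has to deal with. A rough sketch of the optional-callback pattern, with names invented for the illustration:

/* Illustrative only: a chip with an optional, pipelined migration hook. */
struct chip_ops {
	void (*ack)(unsigned int irq);
	void (*move)(unsigned int irq);	/* NULL when migration is not pipelined */
};

static void finish_irq(const struct chip_ops *ops, unsigned int irq)
{
	ops->ack(irq);
	if (ops->move)		/* deferred affinity change, root domain only */
		ops->move(irq);
}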
(-)a/arch/x86/kernel/apic/ipi.c (-10 / +10 lines)
Lines 29-40 void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector) Link Here
29
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
29
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
30
	 * - mbligh
30
	 * - mbligh
31
	 */
31
	 */
32
	local_irq_save(flags);
32
	local_irq_save_hw(flags);
33
	for_each_cpu(query_cpu, mask) {
33
	for_each_cpu(query_cpu, mask) {
34
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
34
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
35
				query_cpu), vector, APIC_DEST_PHYSICAL);
35
				query_cpu), vector, APIC_DEST_PHYSICAL);
36
	}
36
	}
37
	local_irq_restore(flags);
37
	local_irq_restore_hw(flags);
38
}
38
}
39
39
40
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
40
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
Lines 46-59 void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, Link Here
46
46
47
	/* See Hack comment above */
47
	/* See Hack comment above */
48
48
49
	local_irq_save(flags);
49
	local_irq_save_hw(flags);
50
	for_each_cpu(query_cpu, mask) {
50
	for_each_cpu(query_cpu, mask) {
51
		if (query_cpu == this_cpu)
51
		if (query_cpu == this_cpu)
52
			continue;
52
			continue;
53
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
53
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
54
				 query_cpu), vector, APIC_DEST_PHYSICAL);
54
				 query_cpu), vector, APIC_DEST_PHYSICAL);
55
	}
55
	}
56
	local_irq_restore(flags);
56
	local_irq_restore_hw(flags);
57
}
57
}
58
58
59
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
59
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
Lines 68-79 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, Link Here
68
	 * should be modified to do 1 message per cluster ID - mbligh
68
	 * should be modified to do 1 message per cluster ID - mbligh
69
	 */
69
	 */
70
70
71
	local_irq_save(flags);
71
	local_irq_save_hw(flags);
72
	for_each_cpu(query_cpu, mask)
72
	for_each_cpu(query_cpu, mask)
73
		__default_send_IPI_dest_field(
73
		__default_send_IPI_dest_field(
74
			apic->cpu_to_logical_apicid(query_cpu), vector,
74
			apic->cpu_to_logical_apicid(query_cpu), vector,
75
			apic->dest_logical);
75
			apic->dest_logical);
76
	local_irq_restore(flags);
76
	local_irq_restore_hw(flags);
77
}
77
}
78
78
79
void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
79
void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
Lines 85-91 void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, Link Here
85
85
86
	/* See Hack comment above */
86
	/* See Hack comment above */
87
87
88
	local_irq_save(flags);
88
	local_irq_save_hw(flags);
89
	for_each_cpu(query_cpu, mask) {
89
	for_each_cpu(query_cpu, mask) {
90
		if (query_cpu == this_cpu)
90
		if (query_cpu == this_cpu)
91
			continue;
91
			continue;
Lines 93-99 void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, Link Here
93
			apic->cpu_to_logical_apicid(query_cpu), vector,
93
			apic->cpu_to_logical_apicid(query_cpu), vector,
94
			apic->dest_logical);
94
			apic->dest_logical);
95
		}
95
		}
96
	local_irq_restore(flags);
96
	local_irq_restore_hw(flags);
97
}
97
}
98
98
99
#ifdef CONFIG_X86_32
99
#ifdef CONFIG_X86_32
Lines 109-118 void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) Link Here
109
	if (WARN_ONCE(!mask, "empty IPI mask"))
109
	if (WARN_ONCE(!mask, "empty IPI mask"))
110
		return;
110
		return;
111
111
112
	local_irq_save(flags);
112
	local_irq_save_hw(flags);
113
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
113
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
114
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
114
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
115
	local_irq_restore(flags);
115
	local_irq_restore_hw(flags);
116
}
116
}
117
117
118
void default_send_IPI_allbutself(int vector)
118
void default_send_IPI_allbutself(int vector)
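All of the IPI transmit loops in ipi.c above switch from local_irq_save() to local_irq_save_hw(). Under the I-pipe, local_irq_save() only flips the root domain's virtual interrupt mask, so a real interrupt could still preempt the per-CPU unicast loop half-way through an ICR write; the _hw variants operate on the real EFLAGS.IF and make the sequence genuinely atomic. The same conversion is repeated in the x2APIC cluster and physical drivers further down. The self-contained model below shows why the virtual flag alone is not enough; all names are invented for the demonstration.

#include <stdbool.h>
#include <stdio.h>

static bool virt_masked;	/* what local_irq_save() flips under the I-pipe */
static bool hard_masked;	/* what local_irq_save_hw() flips (real EFLAGS.IF) */

/* A hardware interrupt only cares about the hard mask. */
static void hardware_irq_fires(void)
{
	if (hard_masked)
		printf("  -> held off: EFLAGS.IF is really cleared\n");
	else
		printf("  -> fires anyway (virtual mask=%d does not stop it)\n", virt_masked);
}

static void send_ipi_sequence(bool use_hw_mask)
{
	if (use_hw_mask)
		hard_masked = true;	/* local_irq_save_hw() */
	else
		virt_masked = true;	/* local_irq_save(): only the root domain thinks IRQs are off */

	printf("programming the ICR...\n");
	hardware_irq_fires();		/* an interrupt arrives mid-sequence */
	printf("ICR write complete\n");

	virt_masked = hard_masked = false;
}

int main(void)
{
	send_ipi_sequence(false);
	send_ipi_sequence(true);
	return 0;
}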
(-)a/arch/x86/kernel/apic/nmi.c (-1 / +5 lines)
Lines 59-64 static unsigned int nmi_hz = HZ; Link Here
59
static DEFINE_PER_CPU(short, wd_enabled);
59
static DEFINE_PER_CPU(short, wd_enabled);
60
static int endflag __initdata;
60
static int endflag __initdata;
61
61
62
static int default_nmi_watchdog_tick(struct pt_regs * regs, unsigned reason);
63
int (*nmi_watchdog_tick) (struct pt_regs * regs, unsigned reason) = &default_nmi_watchdog_tick;
64
EXPORT_SYMBOL(nmi_watchdog_tick);
65
62
static inline unsigned int get_nmi_count(int cpu)
66
static inline unsigned int get_nmi_count(int cpu)
63
{
67
{
64
	return per_cpu(irq_stat, cpu).__nmi_count;
68
	return per_cpu(irq_stat, cpu).__nmi_count;
Lines 387-393 void touch_nmi_watchdog(void) Link Here
387
EXPORT_SYMBOL(touch_nmi_watchdog);
391
EXPORT_SYMBOL(touch_nmi_watchdog);
388
392
389
notrace __kprobes int
393
notrace __kprobes int
390
nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
394
default_nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
391
{
395
{
392
	/*
396
	/*
393
	 * Since current_thread_info()-> is always on the stack, and we
397
	 * Since current_thread_info()-> is always on the stack, and we
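The nmi.c change above turns the watchdog tick into an indirection: the old nmi_watchdog_tick() body becomes default_nmi_watchdog_tick(), and nmi_watchdog_tick is now an exported function pointer initialised to it, presumably so the I-pipe (or another module) can interpose its own NMI-time logic and still fall back to the stock handler. The sketch below shows that interposition pattern with invented names.

#include <stdio.h>

struct pt_regs;		/* opaque here */

static int default_tick(struct pt_regs *regs, unsigned reason)
{
	(void)regs;
	printf("stock watchdog tick (reason %u)\n", reason);
	return 1;
}

/* Exported hook: starts out pointing at the stock handler. */
int (*watchdog_tick)(struct pt_regs *regs, unsigned reason) = default_tick;

static int traced_tick(struct pt_regs *regs, unsigned reason)
{
	printf("interposed logic runs first\n");
	return default_tick(regs, reason);	/* then defer to the original */
}

int main(void)
{
	watchdog_tick(NULL, 0);		/* default behaviour */
	watchdog_tick = traced_tick;	/* another layer re-points the hook */
	watchdog_tick(NULL, 0);
	return 0;
}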
(-)a/arch/x86/kernel/apic/x2apic_cluster.c (-6 / +6 lines)
Lines 61-73 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) Link Here
61
61
62
	x2apic_wrmsr_fence();
62
	x2apic_wrmsr_fence();
63
63
64
	local_irq_save(flags);
64
	local_irq_save_hw(flags);
65
	for_each_cpu(query_cpu, mask) {
65
	for_each_cpu(query_cpu, mask) {
66
		__x2apic_send_IPI_dest(
66
		__x2apic_send_IPI_dest(
67
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
67
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
68
			vector, apic->dest_logical);
68
			vector, apic->dest_logical);
69
	}
69
	}
70
	local_irq_restore(flags);
70
	local_irq_restore_hw(flags);
71
}
71
}
72
72
73
static void
73
static void
Lines 79-85 static void Link Here
79
79
80
	x2apic_wrmsr_fence();
80
	x2apic_wrmsr_fence();
81
81
82
	local_irq_save(flags);
82
	local_irq_save_hw(flags);
83
	for_each_cpu(query_cpu, mask) {
83
	for_each_cpu(query_cpu, mask) {
84
		if (query_cpu == this_cpu)
84
		if (query_cpu == this_cpu)
85
			continue;
85
			continue;
Lines 87-93 static void Link Here
87
				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
87
				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
88
				vector, apic->dest_logical);
88
				vector, apic->dest_logical);
89
	}
89
	}
90
	local_irq_restore(flags);
90
	local_irq_restore_hw(flags);
91
}
91
}
92
92
93
static void x2apic_send_IPI_allbutself(int vector)
93
static void x2apic_send_IPI_allbutself(int vector)
Lines 98-104 static void x2apic_send_IPI_allbutself(int vector) Link Here
98
98
99
	x2apic_wrmsr_fence();
99
	x2apic_wrmsr_fence();
100
100
101
	local_irq_save(flags);
101
	local_irq_save_hw(flags);
102
	for_each_online_cpu(query_cpu) {
102
	for_each_online_cpu(query_cpu) {
103
		if (query_cpu == this_cpu)
103
		if (query_cpu == this_cpu)
104
			continue;
104
			continue;
Lines 106-112 static void x2apic_send_IPI_allbutself(int vector) Link Here
106
				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
106
				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
107
				vector, apic->dest_logical);
107
				vector, apic->dest_logical);
108
	}
108
	}
109
	local_irq_restore(flags);
109
	local_irq_restore_hw(flags);
110
}
110
}
111
111
112
static void x2apic_send_IPI_all(int vector)
112
static void x2apic_send_IPI_all(int vector)
(-)a/arch/x86/kernel/apic/x2apic_phys.c (-6 / +6 lines)
Lines 62-73 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) Link Here
62
62
63
	x2apic_wrmsr_fence();
63
	x2apic_wrmsr_fence();
64
64
65
	local_irq_save(flags);
65
	local_irq_save_hw(flags);
66
	for_each_cpu(query_cpu, mask) {
66
	for_each_cpu(query_cpu, mask) {
67
		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
67
		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
68
				       vector, APIC_DEST_PHYSICAL);
68
				       vector, APIC_DEST_PHYSICAL);
69
	}
69
	}
70
	local_irq_restore(flags);
70
	local_irq_restore_hw(flags);
71
}
71
}
72
72
73
static void
73
static void
Lines 79-92 static void Link Here
79
79
80
	x2apic_wrmsr_fence();
80
	x2apic_wrmsr_fence();
81
81
82
	local_irq_save(flags);
82
	local_irq_save_hw(flags);
83
	for_each_cpu(query_cpu, mask) {
83
	for_each_cpu(query_cpu, mask) {
84
		if (query_cpu != this_cpu)
84
		if (query_cpu != this_cpu)
85
			__x2apic_send_IPI_dest(
85
			__x2apic_send_IPI_dest(
86
				per_cpu(x86_cpu_to_apicid, query_cpu),
86
				per_cpu(x86_cpu_to_apicid, query_cpu),
87
				vector, APIC_DEST_PHYSICAL);
87
				vector, APIC_DEST_PHYSICAL);
88
	}
88
	}
89
	local_irq_restore(flags);
89
	local_irq_restore_hw(flags);
90
}
90
}
91
91
92
static void x2apic_send_IPI_allbutself(int vector)
92
static void x2apic_send_IPI_allbutself(int vector)
Lines 97-110 static void x2apic_send_IPI_allbutself(int vector) Link Here
97
97
98
	x2apic_wrmsr_fence();
98
	x2apic_wrmsr_fence();
99
99
100
	local_irq_save(flags);
100
	local_irq_save_hw(flags);
101
	for_each_online_cpu(query_cpu) {
101
	for_each_online_cpu(query_cpu) {
102
		if (query_cpu == this_cpu)
102
		if (query_cpu == this_cpu)
103
			continue;
103
			continue;
104
		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
104
		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
105
				       vector, APIC_DEST_PHYSICAL);
105
				       vector, APIC_DEST_PHYSICAL);
106
	}
106
	}
107
	local_irq_restore(flags);
107
	local_irq_restore_hw(flags);
108
}
108
}
109
109
110
static void x2apic_send_IPI_all(int vector)
110
static void x2apic_send_IPI_all(int vector)
(-)a/arch/x86/kernel/cpu/mtrr/cyrix.c (-2 / +10 lines)
Lines 18-24 cyrix_get_arr(unsigned int reg, unsigned long *base, Link Here
18
18
19
	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
19
	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
20
20
21
	local_irq_save(flags);
21
	local_irq_save_hw(flags);
22
22
23
	ccr3 = getCx86(CX86_CCR3);
23
	ccr3 = getCx86(CX86_CCR3);
24
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
24
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
Lines 28-34 cyrix_get_arr(unsigned int reg, unsigned long *base, Link Here
28
	rcr = getCx86(CX86_RCR_BASE + reg);
28
	rcr = getCx86(CX86_RCR_BASE + reg);
29
	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */
29
	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */
30
30
31
	local_irq_restore(flags);
31
	local_irq_restore_hw(flags);
32
32
33
	shift = ((unsigned char *) base)[1] & 0x0f;
33
	shift = ((unsigned char *) base)[1] & 0x0f;
34
	*base >>= PAGE_SHIFT;
34
	*base >>= PAGE_SHIFT;
Lines 178-183 static void cyrix_set_arr(unsigned int reg, unsigned long base, Link Here
178
			  unsigned long size, mtrr_type type)
178
			  unsigned long size, mtrr_type type)
179
{
179
{
180
	unsigned char arr, arr_type, arr_size;
180
	unsigned char arr, arr_type, arr_size;
181
	unsigned long flags;
181
182
182
	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
183
	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
183
184
Lines 221-226 static void cyrix_set_arr(unsigned int reg, unsigned long base, Link Here
221
		}
222
		}
222
	}
223
	}
223
224
225
	local_irq_save_hw(flags);
226
224
	prepare_set();
227
	prepare_set();
225
228
226
	base <<= PAGE_SHIFT;
229
	base <<= PAGE_SHIFT;
Lines 230-235 static void cyrix_set_arr(unsigned int reg, unsigned long base, Link Here
230
	setCx86(CX86_RCR_BASE + reg, arr_type);
233
	setCx86(CX86_RCR_BASE + reg, arr_type);
231
234
232
	post_set();
235
	post_set();
236
237
	local_irq_restore_hw(flags);
233
}
238
}
234
239
235
typedef struct {
240
typedef struct {
Lines 247-254 static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 }; Link Here
247
252
248
static void cyrix_set_all(void)
253
static void cyrix_set_all(void)
249
{
254
{
255
	unsigned long flags;
250
	int i;
256
	int i;
251
257
258
	local_irq_save_hw(flags);
252
	prepare_set();
259
	prepare_set();
253
260
254
	/* the CCRs are not contiguous */
261
	/* the CCRs are not contiguous */
Lines 263-268 static void cyrix_set_all(void) Link Here
263
	}
270
	}
264
271
265
	post_set();
272
	post_set();
273
	local_irq_restore_hw(flags);
266
}
274
}
267
275
268
static struct mtrr_ops cyrix_mtrr_ops = {
276
static struct mtrr_ops cyrix_mtrr_ops = {
(-)a/arch/x86/kernel/cpu/mtrr/generic.c (-5 / +5 lines)
Lines 635-641 static void generic_set_all(void) Link Here
635
	unsigned long mask, count;
635
	unsigned long mask, count;
636
	unsigned long flags;
636
	unsigned long flags;
637
637
638
	local_irq_save(flags);
638
	local_irq_save_hw(flags);
639
	prepare_set();
639
	prepare_set();
640
640
641
	/* Actually set the state */
641
	/* Actually set the state */
Lines 645-651 static void generic_set_all(void) Link Here
645
	pat_init();
645
	pat_init();
646
646
647
	post_set();
647
	post_set();
648
	local_irq_restore(flags);
648
	local_irq_restore_hw(flags);
649
649
650
	/* Use the atomic bitops to update the global mask */
650
	/* Use the atomic bitops to update the global mask */
651
	for (count = 0; count < sizeof mask * 8; ++count) {
651
	for (count = 0; count < sizeof mask * 8; ++count) {
Lines 669-680 static void generic_set_all(void) Link Here
669
static void generic_set_mtrr(unsigned int reg, unsigned long base,
669
static void generic_set_mtrr(unsigned int reg, unsigned long base,
670
			     unsigned long size, mtrr_type type)
670
			     unsigned long size, mtrr_type type)
671
{
671
{
672
	unsigned long flags;
672
	unsigned long flags, _flags;
673
	struct mtrr_var_range *vr;
673
	struct mtrr_var_range *vr;
674
674
675
	vr = &mtrr_state.var_ranges[reg];
675
	vr = &mtrr_state.var_ranges[reg];
676
676
677
	local_irq_save(flags);
677
	local_irq_save_full(flags, _flags);
678
	prepare_set();
678
	prepare_set();
679
679
680
	if (size == 0) {
680
	if (size == 0) {
Lines 695-701 static void generic_set_mtrr(unsigned int reg, unsigned long base, Link Here
695
	}
695
	}
696
696
697
	post_set();
697
	post_set();
698
	local_irq_restore(flags);
698
	local_irq_restore_full(flags, _flags);
699
}
699
}
700
700
701
int generic_validate_add_page(unsigned long base, unsigned long size,
701
int generic_validate_add_page(unsigned long base, unsigned long size,
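In the MTRR code above, generic_set_all() and the Cyrix set paths wrap prepare_set()/post_set() in local_irq_save_hw()/local_irq_restore_hw(), while generic_set_mtrr() uses local_irq_save_full(flags, _flags). As far as the patch shows, the _full variant saves and disables both interrupt states at once: the root domain's virtual mask (so Linux bookkeeping stays consistent) and the real hardware mask (so no domain can preempt the MTRR update in between). A small runnable model of that pairing, with invented names:

#include <stdio.h>

static int virt_off, hard_off;		/* 1 = masked */

/* Sketch of the combined virtual + hardware save/restore pairing. */
static void irq_save_full(int *vflags, int *hflags)
{
	*vflags = virt_off; virt_off = 1;	/* root domain's soft flag */
	*hflags = hard_off; hard_off = 1;	/* real EFLAGS.IF */
}

static void irq_restore_full(int vflags, int hflags)
{
	hard_off = hflags;
	virt_off = vflags;
}

int main(void)
{
	int v, h;

	irq_save_full(&v, &h);
	printf("MTRR update runs with virt=%d hard=%d (both masked)\n", virt_off, hard_off);
	irq_restore_full(v, h);
	printf("after restore: virt=%d hard=%d\n", virt_off, hard_off);
	return 0;
}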
(-)a/arch/x86/kernel/dumpstack.c (+1 lines)
Lines 327-332 die_nmi(char *str, struct pt_regs *regs, int do_panic) Link Here
327
	local_irq_enable();
327
	local_irq_enable();
328
	do_exit(SIGBUS);
328
	do_exit(SIGBUS);
329
}
329
}
330
EXPORT_SYMBOL_GPL(die_nmi);
330
331
331
static int __init oops_setup(char *s)
332
static int __init oops_setup(char *s)
332
{
333
{
(-)a/arch/x86/kernel/dumpstack_32.c (+3 lines)
Lines 108-113 void show_registers(struct pt_regs *regs) Link Here
108
	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
108
	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
109
		TASK_COMM_LEN, current->comm, task_pid_nr(current),
109
		TASK_COMM_LEN, current->comm, task_pid_nr(current),
110
		current_thread_info(), current, task_thread_info(current));
110
		current_thread_info(), current, task_thread_info(current));
111
#ifdef CONFIG_IPIPE
112
	printk(KERN_EMERG "I-pipe domain %s\n", ipipe_current_domain->name);
113
#endif /* CONFIG_IPIPE */
111
	/*
114
	/*
112
	 * When in-kernel, we also print out the stack and code at the
115
	 * When in-kernel, we also print out the stack and code at the
113
	 * time of the fault..
116
	 * time of the fault..
(-)a/arch/x86/kernel/dumpstack_64.c (+5 lines)
Lines 254-259 void show_registers(struct pt_regs *regs) Link Here
254
	sp = regs->sp;
254
	sp = regs->sp;
255
	printk("CPU %d ", cpu);
255
	printk("CPU %d ", cpu);
256
	__show_regs(regs, 1);
256
	__show_regs(regs, 1);
257
#ifdef CONFIG_IPIPE
258
	if (ipipe_current_domain != ipipe_root_domain)
259
		printk("I-pipe domain %s\n", ipipe_current_domain->name);
260
	else
261
#endif /* CONFIG_IPIPE */
257
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
262
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
258
		cur->comm, cur->pid, task_thread_info(cur), cur);
263
		cur->comm, cur->pid, task_thread_info(cur), cur);
259
264
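The two dumpstack hunks above make an oops say which I-pipe domain was active: the 32-bit show_registers() always prints ipipe_current_domain->name, while the 64-bit variant prints it only when the current domain is not the root domain and otherwise falls through to the usual process line. The 64-bit behaviour boils down to the small sketch below (a user-space stand-in, not the patch text).

#include <stdio.h>
#include <string.h>

/* Sketch of the x86-64 variant: name the I-pipe domain only when it
 * is not the root (plain Linux) domain. */
static void report_context(const char *active_domain, const char *comm, int pid)
{
	if (strcmp(active_domain, "root") != 0)
		printf("I-pipe domain %s\n", active_domain);
	else
		printf("Process %s (pid: %d, ...)\n", comm, pid);
}

int main(void)
{
	report_context("rt-domain", "irq-handler", 0);
	report_context("root", "bash", 1234);
	return 0;
}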
(-)a/arch/x86/kernel/entry_32.S (-24 / +136 lines)
Lines 44-49 Link Here
44
#include <linux/linkage.h>
44
#include <linux/linkage.h>
45
#include <asm/thread_info.h>
45
#include <asm/thread_info.h>
46
#include <asm/irqflags.h>
46
#include <asm/irqflags.h>
47
#include <asm/ipipe_base.h>
47
#include <asm/errno.h>
48
#include <asm/errno.h>
48
#include <asm/segment.h>
49
#include <asm/segment.h>
49
#include <asm/smp.h>
50
#include <asm/smp.h>
Lines 79-84 Link Here
79
80
80
#define nr_syscalls ((syscall_table_size)/4)
81
#define nr_syscalls ((syscall_table_size)/4)
81
82
83
#ifdef CONFIG_IPIPE
84
#define EMULATE_ROOT_IRET(bypass) \
85
				call __ipipe_unstall_iret_root ; \
86
				TRACE_IRQS_ON ; \
87
				bypass: \
88
				movl PT_EAX(%esp),%eax
89
#define TEST_PREEMPTIBLE(regs)  call __ipipe_kpreempt_root ; testl %eax,%eax
90
#define CATCH_ROOT_SYSCALL(bypass1,bypass2)	\
91
				movl  %esp,%eax ; \
92
				call __ipipe_syscall_root ; \
93
				testl  %eax,%eax ; \
94
				js    bypass1 ; \
95
				jne   bypass2 ; \
96
				movl PT_ORIG_EAX(%esp),%eax
97
#define PUSH_XCODE(v)		pushl $ ex_ ## v
98
#define PUSH_XVEC(v)		pushl $ ex_ ## v
99
#define HANDLE_EXCEPTION(code)	movl %code,%ecx ; \
100
				call __ipipe_handle_exception ; \
101
				testl %eax,%eax	; \
102
				jnz restore_ret
103
#define DIVERT_EXCEPTION(code)	movl $(__USER_DS), %ecx	; \
104
				movl %ecx, %ds ; \
105
				movl %ecx, %es ; \
106
				movl %esp, %eax	; \
107
				movl $ex_ ## code,%edx ; \
108
				call __ipipe_divert_exception ; \
109
				testl %eax,%eax	; \
110
				jnz restore_ret
111
112
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
113
# define IPIPE_TRACE_IRQ_ENTER \
114
	lea PT_EIP-4(%esp), %ebp; \
115
	movl PT_ORIG_EAX(%esp), %eax; \
116
	call ipipe_trace_begin
117
# define IPIPE_TRACE_IRQ_EXIT \
118
	pushl %eax; \
119
	movl PT_ORIG_EAX+4(%esp), %eax; \
120
	call ipipe_trace_end; \
121
	popl %eax
122
#else  /* !CONFIG_IPIPE_TRACE_IRQSOFF */
123
#define IPIPE_TRACE_IRQ_ENTER
124
#define IPIPE_TRACE_IRQ_EXIT
125
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
126
#else /* !CONFIG_IPIPE */
127
#define EMULATE_ROOT_IRET(bypass)
128
#define TEST_PREEMPTIBLE(regs)		testl $X86_EFLAGS_IF,PT_EFLAGS(regs)
129
#define CATCH_ROOT_SYSCALL(bypass1,bypass2)
130
#define PUSH_XCODE(v)			pushl $v
131
#define PUSH_XVEC(v)			pushl v
132
#define HANDLE_EXCEPTION(code)		call *%code
133
#define DIVERT_EXCEPTION(code)
134
#endif /* CONFIG_IPIPE */
135
	
82
#ifdef CONFIG_PREEMPT
136
#ifdef CONFIG_PREEMPT
83
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
137
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
84
#else
138
#else
Lines 318-323 Link Here
318
.endm
372
.endm
319
373
320
ENTRY(ret_from_fork)
374
ENTRY(ret_from_fork)
375
	ENABLE_INTERRUPTS_HW_COND
321
	CFI_STARTPROC
376
	CFI_STARTPROC
322
	pushl %eax
377
	pushl %eax
323
	CFI_ADJUST_CFA_OFFSET 4
378
	CFI_ADJUST_CFA_OFFSET 4
Lines 345-351 END(ret_from_fork) Link Here
345
	RING0_PTREGS_FRAME
400
	RING0_PTREGS_FRAME
346
ret_from_exception:
401
ret_from_exception:
347
	preempt_stop(CLBR_ANY)
402
	preempt_stop(CLBR_ANY)
348
ret_from_intr:
403
ENTRY(ret_from_intr)
349
	GET_THREAD_INFO(%ebp)
404
	GET_THREAD_INFO(%ebp)
350
check_userspace:
405
check_userspace:
351
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
406
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
Lines 369-382 END(ret_from_exception) Link Here
369
424
370
#ifdef CONFIG_PREEMPT
425
#ifdef CONFIG_PREEMPT
371
ENTRY(resume_kernel)
426
ENTRY(resume_kernel)
372
	DISABLE_INTERRUPTS(CLBR_ANY)
427
	DISABLE_INTERRUPTS_HW(CLBR_ANY)
373
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
428
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
374
	jnz restore_all
429
	jnz restore_all
375
need_resched:
430
need_resched:
376
	movl TI_flags(%ebp), %ecx	# need_resched set ?
431
	movl TI_flags(%ebp), %ecx	# need_resched set ?
377
	testb $_TIF_NEED_RESCHED, %cl
432
	testb $_TIF_NEED_RESCHED, %cl
378
	jz restore_all
433
	jz restore_all
379
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
434
   	TEST_PREEMPTIBLE(%esp)		# interrupts off (exception path) ?
380
	jz restore_all
435
	jz restore_all
381
	call preempt_schedule_irq
436
	call preempt_schedule_irq
382
	jmp need_resched
437
	jmp need_resched
Lines 424-430 sysenter_past_esp: Link Here
424
	pushl %eax
479
	pushl %eax
425
	CFI_ADJUST_CFA_OFFSET 4
480
	CFI_ADJUST_CFA_OFFSET 4
426
	SAVE_ALL
481
	SAVE_ALL
427
	ENABLE_INTERRUPTS(CLBR_NONE)
482
	ENABLE_INTERRUPTS_HW(CLBR_NONE)
428
483
429
/*
484
/*
430
 * Load the potential sixth argument from user stack.
485
 * Load the potential sixth argument from user stack.
Lines 440-445 sysenter_past_esp: Link Here
440
.previous
495
.previous
441
496
442
	GET_THREAD_INFO(%ebp)
497
	GET_THREAD_INFO(%ebp)
498
	CATCH_ROOT_SYSCALL(sysenter_tail,sysenter_out)
443
499
444
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
500
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
445
	jnz sysenter_audit
501
	jnz sysenter_audit
Lines 448-453 sysenter_do_call: Link Here
448
	jae syscall_badsys
504
	jae syscall_badsys
449
	call *sys_call_table(,%eax,4)
505
	call *sys_call_table(,%eax,4)
450
	movl %eax,PT_EAX(%esp)
506
	movl %eax,PT_EAX(%esp)
507
sysenter_tail:
451
	LOCKDEP_SYS_EXIT
508
	LOCKDEP_SYS_EXIT
452
	DISABLE_INTERRUPTS(CLBR_ANY)
509
	DISABLE_INTERRUPTS(CLBR_ANY)
453
	TRACE_IRQS_OFF
510
	TRACE_IRQS_OFF
Lines 456-465 sysenter_do_call: Link Here
456
	jne sysexit_audit
513
	jne sysexit_audit
457
sysenter_exit:
514
sysenter_exit:
458
/* if something modifies registers it must also disable sysexit */
515
/* if something modifies registers it must also disable sysexit */
516
 	EMULATE_ROOT_IRET(sysenter_out)
459
	movl PT_EIP(%esp), %edx
517
	movl PT_EIP(%esp), %edx
460
	movl PT_OLDESP(%esp), %ecx
518
	movl PT_OLDESP(%esp), %ecx
461
	xorl %ebp,%ebp
519
	xorl %ebp,%ebp
462
	TRACE_IRQS_ON
520
#ifndef CONFIG_IPIPE
521
  	TRACE_IRQS_ON
522
#endif
463
1:	mov  PT_FS(%esp), %fs
523
1:	mov  PT_FS(%esp), %fs
464
	PTGS_TO_GS
524
	PTGS_TO_GS
465
	ENABLE_INTERRUPTS_SYSEXIT
525
	ENABLE_INTERRUPTS_SYSEXIT
Lines 520-525 ENTRY(system_call) Link Here
520
	CFI_ADJUST_CFA_OFFSET 4
580
	CFI_ADJUST_CFA_OFFSET 4
521
	SAVE_ALL
581
	SAVE_ALL
522
	GET_THREAD_INFO(%ebp)
582
	GET_THREAD_INFO(%ebp)
583
 	CATCH_ROOT_SYSCALL(syscall_exit,restore_ret)
523
					# system call tracing in operation / emulation
584
					# system call tracing in operation / emulation
524
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
585
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
525
	jnz syscall_trace_entry
586
	jnz syscall_trace_entry
Lines 552-557 restore_all_notrace: Link Here
552
	CFI_REMEMBER_STATE
613
	CFI_REMEMBER_STATE
553
	je ldt_ss			# returning to user-space with LDT SS
614
	je ldt_ss			# returning to user-space with LDT SS
554
restore_nocheck:
615
restore_nocheck:
616
#ifdef CONFIG_IPIPE
617
	call __ipipe_unstall_iret_root
618
#endif /* CONFIG_IPIPE */
619
restore_ret:
555
	RESTORE_REGS 4			# skip orig_eax/error_code
620
	RESTORE_REGS 4			# skip orig_eax/error_code
556
	CFI_ADJUST_CFA_OFFSET -4
621
	CFI_ADJUST_CFA_OFFSET -4
557
irq_return:
622
irq_return:
Lines 559-565 irq_return: Link Here
559
.section .fixup,"ax"
624
.section .fixup,"ax"
560
ENTRY(iret_exc)
625
ENTRY(iret_exc)
561
	pushl $0			# no error code
626
	pushl $0			# no error code
562
	pushl $do_iret_error
627
  	PUSH_XCODE(do_iret_error)
563
	jmp error_code
628
	jmp error_code
564
.previous
629
.previous
565
.section __ex_table,"a"
630
.section __ex_table,"a"
Lines 613-619 ldt_ss: Link Here
613
	/* Disable interrupts, but do not irqtrace this section: we
678
	/* Disable interrupts, but do not irqtrace this section: we
614
	 * will soon execute iret and the tracer was already set to
679
	 * will soon execute iret and the tracer was already set to
615
	 * the irqstate after the iret */
680
	 * the irqstate after the iret */
616
	DISABLE_INTERRUPTS(CLBR_EAX)
681
	DISABLE_INTERRUPTS_HW(CLBR_EAX)
617
	lss (%esp), %esp		/* switch to espfix segment */
682
	lss (%esp), %esp		/* switch to espfix segment */
618
	CFI_ADJUST_CFA_OFFSET -8
683
	CFI_ADJUST_CFA_OFFSET -8
619
	jmp restore_nocheck
684
	jmp restore_nocheck
Lines 627-632 work_pending: Link Here
627
	testb $_TIF_NEED_RESCHED, %cl
692
	testb $_TIF_NEED_RESCHED, %cl
628
	jz work_notifysig
693
	jz work_notifysig
629
work_resched:
694
work_resched:
695
	ENABLE_INTERRUPTS_HW_COND
630
	call schedule
696
	call schedule
631
	LOCKDEP_SYS_EXIT
697
	LOCKDEP_SYS_EXIT
632
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
698
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
Lines 799-804 END(irq_entries_start) Link Here
799
END(interrupt)
865
END(interrupt)
800
.previous
866
.previous
801
867
868
#ifdef CONFIG_IPIPE
869
	.p2align CONFIG_X86_L1_CACHE_SHIFT
870
common_interrupt:
871
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
872
	SAVE_ALL
873
	IPIPE_TRACE_IRQ_ENTER
874
	movl %esp, %eax
875
	call *ipipe_irq_handler
876
	IPIPE_TRACE_IRQ_EXIT
877
	testl %eax,%eax
878
	jnz  ret_from_intr
879
	jmp restore_ret
880
	CFI_ENDPROC
881
882
#define BUILD_INTERRUPT3(name, nr, fn)	\
883
ENTRY(name)				\
884
	RING0_INT_FRAME;		\
885
	pushl $~(nr);			\
886
	CFI_ADJUST_CFA_OFFSET 4;	\
887
	SAVE_ALL;			\
888
	IPIPE_TRACE_IRQ_ENTER;		\
889
 	movl %esp, %eax;		\
890
	call *ipipe_irq_handler;	\
891
	IPIPE_TRACE_IRQ_EXIT;		\
892
	testl %eax,%eax;		\
893
	jnz  ret_from_intr;		\
894
	jmp restore_ret;	\
895
	CFI_ENDPROC
896
897
#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
898
899
#ifdef CONFIG_X86_LOCAL_APIC
900
	BUILD_INTERRUPT(ipipe_ipi0,IPIPE_SERVICE_VECTOR0)
901
	BUILD_INTERRUPT(ipipe_ipi1,IPIPE_SERVICE_VECTOR1)
902
	BUILD_INTERRUPT(ipipe_ipi2,IPIPE_SERVICE_VECTOR2)
903
	BUILD_INTERRUPT(ipipe_ipi3,IPIPE_SERVICE_VECTOR3)
904
#ifdef CONFIG_SMP
905
	BUILD_INTERRUPT(ipipe_ipiX,IPIPE_CRITICAL_VECTOR)
906
#endif	
907
#endif
908
909
#else /* !CONFIG_IPIPE */
802
/*
910
/*
803
 * the CPU automatically disables interrupts when executing an IRQ vector,
911
 * the CPU automatically disables interrupts when executing an IRQ vector,
804
 * so IRQ-flags tracing has to follow that:
912
 * so IRQ-flags tracing has to follow that:
Lines 829-834 ENDPROC(name) Link Here
829
937
830
#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
938
#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
831
939
940
#endif /* !CONFIG_IPIPE */
941
832
/* The include is where all of the SMP etc. interrupts come from */
942
/* The include is where all of the SMP etc. interrupts come from */
833
#include <asm/entry_arch.h>
943
#include <asm/entry_arch.h>
834
944
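Under CONFIG_IPIPE the low-level interrupt stubs above (common_interrupt and the BUILD_INTERRUPT3 variants, with matching 64-bit counterparts later in the patch) no longer call do_IRQ directly: they hand the register frame to *ipipe_irq_handler and branch on its return value. Judging from the branches, a nonzero result sends the stub through the regular ret_from_intr exit so Linux performs its usual return-from-interrupt work, while zero takes the short restore_ret exit because the interrupt was either fully handled in a higher-priority domain or merely logged for the root domain. The control-flow sketch below restates that decision with stubbed stand-ins, not the kernel symbols.

#include <stdio.h>

struct pt_regs;				/* opaque register frame, as in the asm */

/* Stand-ins for the handler and the two exit labels (sketch only). */
static int  ipipe_irq_handler_model(struct pt_regs *regs) { (void)regs; return 0; }
static void ret_from_intr_model(void)  { printf("slow path: run the normal return-from-interrupt work\n"); }
static void restore_ret_model(void)    { printf("fast path: IRQ logged or already handled, just restore\n"); }

/* Control-flow sketch of the CONFIG_IPIPE common_interrupt stub. */
static void pipelined_interrupt_stub(struct pt_regs *regs)
{
	if (ipipe_irq_handler_model(regs))
		ret_from_intr_model();	/* nonzero: propagate to Linux now */
	else
		restore_ret_model();	/* zero: nothing for Linux to do yet */
}

int main(void)
{
	pipelined_interrupt_stub((struct pt_regs *)0);
	return 0;
}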
Lines 836-842 ENTRY(coprocessor_error) Link Here
836
	RING0_INT_FRAME
946
	RING0_INT_FRAME
837
	pushl $0
947
	pushl $0
838
	CFI_ADJUST_CFA_OFFSET 4
948
	CFI_ADJUST_CFA_OFFSET 4
839
	pushl $do_coprocessor_error
949
 	PUSH_XCODE(do_coprocessor_error)
840
	CFI_ADJUST_CFA_OFFSET 4
950
	CFI_ADJUST_CFA_OFFSET 4
841
	jmp error_code
951
	jmp error_code
842
	CFI_ENDPROC
952
	CFI_ENDPROC
Lines 846-852 ENTRY(simd_coprocessor_error) Link Here
846
	RING0_INT_FRAME
956
	RING0_INT_FRAME
847
	pushl $0
957
	pushl $0
848
	CFI_ADJUST_CFA_OFFSET 4
958
	CFI_ADJUST_CFA_OFFSET 4
849
	pushl $do_simd_coprocessor_error
959
 	PUSH_XCODE(do_simd_coprocessor_error)
850
	CFI_ADJUST_CFA_OFFSET 4
960
	CFI_ADJUST_CFA_OFFSET 4
851
	jmp error_code
961
	jmp error_code
852
	CFI_ENDPROC
962
	CFI_ENDPROC
Lines 856-862 ENTRY(device_not_available) Link Here
856
	RING0_INT_FRAME
966
	RING0_INT_FRAME
857
	pushl $-1			# mark this as an int
967
	pushl $-1			# mark this as an int
858
	CFI_ADJUST_CFA_OFFSET 4
968
	CFI_ADJUST_CFA_OFFSET 4
859
	pushl $do_device_not_available
969
 	PUSH_XCODE(do_device_not_available)
860
	CFI_ADJUST_CFA_OFFSET 4
970
	CFI_ADJUST_CFA_OFFSET 4
861
	jmp error_code
971
	jmp error_code
862
	CFI_ENDPROC
972
	CFI_ENDPROC
Lines 881-887 ENTRY(overflow) Link Here
881
	RING0_INT_FRAME
991
	RING0_INT_FRAME
882
	pushl $0
992
	pushl $0
883
	CFI_ADJUST_CFA_OFFSET 4
993
	CFI_ADJUST_CFA_OFFSET 4
884
	pushl $do_overflow
994
 	PUSH_XCODE(do_overflow)
885
	CFI_ADJUST_CFA_OFFSET 4
995
	CFI_ADJUST_CFA_OFFSET 4
886
	jmp error_code
996
	jmp error_code
887
	CFI_ENDPROC
997
	CFI_ENDPROC
Lines 891-897 ENTRY(bounds) Link Here
891
	RING0_INT_FRAME
1001
	RING0_INT_FRAME
892
	pushl $0
1002
	pushl $0
893
	CFI_ADJUST_CFA_OFFSET 4
1003
	CFI_ADJUST_CFA_OFFSET 4
894
	pushl $do_bounds
1004
 	PUSH_XCODE(do_bounds)
895
	CFI_ADJUST_CFA_OFFSET 4
1005
	CFI_ADJUST_CFA_OFFSET 4
896
	jmp error_code
1006
	jmp error_code
897
	CFI_ENDPROC
1007
	CFI_ENDPROC
Lines 901-907 ENTRY(invalid_op) Link Here
901
	RING0_INT_FRAME
1011
	RING0_INT_FRAME
902
	pushl $0
1012
	pushl $0
903
	CFI_ADJUST_CFA_OFFSET 4
1013
	CFI_ADJUST_CFA_OFFSET 4
904
	pushl $do_invalid_op
1014
 	PUSH_XCODE(do_invalid_op)
905
	CFI_ADJUST_CFA_OFFSET 4
1015
	CFI_ADJUST_CFA_OFFSET 4
906
	jmp error_code
1016
	jmp error_code
907
	CFI_ENDPROC
1017
	CFI_ENDPROC
Lines 911-917 ENTRY(coprocessor_segment_overrun) Link Here
911
	RING0_INT_FRAME
1021
	RING0_INT_FRAME
912
	pushl $0
1022
	pushl $0
913
	CFI_ADJUST_CFA_OFFSET 4
1023
	CFI_ADJUST_CFA_OFFSET 4
914
	pushl $do_coprocessor_segment_overrun
1024
  	PUSH_XCODE(do_coprocessor_segment_overrun)
915
	CFI_ADJUST_CFA_OFFSET 4
1025
	CFI_ADJUST_CFA_OFFSET 4
916
	jmp error_code
1026
	jmp error_code
917
	CFI_ENDPROC
1027
	CFI_ENDPROC
Lines 919-925 END(coprocessor_segment_overrun) Link Here
919
1029
920
ENTRY(invalid_TSS)
1030
ENTRY(invalid_TSS)
921
	RING0_EC_FRAME
1031
	RING0_EC_FRAME
922
	pushl $do_invalid_TSS
1032
  	PUSH_XCODE(do_invalid_TSS)
923
	CFI_ADJUST_CFA_OFFSET 4
1033
	CFI_ADJUST_CFA_OFFSET 4
924
	jmp error_code
1034
	jmp error_code
925
	CFI_ENDPROC
1035
	CFI_ENDPROC
Lines 927-933 END(invalid_TSS) Link Here
927
1037
928
ENTRY(segment_not_present)
1038
ENTRY(segment_not_present)
929
	RING0_EC_FRAME
1039
	RING0_EC_FRAME
930
	pushl $do_segment_not_present
1040
  	PUSH_XCODE(do_segment_not_present)
931
	CFI_ADJUST_CFA_OFFSET 4
1041
	CFI_ADJUST_CFA_OFFSET 4
932
	jmp error_code
1042
	jmp error_code
933
	CFI_ENDPROC
1043
	CFI_ENDPROC
Lines 935-941 END(segment_not_present) Link Here
935
1045
936
ENTRY(stack_segment)
1046
ENTRY(stack_segment)
937
	RING0_EC_FRAME
1047
	RING0_EC_FRAME
938
	pushl $do_stack_segment
1048
  	PUSH_XCODE(do_stack_segment)
939
	CFI_ADJUST_CFA_OFFSET 4
1049
	CFI_ADJUST_CFA_OFFSET 4
940
	jmp error_code
1050
	jmp error_code
941
	CFI_ENDPROC
1051
	CFI_ENDPROC
Lines 943-949 END(stack_segment) Link Here
943
1053
944
ENTRY(alignment_check)
1054
ENTRY(alignment_check)
945
	RING0_EC_FRAME
1055
	RING0_EC_FRAME
946
	pushl $do_alignment_check
1056
 	PUSH_XCODE(do_alignment_check)
947
	CFI_ADJUST_CFA_OFFSET 4
1057
	CFI_ADJUST_CFA_OFFSET 4
948
	jmp error_code
1058
	jmp error_code
949
	CFI_ENDPROC
1059
	CFI_ENDPROC
Lines 953-959 ENTRY(divide_error) Link Here
953
	RING0_INT_FRAME
1063
	RING0_INT_FRAME
954
	pushl $0			# no error code
1064
	pushl $0			# no error code
955
	CFI_ADJUST_CFA_OFFSET 4
1065
	CFI_ADJUST_CFA_OFFSET 4
956
	pushl $do_divide_error
1066
 	PUSH_XCODE(do_divide_error)
957
	CFI_ADJUST_CFA_OFFSET 4
1067
	CFI_ADJUST_CFA_OFFSET 4
958
	jmp error_code
1068
	jmp error_code
959
	CFI_ENDPROC
1069
	CFI_ENDPROC
Lines 964-970 ENTRY(machine_check) Link Here
964
	RING0_INT_FRAME
1074
	RING0_INT_FRAME
965
	pushl $0
1075
	pushl $0
966
	CFI_ADJUST_CFA_OFFSET 4
1076
	CFI_ADJUST_CFA_OFFSET 4
967
	pushl machine_check_vector
1077
 	PUSH_XVEC(machine_check_vector)
968
	CFI_ADJUST_CFA_OFFSET 4
1078
	CFI_ADJUST_CFA_OFFSET 4
969
	jmp error_code
1079
	jmp error_code
970
	CFI_ENDPROC
1080
	CFI_ENDPROC
Lines 975-981 ENTRY(spurious_interrupt_bug) Link Here
975
	RING0_INT_FRAME
1085
	RING0_INT_FRAME
976
	pushl $0
1086
	pushl $0
977
	CFI_ADJUST_CFA_OFFSET 4
1087
	CFI_ADJUST_CFA_OFFSET 4
978
	pushl $do_spurious_interrupt_bug
1088
 	PUSH_XCODE(do_spurious_interrupt_bug)
979
	CFI_ADJUST_CFA_OFFSET 4
1089
	CFI_ADJUST_CFA_OFFSET 4
980
	jmp error_code
1090
	jmp error_code
981
	CFI_ENDPROC
1091
	CFI_ENDPROC
Lines 1210-1216 syscall_table_size=(.-sys_call_table) Link Here
1210
1320
1211
ENTRY(page_fault)
1321
ENTRY(page_fault)
1212
	RING0_EC_FRAME
1322
	RING0_EC_FRAME
1213
	pushl $do_page_fault
1323
 	PUSH_XCODE(do_page_fault)
1214
	CFI_ADJUST_CFA_OFFSET 4
1324
	CFI_ADJUST_CFA_OFFSET 4
1215
	ALIGN
1325
	ALIGN
1216
error_code:
1326
error_code:
Lines 1260-1266 error_code: Link Here
1260
	movl %ecx, %es
1370
	movl %ecx, %es
1261
	TRACE_IRQS_OFF
1371
	TRACE_IRQS_OFF
1262
	movl %esp,%eax			# pt_regs pointer
1372
	movl %esp,%eax			# pt_regs pointer
1263
	call *%edi
1373
	HANDLE_EXCEPTION(edi)
1264
	jmp ret_from_exception
1374
	jmp ret_from_exception
1265
	CFI_ENDPROC
1375
	CFI_ENDPROC
1266
END(page_fault)
1376
END(page_fault)
Lines 1304-1309 debug_stack_correct: Link Here
1304
	CFI_ADJUST_CFA_OFFSET 4
1414
	CFI_ADJUST_CFA_OFFSET 4
1305
	SAVE_ALL
1415
	SAVE_ALL
1306
	TRACE_IRQS_OFF
1416
	TRACE_IRQS_OFF
1417
 	DIVERT_EXCEPTION(do_debug)
1307
	xorl %edx,%edx			# error code 0
1418
	xorl %edx,%edx			# error code 0
1308
	movl %esp,%eax			# pt_regs pointer
1419
	movl %esp,%eax			# pt_regs pointer
1309
	call do_debug
1420
	call do_debug
Lines 1404-1409 ENTRY(int3) Link Here
1404
	CFI_ADJUST_CFA_OFFSET 4
1515
	CFI_ADJUST_CFA_OFFSET 4
1405
	SAVE_ALL
1516
	SAVE_ALL
1406
	TRACE_IRQS_OFF
1517
	TRACE_IRQS_OFF
1518
 	DIVERT_EXCEPTION(do_int3)
1407
	xorl %edx,%edx		# zero error code
1519
	xorl %edx,%edx		# zero error code
1408
	movl %esp,%eax		# pt_regs pointer
1520
	movl %esp,%eax		# pt_regs pointer
1409
	call do_int3
1521
	call do_int3
Lines 1413-1419 END(int3) Link Here
1413
1525
1414
ENTRY(general_protection)
1526
ENTRY(general_protection)
1415
	RING0_EC_FRAME
1527
	RING0_EC_FRAME
1416
	pushl $do_general_protection
1528
 	PUSH_XCODE(do_general_protection)
1417
	CFI_ADJUST_CFA_OFFSET 4
1529
	CFI_ADJUST_CFA_OFFSET 4
1418
	jmp error_code
1530
	jmp error_code
1419
	CFI_ENDPROC
1531
	CFI_ENDPROC
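The other thread running through entry_32.S is syscall interception. CATCH_ROOT_SYSCALL feeds the register frame to __ipipe_syscall_root; judging from the labels it branches to, a negative return short-circuits to the first bypass label (the regular syscall exit work, without dispatching through sys_call_table), any other nonzero value takes the second bypass (a direct register restore), and zero falls through to the normal Linux dispatch. The entry_64.S changes that follow apply the same idea via __ipipe_syscall_root_thunk. A control-flow sketch with stub names, not the kernel functions:

#include <stdio.h>

struct pt_regs;		/* opaque register frame */

/* Stand-in for __ipipe_syscall_root's verdict: <0, >0 or 0 (sketch only). */
static int syscall_root_model(struct pt_regs *regs) { (void)regs; return 0; }

static void syscall_exit_path(void)  { printf("bypass 1: run the syscall exit work only\n"); }
static void fast_restore_path(void)  { printf("bypass 2: restore registers and return\n"); }
static void linux_dispatch(void)     { printf("fall through: normal sys_call_table dispatch\n"); }

/* Control-flow sketch of CATCH_ROOT_SYSCALL(bypass1, bypass2). */
static void catch_root_syscall(struct pt_regs *regs)
{
	int verdict = syscall_root_model(regs);

	if (verdict < 0)
		syscall_exit_path();
	else if (verdict != 0)
		fast_restore_path();
	else
		linux_dispatch();
}

int main(void)
{
	catch_root_syscall((struct pt_regs *)0);
	return 0;
}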
(-)a/arch/x86/kernel/entry_64.S (-29 / +201 lines)
Lines 48-53 Link Here
48
#include <asm/unistd.h>
48
#include <asm/unistd.h>
49
#include <asm/thread_info.h>
49
#include <asm/thread_info.h>
50
#include <asm/hw_irq.h>
50
#include <asm/hw_irq.h>
51
#include <asm/ipipe_base.h>
51
#include <asm/page_types.h>
52
#include <asm/page_types.h>
52
#include <asm/irqflags.h>
53
#include <asm/irqflags.h>
53
#include <asm/paravirt.h>
54
#include <asm/paravirt.h>
Lines 61-66 Link Here
61
#define __AUDIT_ARCH_LE	   0x40000000
62
#define __AUDIT_ARCH_LE	   0x40000000
62
63
63
	.code64
64
	.code64
65
66
#ifdef CONFIG_IPIPE
67
#define PREEMPT_SCHEDULE_IRQ		call __ipipe_preempt_schedule_irq
68
#else /* !CONFIG_IPIPE */
69
#define PREEMPT_SCHEDULE_IRQ		call preempt_schedule_irq
70
#endif /* !CONFIG_IPIPE */
71
64
#ifdef CONFIG_FUNCTION_TRACER
72
#ifdef CONFIG_FUNCTION_TRACER
65
#ifdef CONFIG_DYNAMIC_FTRACE
73
#ifdef CONFIG_DYNAMIC_FTRACE
66
ENTRY(mcount)
74
ENTRY(mcount)
Lines 336-342 ENTRY(save_args) Link Here
336
	/*
344
	/*
337
	 * We entered an interrupt context - irqs are off:
345
	 * We entered an interrupt context - irqs are off:
338
	 */
346
	 */
339
2:	TRACE_IRQS_OFF
347
2:
348
#ifndef CONFIG_IPIPE
349
	TRACE_IRQS_OFF
350
#endif
340
	ret
351
	ret
341
	CFI_ENDPROC
352
	CFI_ENDPROC
342
END(save_args)
353
END(save_args)
Lines 402-407 ENTRY(ret_from_fork) Link Here
402
	CFI_ADJUST_CFA_OFFSET 8
413
	CFI_ADJUST_CFA_OFFSET 8
403
	popf					# reset kernel eflags
414
	popf					# reset kernel eflags
404
	CFI_ADJUST_CFA_OFFSET -8
415
	CFI_ADJUST_CFA_OFFSET -8
416
  	ENABLE_INTERRUPTS_HW_COND
405
417
406
	call schedule_tail			# rdi: 'prev' task parameter
418
	call schedule_tail			# rdi: 'prev' task parameter
407
419
Lines 477-482 ENTRY(system_call_after_swapgs) Link Here
477
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
489
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
478
	movq  %rcx,RIP-ARGOFFSET(%rsp)
490
	movq  %rcx,RIP-ARGOFFSET(%rsp)
479
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
491
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
492
#ifdef CONFIG_IPIPE
493
	pushq %rdi
494
	pushq %rax
495
	leaq -(ARGOFFSET-16)(%rsp),%rdi	# regs for handler
496
	call	__ipipe_syscall_root_thunk
497
	testl %eax, %eax
498
	popq %rax
499
	popq %rdi
500
	js    ret_from_sys_call
501
	jnz   sysret_fastexit
502
#endif
480
	GET_THREAD_INFO(%rcx)
503
	GET_THREAD_INFO(%rcx)
481
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
504
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
482
	jnz tracesys
505
	jnz tracesys
Lines 506-511 sysret_check: Link Here
506
	 * sysretq will re-enable interrupts:
529
	 * sysretq will re-enable interrupts:
507
	 */
530
	 */
508
	TRACE_IRQS_ON
531
	TRACE_IRQS_ON
532
sysret_fastexit:
509
	movq RIP-ARGOFFSET(%rsp),%rcx
533
	movq RIP-ARGOFFSET(%rsp),%rcx
510
	CFI_REGISTER	rip,rcx
534
	CFI_REGISTER	rip,rcx
511
	RESTORE_ARGS 0,-ARG_SKIP,1
535
	RESTORE_ARGS 0,-ARG_SKIP,1
Lines 517-522 sysret_check: Link Here
517
	/* Handle reschedules */
541
	/* Handle reschedules */
518
	/* edx:	work, edi: workmask */
542
	/* edx:	work, edi: workmask */
519
sysret_careful:
543
sysret_careful:
544
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),%edx
545
	jnz ret_from_sys_call_trace
520
	bt $TIF_NEED_RESCHED,%edx
546
	bt $TIF_NEED_RESCHED,%edx
521
	jnc sysret_signal
547
	jnc sysret_signal
522
	TRACE_IRQS_ON
548
	TRACE_IRQS_ON
Lines 528-533 sysret_careful: Link Here
528
	CFI_ADJUST_CFA_OFFSET -8
554
	CFI_ADJUST_CFA_OFFSET -8
529
	jmp sysret_check
555
	jmp sysret_check
530
556
557
ret_from_sys_call_trace:
558
	TRACE_IRQS_ON
559
	sti
560
	SAVE_REST
561
	FIXUP_TOP_OF_STACK %rdi
562
	movq %rsp,%rdi
563
	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
564
	RESTORE_REST
565
	jmp int_ret_from_sys_call
566
531
	/* Handle a signal */
567
	/* Handle a signal */
532
sysret_signal:
568
sysret_signal:
533
	TRACE_IRQS_ON
569
	TRACE_IRQS_ON
Lines 800-806 END(interrupt) Link Here
800
	CFI_ADJUST_CFA_OFFSET 10*8
836
	CFI_ADJUST_CFA_OFFSET 10*8
801
	call save_args
837
	call save_args
802
	PARTIAL_FRAME 0
838
	PARTIAL_FRAME 0
839
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
840
	pushq %rbp
841
	leaq RIP-8(%rdi), %rbp	# make interrupted address show up in trace
842
	pushq %rdi
843
	movq ORIG_RAX(%rdi), %rdi	# IRQ number
844
	notq %rdi			# ...is inverted, fix up
845
	call ipipe_trace_begin
846
	popq %rdi
847
	popq %rbp
848
849
	call \func
850
851
	pushq %rbp
852
	pushq %rax
853
	movq 8-ARGOFFSET+ORIG_RAX(%rbp), %rdi
854
	leaq 8-ARGOFFSET+RIP-8(%rbp), %rbp
855
	notq %rdi
856
	call ipipe_trace_end
857
	popq %rax
858
	popq %rbp
859
#else
803
	call \func
860
	call \func
861
#endif
804
	.endm
862
	.endm
805
863
806
	/*
864
	/*
Lines 809-817 END(interrupt) Link Here
809
	 */
867
	 */
810
	.p2align CONFIG_X86_L1_CACHE_SHIFT
868
	.p2align CONFIG_X86_L1_CACHE_SHIFT
811
common_interrupt:
869
common_interrupt:
870
#ifdef CONFIG_IPIPE
871
	XCPT_FRAME
872
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
873
	interrupt *ipipe_irq_handler
874
	testl %eax, %eax
875
	jnz ret_from_intr
876
	decl PER_CPU_VAR(irq_count)
877
	leaveq
878
	CFI_DEF_CFA_REGISTER	rsp
879
	CFI_ADJUST_CFA_OFFSET	-8
880
	testl $3,CS-ARGOFFSET(%rsp)
881
	jz restore_args
882
	jmp retint_swapgs_notrace
883
#else /* !CONFIG_IPIPE */
812
	XCPT_FRAME
884
	XCPT_FRAME
813
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
885
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
814
	interrupt do_IRQ
886
	interrupt do_IRQ
887
#endif /* !CONFIG_IPIPE */
815
	/* 0(%rsp): old_rsp-ARGOFFSET */
888
	/* 0(%rsp): old_rsp-ARGOFFSET */
816
ret_from_intr:
889
ret_from_intr:
817
	DISABLE_INTERRUPTS(CLBR_NONE)
890
	DISABLE_INTERRUPTS(CLBR_NONE)
Lines 820-826 ret_from_intr: Link Here
820
	leaveq
893
	leaveq
821
	CFI_DEF_CFA_REGISTER	rsp
894
	CFI_DEF_CFA_REGISTER	rsp
822
	CFI_ADJUST_CFA_OFFSET	-8
895
	CFI_ADJUST_CFA_OFFSET	-8
823
exit_intr:
896
ENTRY(exit_intr)
824
	GET_THREAD_INFO(%rcx)
897
	GET_THREAD_INFO(%rcx)
825
	testl $3,CS-ARGOFFSET(%rsp)
898
	testl $3,CS-ARGOFFSET(%rsp)
826
	je retint_kernel
899
	je retint_kernel
Lines 840-859 retint_check: Link Here
840
	jnz  retint_careful
913
	jnz  retint_careful
841
914
842
retint_swapgs:		/* return to user-space */
915
retint_swapgs:		/* return to user-space */
916
	TRACE_IRQS_IRETQ
843
	/*
917
	/*
844
	 * The iretq could re-enable interrupts:
918
	 * The iretq could re-enable interrupts:
845
	 */
919
	 */
846
	DISABLE_INTERRUPTS(CLBR_ANY)
920
retint_swapgs_notrace:
847
	TRACE_IRQS_IRETQ
848
	SWAPGS
921
	SWAPGS
922
retint_noswapgs:
849
	jmp restore_args
923
	jmp restore_args
850
924
851
retint_restore_args:	/* return to kernel space */
925
retint_restore_args:	/* return to kernel space */
852
	DISABLE_INTERRUPTS(CLBR_ANY)
926
	TRACE_IRQS_IRETQ
853
	/*
927
	/*
854
	 * The iretq could re-enable interrupts:
928
	 * The iretq could re-enable interrupts:
855
	 */
929
	 */
856
	TRACE_IRQS_IRETQ
857
restore_args:
930
restore_args:
858
	RESTORE_ARGS 0,8,0
931
	RESTORE_ARGS 0,8,0
859
932
Lines 935-941 ENTRY(retint_kernel) Link Here
935
	jnc  retint_restore_args
1008
	jnc  retint_restore_args
936
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
1009
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
937
	jnc  retint_restore_args
1010
	jnc  retint_restore_args
938
	call preempt_schedule_irq
1011
#ifdef CONFIG_IPIPE
1012
	/*
1013
	 * We may have preempted call_softirq before __do_softirq raised or
1014
	 * after it lowered the preemption counter.
1015
	 */
1016
	cmpl $0,PER_CPU_VAR(irq_count)
1017
	jge  retint_restore_args
1018
#endif
1019
	PREEMPT_SCHEDULE_IRQ
939
	jmp exit_intr
1020
	jmp exit_intr
940
#endif
1021
#endif
941
1022
Lines 945-960 END(common_interrupt) Link Here
945
/*
1026
/*
946
 * APIC interrupts.
1027
 * APIC interrupts.
947
 */
1028
 */
948
.macro apicinterrupt num sym do_sym
1029
	.macro apicinterrupt num sym do_sym
949
ENTRY(\sym)
1030
ENTRY(\sym)
950
	INTR_FRAME
1031
	INTR_FRAME
951
	pushq $~(\num)
1032
	pushq $~(\num)
952
	CFI_ADJUST_CFA_OFFSET 8
1033
	CFI_ADJUST_CFA_OFFSET 8
1034
#ifdef CONFIG_IPIPE
1035
	interrupt *ipipe_irq_handler
1036
	testl %eax, %eax
1037
	jnz ret_from_intr
1038
	decl PER_CPU_VAR(irq_count)
1039
	leaveq
1040
	CFI_DEF_CFA_REGISTER	rsp
1041
	CFI_ADJUST_CFA_OFFSET	-8
1042
	testl $3,CS-ARGOFFSET(%rsp)
1043
	jz restore_args
1044
	jmp retint_swapgs_notrace
1045
	CFI_ENDPROC
1046
	.endm
1047
#else /* !CONFIG_IPIPE */
953
	interrupt \do_sym
1048
	interrupt \do_sym
954
	jmp ret_from_intr
1049
	jmp ret_from_intr
955
	CFI_ENDPROC
1050
	CFI_ENDPROC
956
END(\sym)
1051
END(\sym)
957
.endm
1052
.endm
1053
#endif /* !CONFIG_IPIPE */
958
1054
959
#ifdef CONFIG_SMP
1055
#ifdef CONFIG_SMP
960
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
1056
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
Lines 979-984 apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \ Link Here
979
	invalidate_interrupt1 smp_invalidate_interrupt
1075
	invalidate_interrupt1 smp_invalidate_interrupt
980
apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
1076
apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
981
	invalidate_interrupt2 smp_invalidate_interrupt
1077
	invalidate_interrupt2 smp_invalidate_interrupt
1078
#ifndef CONFIG_IPIPE
982
apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
1079
apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
983
	invalidate_interrupt3 smp_invalidate_interrupt
1080
	invalidate_interrupt3 smp_invalidate_interrupt
984
apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
1081
apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
Lines 989-994 apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \ Link Here
989
	invalidate_interrupt6 smp_invalidate_interrupt
1086
	invalidate_interrupt6 smp_invalidate_interrupt
990
apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
1087
apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
991
	invalidate_interrupt7 smp_invalidate_interrupt
1088
	invalidate_interrupt7 smp_invalidate_interrupt
1089
#endif /* !CONFIG_IPIPE */
992
#endif
1090
#endif
993
1091
994
apicinterrupt THRESHOLD_APIC_VECTOR \
1092
apicinterrupt THRESHOLD_APIC_VECTOR \
Lines 1023-1029 apicinterrupt LOCAL_PENDING_VECTOR \ Link Here
1023
/*
1121
/*
1024
 * Exception entry points.
1122
 * Exception entry points.
1025
 */
1123
 */
1026
.macro zeroentry sym do_sym
1124
.macro zeroentry sym do_sym ex_code
1027
ENTRY(\sym)
1125
ENTRY(\sym)
1028
	INTR_FRAME
1126
	INTR_FRAME
1029
	PARAVIRT_ADJUST_EXCEPTION_FRAME
1127
	PARAVIRT_ADJUST_EXCEPTION_FRAME
Lines 1034-1046 ENTRY(\sym) Link Here
1034
	DEFAULT_FRAME 0
1132
	DEFAULT_FRAME 0
1035
	movq %rsp,%rdi		/* pt_regs pointer */
1133
	movq %rsp,%rdi		/* pt_regs pointer */
1036
	xorl %esi,%esi		/* no error code */
1134
	xorl %esi,%esi		/* no error code */
1135
#ifdef CONFIG_IPIPE
1136
	movq $\ex_code,%rdx
1137
	call __ipipe_handle_exception   /* handle(regs, error_code, ex_code) */
1138
	testl %eax, %eax
1139
	jz error_exit
1140
	movl %ebx,%eax
1141
	RESTORE_REST
1142
	DISABLE_INTERRUPTS(CLBR_NONE)
1143
	testl %eax,%eax
1144
	jne retint_noswapgs
1145
	jmp retint_swapgs_notrace
1146
#else /* !CONFIG_IPIPE */
1037
	call \do_sym
1147
	call \do_sym
1148
#endif /* !CONFIG_IPIPE */
1038
	jmp error_exit		/* %ebx: no swapgs flag */
1149
	jmp error_exit		/* %ebx: no swapgs flag */
1039
	CFI_ENDPROC
1150
	CFI_ENDPROC
1040
END(\sym)
1151
END(\sym)
1041
.endm
1152
.endm
1042
1153
1043
.macro paranoidzeroentry sym do_sym
1154
.macro paranoidzeroentry sym do_sym ex_code=0
1044
ENTRY(\sym)
1155
ENTRY(\sym)
1045
	INTR_FRAME
1156
	INTR_FRAME
1046
	PARAVIRT_ADJUST_EXCEPTION_FRAME
1157
	PARAVIRT_ADJUST_EXCEPTION_FRAME
Lines 1050-1063 ENTRY(\sym) Link Here
1050
	call save_paranoid
1161
	call save_paranoid
1051
	TRACE_IRQS_OFF
1162
	TRACE_IRQS_OFF
1052
	movq %rsp,%rdi		/* pt_regs pointer */
1163
	movq %rsp,%rdi		/* pt_regs pointer */
1164
#ifdef CONFIG_IPIPE
1165
	.if \ex_code
1166
	movq $\ex_code,%rsi
1167
	call __ipipe_divert_exception   /* handle(regs, ex_code) */
1168
	testl %eax,%eax
1169
	jnz 1f
1170
	movq %rsp,%rdi
1171
	.endif
1172
#endif
1053
	xorl %esi,%esi		/* no error code */
1173
	xorl %esi,%esi		/* no error code */
1054
	call \do_sym
1174
	call \do_sym
1175
#ifdef CONFIG_IPIPE
1176
	xorl %eax,%eax		/* tell paranoid_exit to propagate the exception */
1177
1:
1178
#endif
1055
	jmp paranoid_exit	/* %ebx: no swapgs flag */
1179
	jmp paranoid_exit	/* %ebx: no swapgs flag */
1056
	CFI_ENDPROC
1180
	CFI_ENDPROC
1057
END(\sym)
1181
END(\sym)
1058
.endm
1182
.endm
1059
1183
1060
.macro paranoidzeroentry_ist sym do_sym ist
1184
.macro paranoidzeroentry_ist sym do_sym ist ex_code=0
1061
ENTRY(\sym)
1185
ENTRY(\sym)
1062
	INTR_FRAME
1186
	INTR_FRAME
1063
	PARAVIRT_ADJUST_EXCEPTION_FRAME
1187
	PARAVIRT_ADJUST_EXCEPTION_FRAME
Lines 1067-1083 ENTRY(\sym) Link Here
1067
	call save_paranoid
1191
	call save_paranoid
1068
	TRACE_IRQS_OFF
1192
	TRACE_IRQS_OFF
1069
	movq %rsp,%rdi		/* pt_regs pointer */
1193
	movq %rsp,%rdi		/* pt_regs pointer */
1194
#ifdef CONFIG_IPIPE
1195
	.if \ex_code
1196
	movq $\ex_code,%rsi
1197
	call __ipipe_divert_exception   /* handle(regs, ex_code) */
1198
	testl %eax,%eax
1199
	jnz 1f
1200
	movq %rsp,%rdi
1201
	.endif
1202
#endif
1070
	xorl %esi,%esi		/* no error code */
1203
	xorl %esi,%esi		/* no error code */
1071
	PER_CPU(init_tss, %rbp)
1204
	PER_CPU(init_tss, %rbp)
1072
	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
1205
	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
1073
	call \do_sym
1206
	call \do_sym
1074
	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
1207
	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
1208
#ifdef CONFIG_IPIPE
1209
	xorl %eax,%eax		/* tell paranoid_exit to propagate the exception */
1210
1:
1211
#endif
1075
	jmp paranoid_exit	/* %ebx: no swapgs flag */
1212
	jmp paranoid_exit	/* %ebx: no swapgs flag */
1076
	CFI_ENDPROC
1213
	CFI_ENDPROC
1077
END(\sym)
1214
END(\sym)
1078
.endm
1215
.endm
1079
1216
1080
.macro errorentry sym do_sym
1217
.macro errorentry sym do_sym ex_code
1081
ENTRY(\sym)
1218
ENTRY(\sym)
1082
	XCPT_FRAME
1219
	XCPT_FRAME
1083
	PARAVIRT_ADJUST_EXCEPTION_FRAME
1220
	PARAVIRT_ADJUST_EXCEPTION_FRAME
Lines 1088-1101 ENTRY(\sym) Link Here
1088
	movq %rsp,%rdi			/* pt_regs pointer */
1225
	movq %rsp,%rdi			/* pt_regs pointer */
1089
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
1226
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
1090
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
1227
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
1228
#ifdef CONFIG_IPIPE
1229
	movq $\ex_code,%rdx
1230
	call __ipipe_handle_exception   /* handle(regs, error_code, ex_code) */
1231
	testl %eax, %eax
1232
	jz error_exit
1233
	movl %ebx,%eax
1234
	RESTORE_REST
1235
	DISABLE_INTERRUPTS(CLBR_NONE)
1236
	testl %eax,%eax
1237
	jne retint_noswapgs
1238
	jmp retint_swapgs_notrace
1239
#else /* !CONFIG_IPIPE */
1091
	call \do_sym
1240
	call \do_sym
1241
#endif /* !CONFIG_IPIPE */
1092
	jmp error_exit			/* %ebx: no swapgs flag */
1242
	jmp error_exit			/* %ebx: no swapgs flag */
1093
	CFI_ENDPROC
1243
	CFI_ENDPROC
1094
END(\sym)
1244
END(\sym)
1095
.endm
1245
.endm
1096
1246
1097
	/* error code is on the stack already */
1247
	/* error code is on the stack already */
1098
.macro paranoiderrorentry sym do_sym
1248
.macro paranoiderrorentry sym do_sym ex_code=0
1099
ENTRY(\sym)
1249
ENTRY(\sym)
1100
	XCPT_FRAME
1250
	XCPT_FRAME
1101
	PARAVIRT_ADJUST_EXCEPTION_FRAME
1251
	PARAVIRT_ADJUST_EXCEPTION_FRAME
Lines 1105-1131 ENTRY(\sym) Link Here
1105
	DEFAULT_FRAME 0
1255
	DEFAULT_FRAME 0
1106
	TRACE_IRQS_OFF
1256
	TRACE_IRQS_OFF
1107
	movq %rsp,%rdi			/* pt_regs pointer */
1257
	movq %rsp,%rdi			/* pt_regs pointer */
1258
#ifdef CONFIG_IPIPE
1259
	.if \ex_code
1260
	movq $\ex_code,%rsi
1261
	call __ipipe_divert_exception   /* handle(regs, ex_code) */
1262
	testl %eax,%eax
1263
	jnz 1f
1264
	movq %rsp,%rdi
1265
	.endif
1266
#endif
1108
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
1267
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
1109
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
1268
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
1110
	call \do_sym
1269
	call \do_sym
1270
#ifdef CONFIG_IPIPE
1271
	xorl %eax,%eax			/* tell paranoid_exit to propagate the exception */
1272
1:
1273
#endif
1111
	jmp paranoid_exit		/* %ebx: no swapgs flag */
1274
	jmp paranoid_exit		/* %ebx: no swapgs flag */
1112
	CFI_ENDPROC
1275
	CFI_ENDPROC
1113
END(\sym)
1276
END(\sym)
1114
.endm
1277
.endm
1115
1278
1116
zeroentry divide_error do_divide_error
1279
zeroentry divide_error do_divide_error ex_do_divide_error
1117
zeroentry overflow do_overflow
1280
zeroentry overflow do_overflow ex_do_overflow
1118
zeroentry bounds do_bounds
1281
zeroentry bounds do_bounds ex_do_bounds
1119
zeroentry invalid_op do_invalid_op
1282
zeroentry invalid_op do_invalid_op ex_do_invalid_op
1120
zeroentry device_not_available do_device_not_available
1283
zeroentry device_not_available do_device_not_available ex_do_device_not_available
1121
paranoiderrorentry double_fault do_double_fault
1284
paranoiderrorentry double_fault do_double_fault
1122
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
1285
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun ex_do_coprocessor_segment_overrun
1123
errorentry invalid_TSS do_invalid_TSS
1286
errorentry invalid_TSS do_invalid_TSS ex_do_invalid_TSS
1124
errorentry segment_not_present do_segment_not_present
1287
errorentry segment_not_present do_segment_not_present ex_do_segment_not_present
1125
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
1288
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug ex_do_spurious_interrupt_bug
1126
zeroentry coprocessor_error do_coprocessor_error
1289
zeroentry coprocessor_error do_coprocessor_error ex_do_coprocessor_error
1127
errorentry alignment_check do_alignment_check
1290
errorentry alignment_check do_alignment_check ex_do_alignment_check
1128
zeroentry simd_coprocessor_error do_simd_coprocessor_error
1291
zeroentry simd_coprocessor_error do_simd_coprocessor_error ex_do_simd_coprocessor_error
1129
1292
1130
	/* Reload gs selector with exception handling */
1293
	/* Reload gs selector with exception handling */
1131
	/* edi:  new selector */
1294
	/* edi:  new selector */
Lines 1255-1268 ENTRY(call_softirq) Link Here
1255
	CFI_REL_OFFSET rbp,0
1418
	CFI_REL_OFFSET rbp,0
1256
	mov  %rsp,%rbp
1419
	mov  %rsp,%rbp
1257
	CFI_DEF_CFA_REGISTER rbp
1420
	CFI_DEF_CFA_REGISTER rbp
1421
	DISABLE_INTERRUPTS_HW_COND
1258
	incl PER_CPU_VAR(irq_count)
1422
	incl PER_CPU_VAR(irq_count)
1259
	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
1423
	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
1424
	ENABLE_INTERRUPTS_HW_COND
1260
	push  %rbp			# backlink for old unwinder
1425
	push  %rbp			# backlink for old unwinder
1261
	call __do_softirq
1426
	call __do_softirq
1427
	DISABLE_INTERRUPTS_HW_COND
1262
	leaveq
1428
	leaveq
1263
	CFI_DEF_CFA_REGISTER	rsp
1429
	CFI_DEF_CFA_REGISTER	rsp
1264
	CFI_ADJUST_CFA_OFFSET   -8
1430
	CFI_ADJUST_CFA_OFFSET   -8
1265
	decl PER_CPU_VAR(irq_count)
1431
	decl PER_CPU_VAR(irq_count)
1432
	ENABLE_INTERRUPTS_HW_COND
1266
	ret
1433
	ret
1267
	CFI_ENDPROC
1434
	CFI_ENDPROC
1268
END(call_softirq)
1435
END(call_softirq)
Lines 1371-1386 END(xen_failsafe_callback) Link Here
1371
 */
1538
 */
1372
	.pushsection .kprobes.text, "ax"
1539
	.pushsection .kprobes.text, "ax"
1373
1540
1374
paranoidzeroentry_ist debug do_debug DEBUG_STACK
1541
paranoidzeroentry_ist debug do_debug DEBUG_STACK ex_do_debug
1375
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
1542
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK ex_do_int3
1376
paranoiderrorentry stack_segment do_stack_segment
1543
paranoiderrorentry stack_segment do_stack_segment
1377
#ifdef CONFIG_XEN
1544
#ifdef CONFIG_XEN
1378
zeroentry xen_debug do_debug
1545
zeroentry xen_debug do_debug
1379
zeroentry xen_int3 do_int3
1546
zeroentry xen_int3 do_int3
1380
errorentry xen_stack_segment do_stack_segment
1547
errorentry xen_stack_segment do_stack_segment
1381
#endif
1548
#endif
1382
errorentry general_protection do_general_protection
1549
errorentry general_protection do_general_protection ex_do_general_protection
1383
errorentry page_fault do_page_fault
1550
errorentry page_fault do_page_fault ex_do_page_fault
1384
#ifdef CONFIG_X86_MCE
1551
#ifdef CONFIG_X86_MCE
1385
paranoidzeroentry machine_check *machine_check_vector(%rip)
1552
paranoidzeroentry machine_check *machine_check_vector(%rip)
1386
#endif
1553
#endif
Lines 1403-1410 ENTRY(paranoid_exit) Link Here
1403
	INTR_FRAME
1570
	INTR_FRAME
1404
	DISABLE_INTERRUPTS(CLBR_NONE)
1571
	DISABLE_INTERRUPTS(CLBR_NONE)
1405
	TRACE_IRQS_OFF
1572
	TRACE_IRQS_OFF
1573
paranoid_notrace:
1406
	testl %ebx,%ebx				/* swapgs needed? */
1574
	testl %ebx,%ebx				/* swapgs needed? */
1407
	jnz paranoid_restore
1575
	jnz paranoid_restore
1576
#ifdef CONFIG_IPIPE
1577
	testl %eax,%eax
1578
	jnz paranoid_swapgs
1579
#endif
1408
	testl $3,CS(%rsp)
1580
	testl $3,CS(%rsp)
1409
	jnz   paranoid_userspace
1581
	jnz   paranoid_userspace
1410
paranoid_swapgs:
1582
paranoid_swapgs:
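
For orientation only (this paragraph and sketch are editorial and not part of the patch): the modified zeroentry/errorentry/paranoid* macros above hand every trap to the I-pipe layer before, or instead of, the regular Linux handler. A rough C rendering of the errorentry flow follows; error_exit_sketch() and fast_return_sketch() are placeholder names standing in for the error_exit and retint_* labels in the assembly.

	/* Illustrative only -- not part of the patch.  C rendering of the
	 * CONFIG_IPIPE branch of the errorentry macro: the pipeline hook is
	 * called in place of \do_sym, and a non-zero result selects the fast
	 * return path that bypasses the usual error_exit tail work. */
	void errorentry_sketch(struct pt_regs *regs, long error_code, int ex_code)
	{
		if (__ipipe_handle_exception(regs, error_code, ex_code) == 0) {
			/* 0: the Linux handler ran normally inside the hook;
			 * continue through the regular error_exit path. */
			error_exit_sketch(regs);	/* placeholder for "jmp error_exit" */
		} else {
			/* non-zero: a higher-priority I-pipe domain consumed the
			 * trap; return with minimal exit work. */
			fast_return_sketch(regs);	/* placeholder for the retint_* labels */
		}
	}
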
(-)a/arch/x86/kernel/i8253.c (+7 lines)
Lines 11-16 Link Here
11
#include <linux/delay.h>
11
#include <linux/delay.h>
12
#include <linux/init.h>
12
#include <linux/init.h>
13
#include <linux/io.h>
13
#include <linux/io.h>
14
#include <linux/ipipe.h>
14
15
15
#include <asm/i8253.h>
16
#include <asm/i8253.h>
16
#include <asm/hpet.h>
17
#include <asm/hpet.h>
Lines 130-135 static cycle_t pit_read(struct clocksource *cs) Link Here
130
	int count;
131
	int count;
131
	u32 jifs;
132
	u32 jifs;
132
133
134
#ifdef CONFIG_IPIPE
135
	if (!__ipipe_pipeline_head_p(ipipe_root_domain))
136
		/* We don't really own the PIT. */
137
		return (cycle_t)(jiffies * LATCH) + (LATCH - 1) - old_count;
138
#endif /* CONFIG_IPIPE */
139
133
	spin_lock_irqsave(&i8253_lock, flags);
140
	spin_lock_irqsave(&i8253_lock, flags);
134
	/*
141
	/*
135
	 * Although our caller may have the read side of xtime_lock,
142
	 * Although our caller may have the read side of xtime_lock,
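
The CONFIG_IPIPE fallback added to pit_read() above can be read as the following self-contained helper (an illustration only, not part of the patch); old_count is assumed to be the last latched PIT down-counter value kept elsewhere in i8253.c, which the hunk does not show.

	/* Illustration: estimate elapsed PIT cycles from jiffies when the root
	 * domain no longer owns the PIT.  LATCH is the PIT reload value per
	 * tick; old_count approximates how far the down-counter had progressed
	 * within the current tick. */
	static inline u64 pit_cycles_estimate(u64 jiffies_now, u32 latch, u32 old_count)
	{
		/* full ticks so far, plus the consumed fraction of the current tick */
		return jiffies_now * latch + (latch - 1) - old_count;
	}
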
(-)a/arch/x86/kernel/i8259.c (-7 / +23 lines)
Lines 32-38 Link Here
32
 */
32
 */
33
33
34
static int i8259A_auto_eoi;
34
static int i8259A_auto_eoi;
35
DEFINE_SPINLOCK(i8259A_lock);
35
IPIPE_DEFINE_SPINLOCK(i8259A_lock);
36
static void mask_and_ack_8259A(unsigned int);
36
static void mask_and_ack_8259A(unsigned int);
37
37
38
struct irq_chip i8259A_chip = {
38
struct irq_chip i8259A_chip = {
Lines 69-74 void disable_8259A_irq(unsigned int irq) Link Here
69
	unsigned long flags;
69
	unsigned long flags;
70
70
71
	spin_lock_irqsave(&i8259A_lock, flags);
71
	spin_lock_irqsave(&i8259A_lock, flags);
72
	ipipe_irq_lock(irq);
72
	cached_irq_mask |= mask;
73
	cached_irq_mask |= mask;
73
	if (irq & 8)
74
	if (irq & 8)
74
		outb(cached_slave_mask, PIC_SLAVE_IMR);
75
		outb(cached_slave_mask, PIC_SLAVE_IMR);
Lines 79-93 void disable_8259A_irq(unsigned int irq) Link Here
79
80
80
void enable_8259A_irq(unsigned int irq)
81
void enable_8259A_irq(unsigned int irq)
81
{
82
{
82
	unsigned int mask = ~(1 << irq);
83
	unsigned int mask = (1 << irq);
83
	unsigned long flags;
84
	unsigned long flags;
84
85
85
	spin_lock_irqsave(&i8259A_lock, flags);
86
	spin_lock_irqsave(&i8259A_lock, flags);
86
	cached_irq_mask &= mask;
87
	if (cached_irq_mask & mask) {
87
	if (irq & 8)
88
		cached_irq_mask &= ~mask;
88
		outb(cached_slave_mask, PIC_SLAVE_IMR);
89
		if (irq & 8)
89
	else
90
			outb(cached_slave_mask, PIC_SLAVE_IMR);
90
		outb(cached_master_mask, PIC_MASTER_IMR);
91
		else
92
			outb(cached_master_mask, PIC_MASTER_IMR);
93
		ipipe_irq_unlock(irq);
94
	}
91
	spin_unlock_irqrestore(&i8259A_lock, flags);
95
	spin_unlock_irqrestore(&i8259A_lock, flags);
92
}
96
}
93
97
Lines 168-173 static void mask_and_ack_8259A(unsigned int irq) Link Here
168
	 */
172
	 */
169
	if (cached_irq_mask & irqmask)
173
	if (cached_irq_mask & irqmask)
170
		goto spurious_8259A_irq;
174
		goto spurious_8259A_irq;
175
#ifdef CONFIG_IPIPE
176
	if (irq == 0) {
177
		/*
178
		 * Fast timer ack -- don't mask (unless supposedly
179
		 * spurious). We trace outb's in order to detect
180
		 * broken hardware inducing large delays.
181
		 */
182
		outb(0x60, PIC_MASTER_CMD);	/* Specific EOI to master. */
183
		spin_unlock_irqrestore(&i8259A_lock, flags);
184
		return;
185
	}
186
#endif /* CONFIG_IPIPE */
171
	cached_irq_mask |= irqmask;
187
	cached_irq_mask |= irqmask;
172
188
173
handle_real_irq:
189
handle_real_irq:
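
The "fast timer ack" branch added to mask_and_ack_8259A() above acknowledges IRQ0 with a specific EOI instead of the usual mask-and-ack sequence, so the timer line stays unmasked for the interrupt pipeline. A minimal sketch of that acknowledge, assuming the conventional PIC_MASTER_CMD port value of 0x20, would be:

	/* Sketch only (not part of the patch): specific EOI for IRQ0 on the
	 * master 8259A.  OCW2 specific-EOI is 0x60 ORed with the IRQ level. */
	static inline void i8259_timer_fast_eoi(void)
	{
		outb(0x60, 0x20);	/* 0x20 assumed to be PIC_MASTER_CMD */
	}
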
(-)a/arch/x86/kernel/ipipe.c (+1084 lines)
Line 0 Link Here
1
/*   -*- linux-c -*-
2
 *   linux/arch/x86/kernel/ipipe.c
3
 *
4
 *   Copyright (C) 2002-2007 Philippe Gerum.
5
 *
6
 *   This program is free software; you can redistribute it and/or modify
7
 *   it under the terms of the GNU General Public License as published by
8
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
9
 *   USA; either version 2 of the License, or (at your option) any later
10
 *   version.
11
 *
12
 *   This program is distributed in the hope that it will be useful,
13
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
 *   GNU General Public License for more details.
16
 *
17
 *   You should have received a copy of the GNU General Public License
18
 *   along with this program; if not, write to the Free Software
19
 *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20
 *
21
 *   Architecture-dependent I-PIPE support for x86.
22
 */
23
24
#include <linux/kernel.h>
25
#include <linux/smp.h>
26
#include <linux/module.h>
27
#include <linux/sched.h>
28
#include <linux/interrupt.h>
29
#include <linux/slab.h>
30
#include <linux/irq.h>
31
#include <linux/clockchips.h>
32
#include <linux/kprobes.h>
33
#include <asm/unistd.h>
34
#include <asm/system.h>
35
#include <asm/atomic.h>
36
#include <asm/hw_irq.h>
37
#include <asm/irq.h>
38
#include <asm/desc.h>
39
#include <asm/io.h>
40
#ifdef CONFIG_X86_LOCAL_APIC
41
#include <asm/tlbflush.h>
42
#include <asm/fixmap.h>
43
#include <asm/bitops.h>
44
#include <asm/mpspec.h>
45
#ifdef CONFIG_X86_IO_APIC
46
#include <asm/io_apic.h>
47
#endif	/* CONFIG_X86_IO_APIC */
48
#include <asm/apic.h>
49
#endif	/* CONFIG_X86_LOCAL_APIC */
50
#include <asm/traps.h>
51
52
int __ipipe_tick_irq = 0;	/* Legacy timer */
53
54
DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
55
56
DEFINE_PER_CPU(unsigned long, __ipipe_cr2);
57
EXPORT_PER_CPU_SYMBOL_GPL(__ipipe_cr2);
58
59
#ifdef CONFIG_SMP
60
61
static cpumask_t __ipipe_cpu_sync_map;
62
63
static cpumask_t __ipipe_cpu_lock_map;
64
65
static unsigned long __ipipe_critical_lock;
66
67
static IPIPE_DEFINE_SPINLOCK(__ipipe_cpu_barrier);
68
69
static atomic_t __ipipe_critical_count = ATOMIC_INIT(0);
70
71
static void (*__ipipe_cpu_sync) (void);
72
73
#endif /* CONFIG_SMP */
74
75
/*
76
 * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline
77
 * just like if it has been actually received from a hw source. Also
78
 * works for virtual interrupts.
79
 */
80
int ipipe_trigger_irq(unsigned int irq)
81
{
82
	struct pt_regs regs;
83
	unsigned long flags;
84
85
#ifdef CONFIG_IPIPE_DEBUG
86
	if (irq >= IPIPE_NR_IRQS)
87
		return -EINVAL;
88
	if (ipipe_virtual_irq_p(irq)) {
89
		if (!test_bit(irq - IPIPE_VIRQ_BASE,
90
			      &__ipipe_virtual_irq_map))
91
			return -EINVAL;
92
	} else if (irq_to_desc(irq) == NULL)
93
		return -EINVAL;
94
#endif
95
	local_irq_save_hw(flags);
96
	regs.flags = flags;
97
	regs.orig_ax = irq;	/* Positive value - IRQ won't be acked */
98
	regs.cs = __KERNEL_CS;
99
	__ipipe_handle_irq(&regs);
100
	local_irq_restore_hw(flags);
101
102
	return 1;
103
}
104
105
int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
106
{
107
	info->ncpus = num_online_cpus();
108
	info->cpufreq = ipipe_cpu_freq();
109
	info->archdep.tmirq = __ipipe_tick_irq;
110
#ifdef CONFIG_X86_TSC
111
	info->archdep.tmfreq = ipipe_cpu_freq();
112
#else	/* !CONFIG_X86_TSC */
113
	info->archdep.tmfreq = CLOCK_TICK_RATE;
114
#endif	/* CONFIG_X86_TSC */
115
116
	return 0;
117
}
118
119
#ifdef CONFIG_X86_UV
120
asmlinkage void uv_bau_message_interrupt(struct pt_regs *regs);
121
#endif
122
#ifdef CONFIG_X86_MCE_THRESHOLD
123
asmlinkage void smp_threshold_interrupt(void);
124
#endif
125
#ifdef CONFIG_X86_NEW_MCE
126
asmlinkage void smp_mce_self_interrupt(void);
127
#endif
128
129
static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
130
{
131
	desc->ipipe_ack(irq, desc);
132
}
133
134
void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
135
{
136
	irq_to_desc(irq)->status &= ~IRQ_DISABLED;
137
}
138
139
#ifdef CONFIG_X86_LOCAL_APIC
140
141
static void __ipipe_noack_apic(unsigned irq, struct irq_desc *desc)
142
{
143
}
144
145
static void __ipipe_ack_apic(unsigned irq, struct irq_desc *desc)
146
{
147
	__ack_APIC_irq();
148
}
149
150
static void __ipipe_null_handler(unsigned irq, void *cookie)
151
{
152
}
153
154
#endif	/* CONFIG_X86_LOCAL_APIC */
155
156
/* __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
157
   interrupts are off, and secondary CPUs are still lost in space. */
158
159
void __init __ipipe_enable_pipeline(void)
160
{
161
	unsigned int vector, irq;
162
163
#ifdef CONFIG_X86_LOCAL_APIC
164
165
	/* Map the APIC system vectors. */
166
167
	ipipe_virtualize_irq(ipipe_root_domain,
168
			     ipipe_apic_vector_irq(LOCAL_TIMER_VECTOR),
169
			     (ipipe_irq_handler_t)&smp_apic_timer_interrupt,
170
			     NULL,
171
			     &__ipipe_ack_apic,
172
			     IPIPE_STDROOT_MASK);
173
174
	ipipe_virtualize_irq(ipipe_root_domain,
175
			     ipipe_apic_vector_irq(SPURIOUS_APIC_VECTOR),
176
			     (ipipe_irq_handler_t)&smp_spurious_interrupt,
177
			     NULL,
178
			     &__ipipe_noack_apic,
179
			     IPIPE_STDROOT_MASK);
180
181
	ipipe_virtualize_irq(ipipe_root_domain,
182
			     ipipe_apic_vector_irq(ERROR_APIC_VECTOR),
183
			     (ipipe_irq_handler_t)&smp_error_interrupt,
184
			     NULL,
185
			     &__ipipe_ack_apic,
186
			     IPIPE_STDROOT_MASK);
187
188
	ipipe_virtualize_irq(ipipe_root_domain,
189
			     ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR0),
190
			     &__ipipe_null_handler,
191
			     NULL,
192
			     &__ipipe_ack_apic,
193
			     IPIPE_STDROOT_MASK);
194
195
	ipipe_virtualize_irq(ipipe_root_domain,
196
			     ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR1),
197
			     &__ipipe_null_handler,
198
			     NULL,
199
			     &__ipipe_ack_apic,
200
			     IPIPE_STDROOT_MASK);
201
202
	ipipe_virtualize_irq(ipipe_root_domain,
203
			     ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR2),
204
			     &__ipipe_null_handler,
205
			     NULL,
206
			     &__ipipe_ack_apic,
207
			     IPIPE_STDROOT_MASK);
208
209
	ipipe_virtualize_irq(ipipe_root_domain,
210
			     ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR3),
211
			     &__ipipe_null_handler,
212
			     NULL,
213
			     &__ipipe_ack_apic,
214
			     IPIPE_STDROOT_MASK);
215
216
#ifdef CONFIG_X86_THERMAL_VECTOR
217
	ipipe_virtualize_irq(ipipe_root_domain,
218
			     ipipe_apic_vector_irq(THERMAL_APIC_VECTOR),
219
			     (ipipe_irq_handler_t)&smp_thermal_interrupt,
220
			     NULL,
221
			     &__ipipe_ack_apic,
222
			     IPIPE_STDROOT_MASK);
223
#endif /* CONFIG_X86_THERMAL_VECTOR */
224
225
#ifdef CONFIG_X86_MCE_THRESHOLD
226
	ipipe_virtualize_irq(ipipe_root_domain,
227
			     ipipe_apic_vector_irq(THRESHOLD_APIC_VECTOR),
228
			     (ipipe_irq_handler_t)&smp_threshold_interrupt,
229
			     NULL,
230
			     &__ipipe_ack_apic,
231
			     IPIPE_STDROOT_MASK);
232
#endif /* CONFIG_X86_MCE_THRESHOLD */
233
234
#ifdef CONFIG_X86_NEW_MCE
235
	ipipe_virtualize_irq(ipipe_root_domain,
236
			     ipipe_apic_vector_irq(MCE_SELF_VECTOR),
237
			     (ipipe_irq_handler_t)&smp_mce_self_interrupt,
238
			     NULL,
239
			     &__ipipe_ack_apic,
240
			     IPIPE_STDROOT_MASK);
241
#endif /* CONFIG_X86_MCE_THRESHOLD */
242
243
#ifdef CONFIG_X86_UV
244
	ipipe_virtualize_irq(ipipe_root_domain,
245
			     ipipe_apic_vector_irq(UV_BAU_MESSAGE),
246
			     (ipipe_irq_handler_t)&uv_bau_message_interrupt,
247
			     NULL,
248
			     &__ipipe_ack_apic,
249
			     IPIPE_STDROOT_MASK);
250
#endif /* CONFIG_X86_UV */
251
252
	ipipe_virtualize_irq(ipipe_root_domain,
253
			     ipipe_apic_vector_irq(GENERIC_INTERRUPT_VECTOR),
254
			     (ipipe_irq_handler_t)&smp_generic_interrupt,
255
			     NULL,
256
			     &__ipipe_ack_apic,
257
			     IPIPE_STDROOT_MASK);
258
259
#ifdef CONFIG_PERF_COUNTERS
260
	ipipe_virtualize_irq(ipipe_root_domain,
261
			     ipipe_apic_vector_irq(LOCAL_PENDING_VECTOR),
262
			     (ipipe_irq_handler_t)&perf_pending_interrupt,
263
			     NULL,
264
			     &__ipipe_ack_apic,
265
			     IPIPE_STDROOT_MASK);
266
#endif /* CONFIG_PERF_COUNTERS */
267
268
#endif	/* CONFIG_X86_LOCAL_APIC */
269
270
#ifdef CONFIG_SMP
271
	ipipe_virtualize_irq(ipipe_root_domain,
272
			     ipipe_apic_vector_irq(RESCHEDULE_VECTOR),
273
			     (ipipe_irq_handler_t)&smp_reschedule_interrupt,
274
			     NULL,
275
			     &__ipipe_ack_apic,
276
			     IPIPE_STDROOT_MASK);
277
278
	for (vector = INVALIDATE_TLB_VECTOR_START;
279
	     vector <= INVALIDATE_TLB_VECTOR_END; ++vector)
280
		ipipe_virtualize_irq(ipipe_root_domain,
281
				     ipipe_apic_vector_irq(vector),
282
				     (ipipe_irq_handler_t)&smp_invalidate_interrupt,
283
				     NULL,
284
				     &__ipipe_ack_apic,
285
				     IPIPE_STDROOT_MASK);
286
287
	ipipe_virtualize_irq(ipipe_root_domain,
288
			     ipipe_apic_vector_irq(CALL_FUNCTION_VECTOR),
289
			     (ipipe_irq_handler_t)&smp_call_function_interrupt,
290
			     NULL,
291
			     &__ipipe_ack_apic,
292
			     IPIPE_STDROOT_MASK);
293
294
	ipipe_virtualize_irq(ipipe_root_domain,
295
			     ipipe_apic_vector_irq(CALL_FUNCTION_SINGLE_VECTOR),
296
			     (ipipe_irq_handler_t)&smp_call_function_single_interrupt,
297
			     NULL,
298
			     &__ipipe_ack_apic,
299
			     IPIPE_STDROOT_MASK);
300
301
	ipipe_virtualize_irq(ipipe_root_domain,
302
			     IRQ_MOVE_CLEANUP_VECTOR,
303
			     (ipipe_irq_handler_t)&smp_irq_move_cleanup_interrupt,
304
			     NULL,
305
			     &__ipipe_ack_apic,
306
			     IPIPE_STDROOT_MASK);
307
308
	ipipe_virtualize_irq(ipipe_root_domain,
309
			     ipipe_apic_vector_irq(REBOOT_VECTOR),
310
			     (ipipe_irq_handler_t)&smp_reboot_interrupt,
311
			     NULL,
312
			     &__ipipe_ack_apic,
313
			     IPIPE_STDROOT_MASK);
314
#else
315
	(void)vector;
316
#endif	/* CONFIG_SMP */
317
318
	/* Finally, virtualize the remaining ISA and IO-APIC
319
	 * interrupts. Interrupts which have already been virtualized
320
	 * will just beget a silent -EPERM error since
321
	 * IPIPE_SYSTEM_MASK has been passed for them, that's ok. */
322
323
	for (irq = 0; irq < NR_IRQS; irq++)
324
		/*
325
		 * Fails for IPIPE_CRITICAL_IPI and IRQ_MOVE_CLEANUP_VECTOR,
326
		 * but that's ok.
327
		 */
328
		ipipe_virtualize_irq(ipipe_root_domain,
329
				     irq,
330
				     (ipipe_irq_handler_t)&do_IRQ,
331
				     NULL,
332
				     &__ipipe_ack_irq,
333
				     IPIPE_STDROOT_MASK);
334
335
#ifdef CONFIG_X86_LOCAL_APIC
336
	/* Eventually allow these vectors to be reprogrammed. */
337
	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI0].control &= ~IPIPE_SYSTEM_MASK;
338
	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI1].control &= ~IPIPE_SYSTEM_MASK;
339
	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI2].control &= ~IPIPE_SYSTEM_MASK;
340
	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI3].control &= ~IPIPE_SYSTEM_MASK;
341
#endif	/* CONFIG_X86_LOCAL_APIC */
342
}
343
344
#ifdef CONFIG_SMP
345
346
cpumask_t __ipipe_set_irq_affinity(unsigned irq, cpumask_t cpumask)
347
{
348
	cpumask_t oldmask;
349
350
	if (irq_to_desc(irq)->chip->set_affinity == NULL)
351
		return CPU_MASK_NONE;
352
353
	if (cpus_empty(cpumask))
354
		return CPU_MASK_NONE; /* Return mask value -- no change. */
355
356
	cpus_and(cpumask, cpumask, cpu_online_map);
357
	if (cpus_empty(cpumask))
358
		return CPU_MASK_NONE;	/* Error -- bad mask value or non-routable IRQ. */
359
360
	cpumask_copy(&oldmask, irq_to_desc(irq)->affinity);
361
	irq_to_desc(irq)->chip->set_affinity(irq, &cpumask);
362
363
	return oldmask;
364
}
365
366
int __ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
367
{
368
	unsigned long flags;
369
	int self;
370
371
	if (ipi != IPIPE_SERVICE_IPI0 &&
372
	    ipi != IPIPE_SERVICE_IPI1 &&
373
	    ipi != IPIPE_SERVICE_IPI2 &&
374
	    ipi != IPIPE_SERVICE_IPI3)
375
		return -EINVAL;
376
377
	local_irq_save_hw(flags);
378
379
	self = cpu_isset(ipipe_processor_id(),cpumask);
380
	cpu_clear(ipipe_processor_id(), cpumask);
381
382
	if (!cpus_empty(cpumask))
383
		apic->send_IPI_mask(&cpumask, ipipe_apic_irq_vector(ipi));
384
385
	if (self)
386
		ipipe_trigger_irq(ipi);
387
388
	local_irq_restore_hw(flags);
389
390
	return 0;
391
}
392
393
/* Always called with hw interrupts off. */
394
395
void __ipipe_do_critical_sync(unsigned irq, void *cookie)
396
{
397
	int cpu = ipipe_processor_id();
398
399
	cpu_set(cpu, __ipipe_cpu_sync_map);
400
401
	/* Now we are in sync with the lock requestor running on another
402
	   CPU. Enter a spinning wait until it releases the global
403
	   lock. */
404
	spin_lock(&__ipipe_cpu_barrier);
405
406
	/* Got it. Now get out. */
407
408
	if (__ipipe_cpu_sync)
409
		/* Call the sync routine if any. */
410
		__ipipe_cpu_sync();
411
412
	spin_unlock(&__ipipe_cpu_barrier);
413
414
	cpu_clear(cpu, __ipipe_cpu_sync_map);
415
}
416
417
void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd)
418
{
419
	ipd->irqs[IPIPE_CRITICAL_IPI].acknowledge = &__ipipe_ack_apic;
420
	ipd->irqs[IPIPE_CRITICAL_IPI].handler = &__ipipe_do_critical_sync;
421
	ipd->irqs[IPIPE_CRITICAL_IPI].cookie = NULL;
422
	/* Immediately handle in the current domain but *never* pass */
423
	ipd->irqs[IPIPE_CRITICAL_IPI].control =
424
		IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SYSTEM_MASK;
425
}
426
427
#endif	/* CONFIG_SMP */
428
429
/*
430
 * ipipe_critical_enter() -- Grab the superlock excluding all CPUs but
431
 * the current one from a critical section. This lock is used when we
432
 * must enforce a global critical section for a single CPU in a
433
 * possibly SMP system whichever context the CPUs are running.
434
 */
435
unsigned long ipipe_critical_enter(void (*syncfn) (void))
436
{
437
	unsigned long flags;
438
439
	local_irq_save_hw(flags);
440
441
#ifdef CONFIG_SMP
442
	if (unlikely(num_online_cpus() == 1))
443
		return flags;
444
445
	{
446
		int cpu = ipipe_processor_id();
447
		cpumask_t lock_map;
448
449
		if (!cpu_test_and_set(cpu, __ipipe_cpu_lock_map)) {
450
			while (test_and_set_bit(0, &__ipipe_critical_lock)) {
451
				int n = 0;
452
				do {
453
					cpu_relax();
454
				} while (++n < cpu);
455
			}
456
457
			spin_lock(&__ipipe_cpu_barrier);
458
459
			__ipipe_cpu_sync = syncfn;
460
461
			/* Send the sync IPI to all processors but the current one. */
462
			apic->send_IPI_allbutself(IPIPE_CRITICAL_VECTOR);
463
464
			cpus_andnot(lock_map, cpu_online_map, __ipipe_cpu_lock_map);
465
466
			while (!cpus_equal(__ipipe_cpu_sync_map, lock_map))
467
				cpu_relax();
468
		}
469
470
		atomic_inc(&__ipipe_critical_count);
471
	}
472
#endif	/* CONFIG_SMP */
473
474
	return flags;
475
}
476
477
/* ipipe_critical_exit() -- Release the superlock. */
478
479
void ipipe_critical_exit(unsigned long flags)
480
{
481
#ifdef CONFIG_SMP
482
	if (num_online_cpus() == 1)
483
		goto out;
484
485
	if (atomic_dec_and_test(&__ipipe_critical_count)) {
486
		spin_unlock(&__ipipe_cpu_barrier);
487
488
		while (!cpus_empty(__ipipe_cpu_sync_map))
489
			cpu_relax();
490
491
		cpu_clear(ipipe_processor_id(), __ipipe_cpu_lock_map);
492
		clear_bit(0, &__ipipe_critical_lock);
493
		smp_mb__after_clear_bit();
494
	}
495
out:
496
#endif	/* CONFIG_SMP */
497
498
	local_irq_restore_hw(flags);
499
}
500
501
static inline void __fixup_if(int s, struct pt_regs *regs)
502
{
503
	/*
504
	 * Have the saved hw state look like the domain stall bit, so
505
	 * that __ipipe_unstall_iret_root() restores the proper
506
	 * pipeline state for the root stage upon exit.
507
	 */
508
	if (s)
509
		regs->flags &= ~X86_EFLAGS_IF;
510
	else
511
		regs->flags |= X86_EFLAGS_IF;
512
}
513
514
#ifdef CONFIG_X86_32
515
516
/*
517
 * Check the stall bit of the root domain to make sure the existing
518
 * preemption opportunity upon in-kernel resumption could be
519
 * exploited. In case a rescheduling could take place, the root stage
520
 * is stalled before the hw interrupts are re-enabled. This routine
521
 * must be called with hw interrupts off.
522
 */
523
524
asmlinkage int __ipipe_kpreempt_root(struct pt_regs regs)
525
{
526
	if (test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
527
		/* Root stage is stalled: rescheduling denied. */
528
		return 0;
529
530
	__ipipe_stall_root();
531
	trace_hardirqs_off();
532
	local_irq_enable_hw_notrace();
533
534
	return 1;	/* Ok, may reschedule now. */
535
}
536
537
asmlinkage void __ipipe_unstall_iret_root(struct pt_regs regs)
538
{
539
	struct ipipe_percpu_domain_data *p;
540
541
	/* Emulate IRET's handling of the interrupt flag. */
542
543
	local_irq_disable_hw();
544
545
	p = ipipe_root_cpudom_ptr();
546
547
	/*
548
	 * Restore the software state as it used to be on kernel
549
	 * entry. CAUTION: NMIs must *not* return through this
550
	 * emulation.
551
	 */
552
	if (raw_irqs_disabled_flags(regs.flags)) {
553
		if (!__test_and_set_bit(IPIPE_STALL_FLAG, &p->status))
554
			trace_hardirqs_off();
555
		regs.flags |= X86_EFLAGS_IF;
556
	} else {
557
		if (test_bit(IPIPE_STALL_FLAG, &p->status)) {
558
			trace_hardirqs_on();
559
			__clear_bit(IPIPE_STALL_FLAG, &p->status);
560
		}
561
		/*
562
		 * We could have received and logged interrupts while
563
		 * stalled in the syscall path: play the log now to
564
		 * release any pending event. The SYNC_BIT prevents
565
		 * infinite recursion in case of flooding.
566
		 */
567
		if (unlikely(__ipipe_ipending_p(p)))
568
			__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
569
	}
570
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
571
	ipipe_trace_end(0x8000000D);
572
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
573
}
574
575
#else /* !CONFIG_X86_32 */
576
577
#ifdef CONFIG_PREEMPT
578
579
asmlinkage void preempt_schedule_irq(void);
580
581
void __ipipe_preempt_schedule_irq(void)
582
{
583
	struct ipipe_percpu_domain_data *p; 
584
	unsigned long flags;  
585
	/*  
586
	 * We have no IRQ state fixup on entry to exceptions in 
587
	 * x86_64, so we have to stall the root stage before 
588
	 * rescheduling. 
589
	 */  
590
	BUG_ON(!irqs_disabled_hw());  
591
	local_irq_save(flags);	
592
	local_irq_enable_hw();	
593
	preempt_schedule_irq(); /* Ok, may reschedule now. */  
594
	local_irq_disable_hw(); 
595
596
	/*
597
	 * Flush any pending interrupt that may have been logged after
598
	 * preempt_schedule_irq() stalled the root stage before
599
	 * returning to us, and now.
600
	 */
601
	p = ipipe_root_cpudom_ptr(); 
602
	if (unlikely(__ipipe_ipending_p(p))) { 
603
		add_preempt_count(PREEMPT_ACTIVE);
604
		trace_hardirqs_on();
605
		clear_bit(IPIPE_STALL_FLAG, &p->status); 
606
		__ipipe_sync_pipeline(IPIPE_IRQ_DOALL); 
607
		sub_preempt_count(PREEMPT_ACTIVE);
608
	} 
609
610
	__local_irq_restore_nosync(flags);  
611
}
612
613
#endif	/* CONFIG_PREEMPT */
614
615
#endif /* !CONFIG_X86_32 */
616
617
void __ipipe_halt_root(void)
618
{
619
	struct ipipe_percpu_domain_data *p;
620
621
	/* Emulate sti+hlt sequence over the root domain. */
622
623
	local_irq_disable_hw();
624
625
	p = ipipe_root_cpudom_ptr();
626
627
	trace_hardirqs_on();
628
	clear_bit(IPIPE_STALL_FLAG, &p->status);
629
630
	if (unlikely(__ipipe_ipending_p(p))) {
631
		__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
632
		local_irq_enable_hw();
633
	} else {
634
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
635
		ipipe_trace_end(0x8000000E);
636
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
637
		asm volatile("sti; hlt": : :"memory");
638
	}
639
}
640
641
static void do_machine_check_vector(struct pt_regs *regs, long error_code)
642
{
643
#ifdef CONFIG_X86_MCE
644
#ifdef CONFIG_X86_32
645
	extern void (*machine_check_vector)(struct pt_regs *, long error_code);
646
	machine_check_vector(regs, error_code);
647
#else
648
	do_machine_check(regs, error_code);
649
#endif
650
#endif /* CONFIG_X86_MCE */
651
}
652
653
/* Work around genksyms's issue with over-qualification in decls. */
654
655
typedef void dotraplinkage __ipipe_exhandler(struct pt_regs *, long);
656
657
typedef __ipipe_exhandler *__ipipe_exptr;
658
659
static __ipipe_exptr __ipipe_std_extable[] = {
660
661
	[ex_do_divide_error] = &do_divide_error,
662
	[ex_do_overflow] = &do_overflow,
663
	[ex_do_bounds] = &do_bounds,
664
	[ex_do_invalid_op] = &do_invalid_op,
665
	[ex_do_coprocessor_segment_overrun] = &do_coprocessor_segment_overrun,
666
	[ex_do_invalid_TSS] = &do_invalid_TSS,
667
	[ex_do_segment_not_present] = &do_segment_not_present,
668
	[ex_do_stack_segment] = &do_stack_segment,
669
	[ex_do_general_protection] = do_general_protection,
670
	[ex_do_page_fault] = (__ipipe_exptr)&do_page_fault,
671
	[ex_do_spurious_interrupt_bug] = &do_spurious_interrupt_bug,
672
	[ex_do_coprocessor_error] = &do_coprocessor_error,
673
	[ex_do_alignment_check] = &do_alignment_check,
674
	[ex_machine_check_vector] = &do_machine_check_vector,
675
	[ex_do_simd_coprocessor_error] = &do_simd_coprocessor_error,
676
	[ex_do_device_not_available] = &do_device_not_available,
677
#ifdef CONFIG_X86_32
678
	[ex_do_iret_error] = &do_iret_error,
679
#endif
680
};
681
682
#ifdef CONFIG_KGDB
683
#include <linux/kgdb.h>
684
685
static int __ipipe_xlate_signo[] = {
686
687
	[ex_do_divide_error] = SIGFPE,
688
	[ex_do_debug] = SIGTRAP,
689
	[2] = -1,
690
	[ex_do_int3] = SIGTRAP,
691
	[ex_do_overflow] = SIGSEGV,
692
	[ex_do_bounds] = SIGSEGV,
693
	[ex_do_invalid_op] = SIGILL,
694
	[ex_do_device_not_available] = -1,
695
	[8] = -1,
696
	[ex_do_coprocessor_segment_overrun] = SIGFPE,
697
	[ex_do_invalid_TSS] = SIGSEGV,
698
	[ex_do_segment_not_present] = SIGBUS,
699
	[ex_do_stack_segment] = SIGBUS,
700
	[ex_do_general_protection] = SIGSEGV,
701
	[ex_do_page_fault] = SIGSEGV,
702
	[ex_do_spurious_interrupt_bug] = -1,
703
	[ex_do_coprocessor_error] = -1,
704
	[ex_do_alignment_check] = SIGBUS,
705
	[ex_machine_check_vector] = -1,
706
	[ex_do_simd_coprocessor_error] = -1,
707
	[20 ... 31] = -1,
708
#ifdef CONFIG_X86_32
709
	[ex_do_iret_error] = SIGSEGV,
710
#endif
711
};
712
#endif /* CONFIG_KGDB */
713
714
int __ipipe_handle_exception(struct pt_regs *regs, long error_code, int vector)
715
{
716
	bool root_entry = false;
717
	unsigned long flags = 0;
718
	unsigned long cr2 = 0;
719
720
	if (ipipe_root_domain_p) {
721
		root_entry = true;
722
723
		local_save_flags(flags);
724
		/*
725
		 * Replicate hw interrupt state into the virtual mask
726
		 * before calling the I-pipe event handler over the
727
		 * root domain. Also required later when calling the
728
		 * Linux exception handler.
729
		 */
730
		if (irqs_disabled_hw())
731
			local_irq_disable();
732
	}
733
#ifdef CONFIG_KGDB
734
	/* catch exception KGDB is interested in over non-root domains */
735
	else if (__ipipe_xlate_signo[vector] >= 0 &&
736
		 !kgdb_handle_exception(vector, __ipipe_xlate_signo[vector],
737
					error_code, regs))
738
		return 1;
739
#endif /* CONFIG_KGDB */
740
741
	if (vector == ex_do_page_fault)
742
		cr2 = native_read_cr2();
743
744
	if (unlikely(ipipe_trap_notify(vector, regs))) {
745
		if (root_entry)
746
			local_irq_restore_nosync(flags);
747
		return 1;
748
	}
749
750
	if (likely(ipipe_root_domain_p)) {
751
		/*
752
		 * In case we faulted in the iret path, regs.flags do not
753
		 * match the root domain state. The fault handler or the
754
		 * low-level return code may evaluate it. Fix this up, either
755
		 * by the root state sampled on entry or, if we migrated to
756
		 * root, with the current state.
757
		 */
758
		__fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
759
					raw_irqs_disabled(), regs);
760
	} else {
761
		/* Detect unhandled faults over non-root domains. */
762
		struct ipipe_domain *ipd = ipipe_current_domain;
763
764
		/* Switch to root so that Linux can handle the fault cleanly. */
765
		__ipipe_current_domain = ipipe_root_domain;
766
767
		ipipe_trace_panic_freeze();
768
769
		/* Always warn about user land and unfixable faults. */
770
		if ((error_code & 4) || !search_exception_tables(instruction_pointer(regs))) {
771
			printk(KERN_ERR "BUG: Unhandled exception over domain"
772
			       " %s at 0x%lx - switching to ROOT\n",
773
			       ipd->name, instruction_pointer(regs));
774
			dump_stack();
775
			ipipe_trace_panic_dump();
776
#ifdef CONFIG_IPIPE_DEBUG
777
		/* Also report fixable ones when debugging is enabled. */
778
		} else {
779
			printk(KERN_WARNING "WARNING: Fixable exception over "
780
			       "domain %s at 0x%lx - switching to ROOT\n",
781
			       ipd->name, instruction_pointer(regs));
782
			dump_stack();
783
			ipipe_trace_panic_dump();
784
#endif /* CONFIG_IPIPE_DEBUG */
785
		}
786
	}
787
788
	if (vector == ex_do_page_fault)
789
		write_cr2(cr2);
790
791
	__ipipe_std_extable[vector](regs, error_code);
792
793
	/*
794
	 * Relevant for 64-bit: Restore root domain state as the low-level
795
	 * return code will not align it to regs.flags.
796
	 */
797
	if (root_entry)
798
		local_irq_restore_nosync(flags);
799
800
	return 0;
801
}
802
803
int __ipipe_divert_exception(struct pt_regs *regs, int vector)
804
{
805
	bool root_entry = false;
806
	unsigned long flags = 0;
807
808
	if (ipipe_root_domain_p) {
809
		root_entry = true;
810
811
		local_save_flags(flags);
812
813
		if (irqs_disabled_hw()) {
814
			/*
815
			 * Same root state handling as in
816
			 * __ipipe_handle_exception.
817
			 */
818
			local_irq_disable();
819
		}
820
	}
821
#ifdef CONFIG_KGDB
822
	/* catch int1 and int3 over non-root domains */
823
	else {
824
#ifdef CONFIG_X86_32
825
		if (vector != ex_do_device_not_available)
826
#endif
827
		{
828
			unsigned int condition = 0;
829
830
			if (vector == 1)
831
				get_debugreg(condition, 6);
832
			if (!kgdb_handle_exception(vector, SIGTRAP, condition, regs))
833
				return 1;
834
		}
835
	}
836
#endif /* CONFIG_KGDB */
837
838
	if (unlikely(ipipe_trap_notify(vector, regs))) {
839
		if (root_entry)
840
			local_irq_restore_nosync(flags);
841
		return 1;
842
	}
843
844
	/* see __ipipe_handle_exception */
845
	if (likely(ipipe_root_domain_p))
846
		__fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
847
					raw_irqs_disabled(), regs);
848
	/*
849
	 * No need to restore root state in the 64-bit case, the Linux handler
850
	 * and the return code will take care of it.
851
	 */
852
853
	return 0;
854
}
855
856
int __ipipe_syscall_root(struct pt_regs *regs)
857
{
858
	struct ipipe_percpu_domain_data *p;
859
	unsigned long flags;
860
	int ret;
861
862
	/*
863
	 * This routine either returns:
864
	 * 0 -- if the syscall is to be passed to Linux;
865
	 * >0 -- if the syscall should not be passed to Linux, and no
866
	 * tail work should be performed;
867
	 * <0 -- if the syscall should not be passed to Linux but the
868
	 * tail work has to be performed (for handling signals etc).
869
	 */
870
871
	if (!__ipipe_syscall_watched_p(current, regs->orig_ax) ||
872
	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
873
		return 0;
874
875
	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
876
	if (!ipipe_root_domain_p) {
877
#ifdef CONFIG_X86_64
878
		local_irq_disable_hw();
879
#endif
880
		return 1;
881
	}
882
883
	local_irq_save_hw(flags);
884
	p = ipipe_root_cpudom_ptr();
885
#ifdef CONFIG_X86_32
886
	/*
887
	 * Fix-up only required on 32-bit as only here the IRET return code
888
	 * will evaluate the flags.
889
	 */
890
	__fixup_if(test_bit(IPIPE_STALL_FLAG, &p->status), regs);
891
#endif
892
	/*
893
	 * If allowed, sync pending VIRQs before _TIF_NEED_RESCHED is
894
	 * tested.
895
	 */
896
	if (__ipipe_ipending_p(p))
897
		__ipipe_sync_pipeline(IPIPE_IRQ_DOVIRT);
898
#ifdef CONFIG_X86_64
899
	if (!ret)
900
#endif
901
		local_irq_restore_hw(flags);
902
903
	return -ret;
904
}
905
906
/*
907
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
908
 * interrupt protection log is maintained here for each domain.  Hw
909
 * interrupts are off on entry.
910
 */
911
int __ipipe_handle_irq(struct pt_regs *regs)
912
{
913
	struct ipipe_domain *this_domain, *next_domain;
914
	unsigned int vector = regs->orig_ax, irq;
915
	struct list_head *head, *pos;
916
	int m_ack;
917
918
	if ((long)regs->orig_ax < 0) {
919
		vector = ~vector;
920
#ifdef CONFIG_X86_LOCAL_APIC
921
		if (vector >= FIRST_SYSTEM_VECTOR)
922
			irq = ipipe_apic_vector_irq(vector);
923
#ifdef CONFIG_SMP
924
		else if (vector == IRQ_MOVE_CLEANUP_VECTOR)
925
			irq = vector;
926
#endif /* CONFIG_SMP */
927
		else
928
#endif /* CONFIG_X86_LOCAL_APIC */
929
			irq = __get_cpu_var(vector_irq)[vector];
930
		m_ack = 0;
931
	} else { /* This is a self-triggered one. */
932
		irq = vector;
933
		m_ack = 1;
934
	}
935
936
	this_domain = ipipe_current_domain;
937
938
	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
939
		head = &this_domain->p_link;
940
	else {
941
		head = __ipipe_pipeline.next;
942
		next_domain = list_entry(head, struct ipipe_domain, p_link);
943
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
944
			if (!m_ack && next_domain->irqs[irq].acknowledge)
945
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
946
			__ipipe_dispatch_wired(next_domain, irq);
947
			goto finalize_nosync;
948
		}
949
	}
950
951
	/* Ack the interrupt. */
952
953
	pos = head;
954
955
	while (pos != &__ipipe_pipeline) {
956
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
957
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
958
			__ipipe_set_irq_pending(next_domain, irq);
959
			if (!m_ack && next_domain->irqs[irq].acknowledge) {
960
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
961
				m_ack = 1;
962
			}
963
		}
964
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
965
			break;
966
		pos = next_domain->p_link.next;
967
	}
968
969
	/*
970
	 * If the interrupt preempted the head domain, then do not
971
	 * even try to walk the pipeline, unless an interrupt is
972
	 * pending for it.
973
	 */
974
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
975
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
976
		goto finalize_nosync;
977
978
	/*
979
	 * Now walk the pipeline, yielding control to the highest
980
	 * priority domain that has pending interrupt(s) or
981
	 * immediately to the current domain if the interrupt has been
982
	 * marked as 'sticky'. This search does not go beyond the
983
	 * current domain in the pipeline.
984
	 */
985
986
	__ipipe_walk_pipeline(head);
987
988
finalize_nosync:
989
990
	/*
991
	 * Given our deferred dispatching model for regular IRQs, we
992
	 * only record CPU regs for the last timer interrupt, so that
993
	 * the timer handler charges CPU times properly. It is assumed
994
	 * that other interrupt handlers don't actually care for such
995
	 * information.
996
	 */
997
998
	if (irq == __ipipe_tick_irq) {
999
		struct pt_regs *tick_regs = &__raw_get_cpu_var(__ipipe_tick_regs);
1000
		tick_regs->flags = regs->flags;
1001
		tick_regs->cs = regs->cs;
1002
		tick_regs->ip = regs->ip;
1003
		tick_regs->bp = regs->bp;
1004
#ifdef CONFIG_X86_64
1005
		tick_regs->ss = regs->ss;
1006
		tick_regs->sp = regs->sp;
1007
#endif
1008
		if (!ipipe_root_domain_p)
1009
			tick_regs->flags &= ~X86_EFLAGS_IF;
1010
	}
1011
1012
	if (!ipipe_root_domain_p ||
1013
	    test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
1014
		return 0;
1015
1016
#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
1017
	/*
1018
	 * Prevent a spurious rescheduling from being triggered on
1019
	 * preemptible kernels along the way out through
1020
	 * ret_from_intr.
1021
	 */
1022
	if ((long)regs->orig_ax < 0)
1023
		__set_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
1024
#endif	/* CONFIG_X86_32 && CONFIG_SMP */
1025
1026
	return 1;
1027
}
1028
1029
int __ipipe_check_tickdev(const char *devname)
1030
{
1031
#ifdef CONFIG_X86_LOCAL_APIC
1032
	if (!strcmp(devname, "lapic"))
1033
		return __ipipe_check_lapic();
1034
#endif
1035
1036
	return 1;
1037
}
1038
1039
void *ipipe_irq_handler = __ipipe_handle_irq;
1040
EXPORT_SYMBOL(ipipe_irq_handler);
1041
EXPORT_SYMBOL(io_apic_irqs);
1042
EXPORT_PER_CPU_SYMBOL(__ipipe_tick_regs);
1043
__attribute__((regparm(3))) void do_notify_resume(struct pt_regs *, void *, __u32);
1044
EXPORT_SYMBOL(do_notify_resume);
1045
extern void *sys_call_table;
1046
EXPORT_SYMBOL(sys_call_table);
1047
#ifdef CONFIG_X86_32
1048
extern void ret_from_intr(void);
1049
EXPORT_SYMBOL(ret_from_intr);
1050
extern spinlock_t i8259A_lock;
1051
extern struct desc_struct idt_table[];
1052
#else
1053
extern ipipe_spinlock_t i8259A_lock;
1054
extern gate_desc idt_table[];
1055
#endif
1056
EXPORT_PER_CPU_SYMBOL(vector_irq);
1057
EXPORT_SYMBOL(idt_table);
1058
EXPORT_SYMBOL(i8259A_lock);
1059
EXPORT_SYMBOL(__ipipe_sync_stage);
1060
EXPORT_SYMBOL(kill_proc_info);
1061
EXPORT_SYMBOL(find_task_by_pid_ns);
1062
1063
EXPORT_SYMBOL(__ipipe_tick_irq);
1064
1065
EXPORT_SYMBOL_GPL(irq_to_desc);
1066
struct task_struct *__switch_to(struct task_struct *prev_p,
1067
				struct task_struct *next_p);
1068
EXPORT_SYMBOL_GPL(__switch_to);
1069
EXPORT_SYMBOL_GPL(show_stack);
1070
1071
EXPORT_PER_CPU_SYMBOL_GPL(init_tss);
1072
#ifdef CONFIG_SMP
1073
EXPORT_PER_CPU_SYMBOL_GPL(cpu_tlbstate);
1074
#endif /* CONFIG_SMP */
1075
1076
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
1077
EXPORT_SYMBOL(tasklist_lock);
1078
#endif /* CONFIG_SMP || CONFIG_DEBUG_SPINLOCK */
1079
1080
#if defined(CONFIG_CC_STACKPROTECTOR) && defined(CONFIG_X86_64)
1081
EXPORT_PER_CPU_SYMBOL_GPL(irq_stack_union);
1082
#endif
1083
1084
EXPORT_SYMBOL(__ipipe_halt_root);
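
__ipipe_syscall_root() above documents a three-way return convention (0, >0, <0). The sketch below shows, purely for illustration and outside the patch itself, how a syscall entry path would be expected to act on it; dispatch_linux_syscall() and do_syscall_tail_work() are hypothetical stand-ins for the real entry code.

	/* Illustration only -- not part of the patch. */
	static void syscall_entry_sketch(struct pt_regs *regs)
	{
		int ret = __ipipe_syscall_root(regs);

		if (ret == 0) {
			dispatch_linux_syscall(regs);	/* hypothetical: normal Linux path */
			do_syscall_tail_work(regs);	/* hypothetical: signals, resched */
		} else if (ret < 0) {
			do_syscall_tail_work(regs);	/* syscall skipped, tail work still due */
		}
		/* ret > 0: fully handled by a co-kernel domain; return immediately. */
	}
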
(-)a/arch/x86/kernel/irq.c (-3 / +4 lines)
Lines 38-44 void ack_bad_irq(unsigned int irq) Link Here
38
	 * completely.
38
	 * completely.
39
	 * But only ack when the APIC is enabled -AK
39
	 * But only ack when the APIC is enabled -AK
40
	 */
40
	 */
41
	ack_APIC_irq();
41
	__ack_APIC_irq();
42
}
42
}
43
43
44
#define irq_stats(x)		(&per_cpu(irq_stat, x))
44
#define irq_stats(x)		(&per_cpu(irq_stat, x))
Lines 231-241 unsigned int __irq_entry do_IRQ(struct pt_regs *regs) Link Here
231
	unsigned vector = ~regs->orig_ax;
231
	unsigned vector = ~regs->orig_ax;
232
	unsigned irq;
232
	unsigned irq;
233
233
234
	irq = __get_cpu_var(vector_irq)[vector];
235
	__ipipe_move_root_irq(irq);
236
234
	exit_idle();
237
	exit_idle();
235
	irq_enter();
238
	irq_enter();
236
239
237
	irq = __get_cpu_var(vector_irq)[vector];
238
239
	if (!handle_irq(irq, regs)) {
240
	if (!handle_irq(irq, regs)) {
240
		ack_APIC_irq();
241
		ack_APIC_irq();
241
242
(-)a/arch/x86/kernel/irqinit.c (+12 lines)
Lines 159-169 static void __init smp_intr_init(void) Link Here
159
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
159
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
160
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
160
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
161
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
161
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
162
#ifndef CONFIG_IPIPE
162
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
163
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
163
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
164
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
164
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
165
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
165
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
166
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
166
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
167
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
168
#endif
167
169
168
	/* IPI for generic function call */
170
	/* IPI for generic function call */
169
	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
171
	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
Lines 178-183 static void __init smp_intr_init(void) Link Here
178
180
179
	/* IPI used for rebooting/stopping */
181
	/* IPI used for rebooting/stopping */
180
	alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
182
	alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
183
#if defined(CONFIG_IPIPE) && defined(CONFIG_X86_32)
184
	/* IPI for critical lock */
185
	alloc_intr_gate(IPIPE_CRITICAL_VECTOR, ipipe_ipiX);
186
#endif
181
#endif
187
#endif
182
#endif /* CONFIG_SMP */
188
#endif /* CONFIG_SMP */
183
}
189
}
Lines 212-217 static void __init apic_intr_init(void) Link Here
212
	alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
218
	alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
213
# endif
219
# endif
214
220
221
#if defined(CONFIG_IPIPE) && defined(CONFIG_X86_32)
222
	alloc_intr_gate(IPIPE_SERVICE_VECTOR0, ipipe_ipi0);
223
	alloc_intr_gate(IPIPE_SERVICE_VECTOR1, ipipe_ipi1);
224
	alloc_intr_gate(IPIPE_SERVICE_VECTOR2, ipipe_ipi2);
225
	alloc_intr_gate(IPIPE_SERVICE_VECTOR3, ipipe_ipi3);
226
#endif
215
#endif
227
#endif
216
}
228
}
217
229
(-)a/arch/x86/kernel/process.c (-2 / +19 lines)
Lines 35-41 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) Link Here
35
			return -ENOMEM;
35
			return -ENOMEM;
36
		WARN_ON((unsigned long)dst->thread.xstate & 15);
36
		WARN_ON((unsigned long)dst->thread.xstate & 15);
37
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
37
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
38
	} else {
39
#ifdef CONFIG_IPIPE
40
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
41
						      GFP_KERNEL);
42
		if (!dst->thread.xstate)
43
			return -ENOMEM;
44
#endif
38
	}
45
	}
46
39
	return 0;
47
	return 0;
40
}
48
}
41
49
Lines 61-66 void arch_task_cache_init(void) Link Here
61
        	kmem_cache_create("task_xstate", xstate_size,
69
        	kmem_cache_create("task_xstate", xstate_size,
62
				  __alignof__(union thread_xstate),
70
				  __alignof__(union thread_xstate),
63
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
71
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
72
#ifdef CONFIG_IPIPE
73
	current->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
74
						  GFP_KERNEL);
75
#endif
64
}
76
}
65
77
66
/*
78
/*
Lines 309-315 EXPORT_SYMBOL(default_idle); Link Here
309
321
310
void stop_this_cpu(void *dummy)
322
void stop_this_cpu(void *dummy)
311
{
323
{
312
	local_irq_disable();
324
	local_irq_disable_hw();
313
	/*
325
	/*
314
	 * Remove this CPU:
326
	 * Remove this CPU:
315
	 */
327
	 */
Lines 534-539 static void c1e_idle(void) Link Here
534
546
535
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
547
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
536
{
548
{
549
#ifdef CONFIG_IPIPE
550
#define default_to_mwait force_mwait
551
#else
552
#define default_to_mwait 1
553
#endif
537
#ifdef CONFIG_SMP
554
#ifdef CONFIG_SMP
538
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
555
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
539
		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
556
		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
Lines 543-549 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) Link Here
543
	if (pm_idle)
560
	if (pm_idle)
544
		return;
561
		return;
545
562
546
	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
563
	if (default_to_mwait && cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
547
		/*
564
		/*
548
		 * One CPU supports mwait => All CPUs supports mwait
565
		 * One CPU supports mwait => All CPUs supports mwait
549
		 */
566
		 */
(-)a/arch/x86/kernel/process_32.c (-1 / +3 lines)
Lines 305-314 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) Link Here
305
	regs->cs		= __USER_CS;
305
	regs->cs		= __USER_CS;
306
	regs->ip		= new_ip;
306
	regs->ip		= new_ip;
307
	regs->sp		= new_sp;
307
	regs->sp		= new_sp;
308
#ifndef CONFIG_IPIPE	/* Lazily handled, init_fpu() will reset the state. */
308
	/*
309
	/*
309
	 * Free the old FP and other extended state
310
	 * Free the old FP and other extended state
310
	 */
311
	 */
311
	free_thread_xstate(current);
312
	free_thread_xstate(current);
313
#endif
312
}
314
}
313
EXPORT_SYMBOL_GPL(start_thread);
315
EXPORT_SYMBOL_GPL(start_thread);
314
316
Lines 345-351 __switch_to(struct task_struct *prev_p, struct task_struct *next_p) Link Here
345
{
347
{
346
	struct thread_struct *prev = &prev_p->thread,
348
	struct thread_struct *prev = &prev_p->thread,
347
				 *next = &next_p->thread;
349
				 *next = &next_p->thread;
348
	int cpu = smp_processor_id();
350
	int cpu = raw_smp_processor_id();
349
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
351
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
350
	bool preload_fpu;
352
	bool preload_fpu;
351
353
(-)a/arch/x86/kernel/process_64.c (-1 / +6 lines)
Lines 58-63 asmlinkage extern void ret_from_fork(void); Link Here
58
DEFINE_PER_CPU(unsigned long, old_rsp);
58
DEFINE_PER_CPU(unsigned long, old_rsp);
59
static DEFINE_PER_CPU(unsigned char, is_idle);
59
static DEFINE_PER_CPU(unsigned char, is_idle);
60
60
61
asmlinkage extern void thread_return(void);
62
61
unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
63
unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
62
64
63
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
65
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
Lines 292-297 int copy_thread(unsigned long clone_flags, unsigned long sp, Link Here
292
	p->thread.sp = (unsigned long) childregs;
294
	p->thread.sp = (unsigned long) childregs;
293
	p->thread.sp0 = (unsigned long) (childregs+1);
295
	p->thread.sp0 = (unsigned long) (childregs+1);
294
	p->thread.usersp = me->thread.usersp;
296
	p->thread.usersp = me->thread.usersp;
297
 	p->thread.rip = (unsigned long) thread_return;
295
298
296
	set_tsk_thread_flag(p, TIF_FORK);
299
	set_tsk_thread_flag(p, TIF_FORK);
297
300
Lines 358-367 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) Link Here
358
	regs->ss		= __USER_DS;
361
	regs->ss		= __USER_DS;
359
	regs->flags		= 0x200;
362
	regs->flags		= 0x200;
360
	set_fs(USER_DS);
363
	set_fs(USER_DS);
364
#ifndef CONFIG_IPIPE	/* Lazily handled, init_fpu() will reset the state. */
361
	/*
365
	/*
362
	 * Free the old FP and other extended state
366
	 * Free the old FP and other extended state
363
	 */
367
	 */
364
	free_thread_xstate(current);
368
	free_thread_xstate(current);
369
#endif
365
}
370
}
366
EXPORT_SYMBOL_GPL(start_thread);
371
EXPORT_SYMBOL_GPL(start_thread);
367
372
Lines 380-386 __switch_to(struct task_struct *prev_p, struct task_struct *next_p) Link Here
380
{
385
{
381
	struct thread_struct *prev = &prev_p->thread;
386
	struct thread_struct *prev = &prev_p->thread;
382
	struct thread_struct *next = &next_p->thread;
387
	struct thread_struct *next = &next_p->thread;
383
	int cpu = smp_processor_id();
388
	int cpu = raw_smp_processor_id();
384
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
389
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
385
	unsigned fsindex, gsindex;
390
	unsigned fsindex, gsindex;
386
	bool preload_fpu;
391
	bool preload_fpu;
(-)a/arch/x86/kernel/smp.c (-2 / +2 lines)
Lines 184-192 static void native_smp_send_stop(void) Link Here
184
			udelay(1);
184
			udelay(1);
185
	}
185
	}
186
186
187
	local_irq_save(flags);
187
	local_irq_save_hw(flags);
188
	disable_local_APIC();
188
	disable_local_APIC();
189
	local_irq_restore(flags);
189
	local_irq_restore_hw(flags);
190
}
190
}
191
191
192
/*
192
/*
(-)a/arch/x86/kernel/smpboot.c (-4 / +4 lines)
Lines 266-272 static void __cpuinit smp_callin(void) Link Here
266
/*
266
/*
267
 * Activate a secondary processor.
267
 * Activate a secondary processor.
268
 */
268
 */
269
notrace static void __cpuinit start_secondary(void *unused)
269
static void __cpuinit start_secondary(void *unused)
270
{
270
{
271
	/*
271
	/*
272
	 * Don't put *anything* before cpu_init(), SMP booting is too
272
	 * Don't put *anything* before cpu_init(), SMP booting is too
Lines 837-843 do_rest: Link Here
837
int __cpuinit native_cpu_up(unsigned int cpu)
837
int __cpuinit native_cpu_up(unsigned int cpu)
838
{
838
{
839
	int apicid = apic->cpu_present_to_apicid(cpu);
839
	int apicid = apic->cpu_present_to_apicid(cpu);
840
	unsigned long flags;
840
 	unsigned long flags, _flags;
841
	int err;
841
	int err;
842
842
843
	WARN_ON(irqs_disabled());
843
	WARN_ON(irqs_disabled());
Lines 889-897 int __cpuinit native_cpu_up(unsigned int cpu) Link Here
889
	 * Check TSC synchronization with the AP (keep irqs disabled
889
	 * Check TSC synchronization with the AP (keep irqs disabled
890
	 * while doing so):
890
	 * while doing so):
891
	 */
891
	 */
892
	local_irq_save(flags);
892
	local_irq_save_full(flags, _flags);
893
	check_tsc_sync_source(cpu);
893
	check_tsc_sync_source(cpu);
894
	local_irq_restore(flags);
894
	local_irq_restore_full(flags, _flags);
895
895
896
	while (!cpu_online(cpu)) {
896
	while (!cpu_online(cpu)) {
897
		cpu_relax();
897
		cpu_relax();
(-)a/arch/x86/kernel/time.c (-2 / +3 lines)
Lines 70-80 static irqreturn_t timer_interrupt(int irq, void *dev_id) Link Here
70
		 * manually to deassert NMI lines for the watchdog if run
70
		 * manually to deassert NMI lines for the watchdog if run
71
		 * on an 82489DX-based system.
71
		 * on an 82489DX-based system.
72
		 */
72
		 */
73
		spin_lock(&i8259A_lock);
73
		unsigned long flags;
74
		spin_lock_irqsave_cond(&i8259A_lock,flags);
74
		outb(0x0c, PIC_MASTER_OCW3);
75
		outb(0x0c, PIC_MASTER_OCW3);
75
		/* Ack the IRQ; AEOI will end it automatically. */
76
		/* Ack the IRQ; AEOI will end it automatically. */
76
		inb(PIC_MASTER_POLL);
77
		inb(PIC_MASTER_POLL);
77
		spin_unlock(&i8259A_lock);
78
		spin_unlock_irqrestore_cond(&i8259A_lock,flags);
78
	}
79
	}
79
80
80
	global_clock_event->event_handler(global_clock_event);
81
	global_clock_event->event_handler(global_clock_event);
(-)a/arch/x86/kernel/traps.c (+4 lines)
Lines 805-810 void __math_state_restore(void) Link Here
805
	 */
805
	 */
806
	if (unlikely(restore_fpu_checking(tsk))) {
806
	if (unlikely(restore_fpu_checking(tsk))) {
807
		stts();
807
		stts();
808
		local_irq_enable_hw_cond();
808
		force_sig(SIGSEGV, tsk);
809
		force_sig(SIGSEGV, tsk);
809
		return;
810
		return;
810
	}
811
	}
Lines 827-832 asmlinkage void math_state_restore(void) Link Here
827
{
828
{
828
	struct thread_info *thread = current_thread_info();
829
	struct thread_info *thread = current_thread_info();
829
	struct task_struct *tsk = thread->task;
830
	struct task_struct *tsk = thread->task;
831
	unsigned long flags;
830
832
831
	if (!tsk_used_math(tsk)) {
833
	if (!tsk_used_math(tsk)) {
832
		local_irq_enable();
834
		local_irq_enable();
Lines 843-851 asmlinkage void math_state_restore(void) Link Here
843
		local_irq_disable();
845
		local_irq_disable();
844
	}
846
	}
845
847
848
  	local_irq_save_hw_cond(flags);
846
	clts();				/* Allow maths ops (or we recurse) */
849
	clts();				/* Allow maths ops (or we recurse) */
847
850
848
	__math_state_restore();
851
	__math_state_restore();
852
 	local_irq_restore_hw_cond(flags);
849
}
853
}
850
EXPORT_SYMBOL_GPL(math_state_restore);
854
EXPORT_SYMBOL_GPL(math_state_restore);
851
855
(-)a/arch/x86/kernel/vm86_32.c (+4 lines)
Lines 148-159 struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs) Link Here
148
		do_exit(SIGSEGV);
148
		do_exit(SIGSEGV);
149
	}
149
	}
150
150
151
 	local_irq_disable_hw_cond();
151
	tss = &per_cpu(init_tss, get_cpu());
152
	tss = &per_cpu(init_tss, get_cpu());
152
	current->thread.sp0 = current->thread.saved_sp0;
153
	current->thread.sp0 = current->thread.saved_sp0;
153
	current->thread.sysenter_cs = __KERNEL_CS;
154
	current->thread.sysenter_cs = __KERNEL_CS;
154
	load_sp0(tss, &current->thread);
155
	load_sp0(tss, &current->thread);
155
	current->thread.saved_sp0 = 0;
156
	current->thread.saved_sp0 = 0;
156
	put_cpu();
157
	put_cpu();
158
 	local_irq_enable_hw_cond();
157
159
158
	ret = KVM86->regs32;
160
	ret = KVM86->regs32;
159
161
Lines 324-335 static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk Link Here
324
	tsk->thread.saved_fs = info->regs32->fs;
326
	tsk->thread.saved_fs = info->regs32->fs;
325
	tsk->thread.saved_gs = get_user_gs(info->regs32);
327
	tsk->thread.saved_gs = get_user_gs(info->regs32);
326
328
329
 	local_irq_disable_hw_cond();
327
	tss = &per_cpu(init_tss, get_cpu());
330
	tss = &per_cpu(init_tss, get_cpu());
328
	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
331
	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
329
	if (cpu_has_sep)
332
	if (cpu_has_sep)
330
		tsk->thread.sysenter_cs = 0;
333
		tsk->thread.sysenter_cs = 0;
331
	load_sp0(tss, &tsk->thread);
334
	load_sp0(tss, &tsk->thread);
332
	put_cpu();
335
	put_cpu();
336
 	local_irq_enable_hw_cond();
333
337
334
	tsk->thread.screen_bitmap = info->screen_bitmap;
338
	tsk->thread.screen_bitmap = info->screen_bitmap;
335
	if (info->flags & VM86_SCREEN_BITMAP)
339
	if (info->flags & VM86_SCREEN_BITMAP)
(-)a/arch/x86/lib/mmx_32.c (-1 / +1 lines)
Lines 30-36 void *_mmx_memcpy(void *to, const void *from, size_t len) Link Here
30
	void *p;
30
	void *p;
31
	int i;
31
	int i;
32
32
33
	if (unlikely(in_interrupt()))
33
	if (unlikely(!ipipe_root_domain_p || in_interrupt()))
34
		return __memcpy(to, from, len);
34
		return __memcpy(to, from, len);
35
35
36
	p = to;
36
	p = to;
(-)a/arch/x86/lib/thunk_64.S (+4 lines)
Lines 65-70 Link Here
65
	thunk lockdep_sys_exit_thunk,lockdep_sys_exit
65
	thunk lockdep_sys_exit_thunk,lockdep_sys_exit
66
#endif
66
#endif
67
	
67
	
68
#ifdef CONFIG_IPIPE
69
	thunk_retrax __ipipe_syscall_root_thunk,__ipipe_syscall_root
70
#endif
71
	
68
	/* SAVE_ARGS below is used only for the .cfi directives it contains. */
72
	/* SAVE_ARGS below is used only for the .cfi directives it contains. */
69
	CFI_STARTPROC
73
	CFI_STARTPROC
70
	SAVE_ARGS
74
	SAVE_ARGS
(-)a/arch/x86/mm/fault.c (-37 / +86 lines)
Lines 1-3 Link Here
1
1
/*
2
/*
2
 *  Copyright (C) 1995  Linus Torvalds
3
 *  Copyright (C) 1995  Linus Torvalds
3
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
4
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
Lines 323-365 out: Link Here
323
324
324
#else /* CONFIG_X86_64: */
325
#else /* CONFIG_X86_64: */
325
326
326
void vmalloc_sync_all(void)
327
static inline int vmalloc_sync_one(pgd_t *pgd, unsigned long address)
327
{
328
	unsigned long address;
329
330
	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
331
	     address += PGDIR_SIZE) {
332
333
		const pgd_t *pgd_ref = pgd_offset_k(address);
334
		unsigned long flags;
335
		struct page *page;
336
337
		if (pgd_none(*pgd_ref))
338
			continue;
339
340
		spin_lock_irqsave(&pgd_lock, flags);
341
		list_for_each_entry(page, &pgd_list, lru) {
342
			pgd_t *pgd;
343
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
344
			if (pgd_none(*pgd))
345
				set_pgd(pgd, *pgd_ref);
346
			else
347
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
348
		}
349
		spin_unlock_irqrestore(&pgd_lock, flags);
350
	}
351
}
352
353
/*
354
 * 64-bit:
355
 *
356
 *   Handle a fault on the vmalloc area
357
 *
358
 * This assumes no large pages in there.
359
 */
360
static noinline int vmalloc_fault(unsigned long address)
361
{
328
{
362
	pgd_t *pgd, *pgd_ref;
329
	pgd_t *pgd_ref;
363
	pud_t *pud, *pud_ref;
330
	pud_t *pud, *pud_ref;
364
	pmd_t *pmd, *pmd_ref;
331
	pmd_t *pmd, *pmd_ref;
365
	pte_t *pte, *pte_ref;
332
	pte_t *pte, *pte_ref;
Lines 373-379 static noinline int vmalloc_fault(unsigned long address) Link Here
373
	 * happen within a race in page table update. In the later
340
	 * happen within a race in page table update. In the later
374
	 * case just flush:
341
	 * case just flush:
375
	 */
342
	 */
376
	pgd = pgd_offset(current->active_mm, address);
377
	pgd_ref = pgd_offset_k(address);
343
	pgd_ref = pgd_offset_k(address);
378
	if (pgd_none(*pgd_ref))
344
	if (pgd_none(*pgd_ref))
379
		return -1;
345
		return -1;
Lines 421-426 static noinline int vmalloc_fault(unsigned long address) Link Here
421
	return 0;
387
	return 0;
422
}
388
}
423
389
390
void vmalloc_sync_all(void)
391
{
392
	unsigned long address;
393
394
	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
395
	     address += PGDIR_SIZE) {
396
397
		const pgd_t *pgd_ref = pgd_offset_k(address);
398
		unsigned long flags;
399
		struct page *page;
400
401
		if (pgd_none(*pgd_ref))
402
			continue;
403
404
		spin_lock_irqsave(&pgd_lock, flags);
405
		list_for_each_entry(page, &pgd_list, lru) {
406
			pgd_t *pgd;
407
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
408
			if (pgd_none(*pgd))
409
				set_pgd(pgd, *pgd_ref);
410
			else
411
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
412
		}
413
		spin_unlock_irqrestore(&pgd_lock, flags);
414
	}
415
}
416
417
/*
418
 * 64-bit:
419
 *
420
 *   Handle a fault on the vmalloc area
421
 *
422
 * This assumes no large pages in there.
423
 */
424
static noinline int vmalloc_fault(unsigned long address)
425
{
426
	pgd_t *pgd = pgd_offset(current->active_mm, address);
427
	return vmalloc_sync_one(pgd, address);
428
}
429
424
static const char errata93_warning[] =
430
static const char errata93_warning[] =
425
KERN_ERR 
431
KERN_ERR 
426
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
432
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
Lines 958-963 do_page_fault(struct pt_regs *regs, unsigned long error_code) Link Here
958
	/* Get the faulting address: */
964
	/* Get the faulting address: */
959
	address = read_cr2();
965
	address = read_cr2();
960
966
967
	if (!__ipipe_pipeline_head_p(ipipe_root_domain))
968
		local_irq_enable_hw_cond();
969
961
	/*
970
	/*
962
	 * Detect and handle instructions that would cause a page fault for
971
	 * Detect and handle instructions that would cause a page fault for
963
	 * both a tracked kernel page and a userspace page.
972
	 * both a tracked kernel page and a userspace page.
Lines 1137-1139 good_area: Link Here
1137
1146
1138
	up_read(&mm->mmap_sem);
1147
	up_read(&mm->mmap_sem);
1139
}
1148
}
1149
1150
#ifdef CONFIG_IPIPE
1151
void __ipipe_pin_range_globally(unsigned long start, unsigned long end)
1152
{
1153
#ifdef CONFIG_X86_32
1154
	unsigned long next, addr = start;
1155
1156
	do {
1157
		unsigned long flags;
1158
		struct page *page;
1159
1160
		next = pgd_addr_end(addr, end);
1161
		spin_lock_irqsave(&pgd_lock, flags);
1162
		list_for_each_entry(page, &pgd_list, lru)
1163
			vmalloc_sync_one(page_address(page), addr);
1164
		spin_unlock_irqrestore(&pgd_lock, flags);
1165
1166
	} while (addr = next, addr != end);
1167
#else
1168
	unsigned long next, addr = start;
1169
	int ret = 0;
1170
1171
	do {
1172
		struct page *page;
1173
1174
		next = pgd_addr_end(addr, end);
1175
		spin_lock(&pgd_lock);
1176
		list_for_each_entry(page, &pgd_list, lru) {
1177
			pgd_t *pgd;
1178
			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
1179
			ret = vmalloc_sync_one(pgd, addr);
1180
			if (ret)
1181
				break;
1182
		}
1183
		spin_unlock(&pgd_lock);
1184
		addr = next;
1185
	} while (!ret && addr != end);
1186
#endif
1187
}
1188
#endif /* CONFIG_IPIPE */
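The fault.c changes factor the 64-bit PGD synchronization into vmalloc_sync_one() and add __ipipe_pin_range_globally(), which walks pgd_list and propagates the kernel mappings for a virtual range into every page directory up front, so no lazy vmalloc fault is ever taken on that range from a head domain. A hypothetical usage sketch; the helper name, buffer and error handling are illustrative, only __ipipe_pin_range_globally() comes from the patch.

	#include <linux/vmalloc.h>
	#include <linux/ipipe.h>

	static void *demo_pinned_vmalloc(size_t size)
	{
		void *p = vmalloc(size);

		if (p)
			/* Propagate the mappings into every pgd now, not at fault time. */
			__ipipe_pin_range_globally((unsigned long)p,
						   (unsigned long)p + size);
		return p;
	}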
(-)a/arch/x86/mm/tlb.c (+7 lines)
Lines 57-67 static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS]; Link Here
57
 */
57
 */
58
void leave_mm(int cpu)
58
void leave_mm(int cpu)
59
{
59
{
60
 	unsigned long flags;
61
60
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
62
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
61
		BUG();
63
		BUG();
64
 	local_irq_save_hw_cond(flags);
62
	cpumask_clear_cpu(cpu,
65
	cpumask_clear_cpu(cpu,
63
			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
66
			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
64
	load_cr3(swapper_pg_dir);
67
	load_cr3(swapper_pg_dir);
68
 	local_irq_restore_hw_cond(flags);
65
}
69
}
66
EXPORT_SYMBOL_GPL(leave_mm);
70
EXPORT_SYMBOL_GPL(leave_mm);
67
71
Lines 192-197 static void flush_tlb_others_ipi(const struct cpumask *cpumask, Link Here
192
		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
196
		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
193
			      INVALIDATE_TLB_VECTOR_START + sender);
197
			      INVALIDATE_TLB_VECTOR_START + sender);
194
198
199
#ifdef CONFIG_IPIPE
200
		WARN_ON_ONCE(irqs_disabled_hw());
201
#endif
195
		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
202
		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
196
			cpu_relax();
203
			cpu_relax();
197
	}
204
	}
(-)a/drivers/pci/htirq.c (-1 / +1 lines)
Lines 21-27 Link Here
21
 * With multiple simultaneous hypertransport irq devices it might pay
21
 * With multiple simultaneous hypertransport irq devices it might pay
22
 * to make this more fine grained.  But start with simple, stupid, and correct.
22
 * to make this more fine grained.  But start with simple, stupid, and correct.
23
 */
23
 */
24
static DEFINE_SPINLOCK(ht_irq_lock);
24
static IPIPE_DEFINE_SPINLOCK(ht_irq_lock);
25
25
26
struct ht_irq_cfg {
26
struct ht_irq_cfg {
27
	struct pci_dev *dev;
27
	struct pci_dev *dev;
(-)a/drivers/serial/8250.c (+47 lines)
Lines 3016-3021 static int serial8250_resume(struct platform_device *dev) Link Here
3016
	return 0;
3016
	return 0;
3017
}
3017
}
3018
3018
3019
#if defined(CONFIG_IPIPE_DEBUG) && defined(CONFIG_SERIAL_8250_CONSOLE)
3020
3021
#include <stdarg.h>
3022
3023
void __weak __ipipe_serial_debug(const char *fmt, ...)
3024
{
3025
        struct uart_8250_port *up = &serial8250_ports[0];
3026
        unsigned int ier, count;
3027
        unsigned long flags;
3028
        char buf[128];
3029
        va_list ap;
3030
3031
        va_start(ap, fmt);
3032
        vsprintf(buf, fmt, ap);
3033
        va_end(ap);
3034
        count = strlen(buf);
3035
3036
        touch_nmi_watchdog();
3037
3038
        local_irq_save_hw(flags);
3039
3040
        /*
3041
         *      First save the IER then disable the interrupts
3042
        */
3043
        ier = serial_in(up, UART_IER);
3044
3045
        if (up->capabilities & UART_CAP_UUE)
3046
                serial_out(up, UART_IER, UART_IER_UUE);
3047
        else
3048
                serial_out(up, UART_IER, 0);
3049
3050
        uart_console_write(&up->port, buf, count, serial8250_console_putchar);
3051
3052
        /*
3053
         *      Finally, wait for transmitter to become empty
3054
         *      and restore the IER
3055
         */
3056
        wait_for_xmitr(up, BOTH_EMPTY);
3057
        serial_out(up, UART_IER, ier);
3058
3059
        local_irq_restore_hw(flags);
3060
}
3061
3062
EXPORT_SYMBOL(__ipipe_serial_debug);
3063
3064
#endif
3065
3019
static struct platform_driver serial8250_isa_driver = {
3066
static struct platform_driver serial8250_isa_driver = {
3020
	.probe		= serial8250_probe,
3067
	.probe		= serial8250_probe,
3021
	.remove		= __devexit_p(serial8250_remove),
3068
	.remove		= __devexit_p(serial8250_remove),
(-)a/fs/exec.c (+3 lines)
Lines 715-720 static int exec_mmap(struct mm_struct *mm) Link Here
715
{
715
{
716
	struct task_struct *tsk;
716
	struct task_struct *tsk;
717
	struct mm_struct * old_mm, *active_mm;
717
	struct mm_struct * old_mm, *active_mm;
718
	unsigned long flags;
718
719
719
	/* Notify parent that we're no longer interested in the old VM */
720
	/* Notify parent that we're no longer interested in the old VM */
720
	tsk = current;
721
	tsk = current;
Lines 737-744 static int exec_mmap(struct mm_struct *mm) Link Here
737
	task_lock(tsk);
738
	task_lock(tsk);
738
	active_mm = tsk->active_mm;
739
	active_mm = tsk->active_mm;
739
	tsk->mm = mm;
740
	tsk->mm = mm;
741
	ipipe_mm_switch_protect(flags);
740
	tsk->active_mm = mm;
742
	tsk->active_mm = mm;
741
	activate_mm(active_mm, mm);
743
	activate_mm(active_mm, mm);
744
	ipipe_mm_switch_unprotect(flags);
742
	task_unlock(tsk);
745
	task_unlock(tsk);
743
	arch_pick_mmap_layout(mm);
746
	arch_pick_mmap_layout(mm);
744
	if (old_mm) {
747
	if (old_mm) {
(-)a/include/asm-generic/atomic.h (-6 / +6 lines)
Lines 60-70 static inline int atomic_add_return(int i, atomic_t *v) Link Here
60
	unsigned long flags;
60
	unsigned long flags;
61
	int temp;
61
	int temp;
62
62
63
	local_irq_save(flags);
63
	local_irq_save_hw(flags);
64
	temp = v->counter;
64
	temp = v->counter;
65
	temp += i;
65
	temp += i;
66
	v->counter = temp;
66
	v->counter = temp;
67
	local_irq_restore(flags);
67
	local_irq_restore_hw(flags);
68
68
69
	return temp;
69
	return temp;
70
}
70
}
Lines 82-92 static inline int atomic_sub_return(int i, atomic_t *v) Link Here
82
	unsigned long flags;
82
	unsigned long flags;
83
	int temp;
83
	int temp;
84
84
85
	local_irq_save(flags);
85
	local_irq_save_hw(flags);
86
	temp = v->counter;
86
	temp = v->counter;
87
	temp -= i;
87
	temp -= i;
88
	v->counter = temp;
88
	v->counter = temp;
89
	local_irq_restore(flags);
89
	local_irq_restore_hw(flags);
90
90
91
	return temp;
91
	return temp;
92
}
92
}
Lines 139-147 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) Link Here
139
	unsigned long flags;
139
	unsigned long flags;
140
140
141
	mask = ~mask;
141
	mask = ~mask;
142
	local_irq_save(flags);
142
	local_irq_save_hw(flags);
143
	*addr &= mask;
143
	*addr &= mask;
144
	local_irq_restore(flags);
144
	local_irq_restore_hw(flags);
145
}
145
}
146
146
147
#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
147
#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
(-)a/include/asm-generic/bitops/atomic.h (-4 / +4 lines)
Lines 21-40 extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; Link Here
21
 * this is the substitute */
21
 * this is the substitute */
22
#define _atomic_spin_lock_irqsave(l,f) do {	\
22
#define _atomic_spin_lock_irqsave(l,f) do {	\
23
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
23
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
24
	local_irq_save(f);			\
24
	local_irq_save_hw(f);			\
25
	__raw_spin_lock(s);			\
25
	__raw_spin_lock(s);			\
26
} while(0)
26
} while(0)
27
27
28
#define _atomic_spin_unlock_irqrestore(l,f) do {	\
28
#define _atomic_spin_unlock_irqrestore(l,f) do {	\
29
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
29
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
30
	__raw_spin_unlock(s);				\
30
	__raw_spin_unlock(s);				\
31
	local_irq_restore(f);				\
31
	local_irq_restore_hw(f);			\
32
} while(0)
32
} while(0)
33
33
34
34
35
#else
35
#else
36
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
36
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save_hw(f); } while (0)
37
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
37
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore_hw(f); } while (0)
38
#endif
38
#endif
39
39
40
/*
40
/*
(-)a/include/asm-generic/cmpxchg-local.h (-4 / +4 lines)
Lines 20-26 static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, Link Here
20
	if (size == 8 && sizeof(unsigned long) != 8)
20
	if (size == 8 && sizeof(unsigned long) != 8)
21
		wrong_size_cmpxchg(ptr);
21
		wrong_size_cmpxchg(ptr);
22
22
23
	local_irq_save(flags);
23
	local_irq_save_hw(flags);
24
	switch (size) {
24
	switch (size) {
25
	case 1: prev = *(u8 *)ptr;
25
	case 1: prev = *(u8 *)ptr;
26
		if (prev == old)
26
		if (prev == old)
Lines 41-47 static inline unsigned long __cmpxchg_local_generic(volatile void *ptr, Link Here
41
	default:
41
	default:
42
		wrong_size_cmpxchg(ptr);
42
		wrong_size_cmpxchg(ptr);
43
	}
43
	}
44
	local_irq_restore(flags);
44
	local_irq_restore_hw(flags);
45
	return prev;
45
	return prev;
46
}
46
}
47
47
Lines 54-64 static inline u64 __cmpxchg64_local_generic(volatile void *ptr, Link Here
54
	u64 prev;
54
	u64 prev;
55
	unsigned long flags;
55
	unsigned long flags;
56
56
57
	local_irq_save(flags);
57
	local_irq_save_hw(flags);
58
	prev = *(u64 *)ptr;
58
	prev = *(u64 *)ptr;
59
	if (prev == old)
59
	if (prev == old)
60
		*(u64 *)ptr = new;
60
		*(u64 *)ptr = new;
61
	local_irq_restore(flags);
61
	local_irq_restore_hw(flags);
62
	return prev;
62
	return prev;
63
}
63
}
64
64
(-)a/include/asm-generic/percpu.h (+15 lines)
Lines 56-61 extern unsigned long __per_cpu_offset[NR_CPUS]; Link Here
56
#define __raw_get_cpu_var(var) \
56
#define __raw_get_cpu_var(var) \
57
	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
57
	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
58
58
59
#ifdef CONFIG_IPIPE
60
#if defined(CONFIG_IPIPE_DEBUG_INTERNAL) && defined(CONFIG_SMP)
61
extern int __ipipe_check_percpu_access(void);
62
#define __ipipe_local_cpu_offset				\
63
	({							\
64
		WARN_ON_ONCE(__ipipe_check_percpu_access());	\
65
		__my_cpu_offset;				\
66
	})
67
#else
68
#define __ipipe_local_cpu_offset  __my_cpu_offset
69
#endif
70
#define __ipipe_get_cpu_var(var) \
71
	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __ipipe_local_cpu_offset))
72
#endif /* CONFIG_IPIPE */
59
73
60
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
74
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
61
extern void setup_per_cpu_areas(void);
75
extern void setup_per_cpu_areas(void);
Lines 66-71 extern void setup_per_cpu_areas(void); Link Here
66
#define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu_var(var)))
80
#define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu_var(var)))
67
#define __get_cpu_var(var)			per_cpu_var(var)
81
#define __get_cpu_var(var)			per_cpu_var(var)
68
#define __raw_get_cpu_var(var)			per_cpu_var(var)
82
#define __raw_get_cpu_var(var)			per_cpu_var(var)
83
#define __ipipe_get_cpu_var(var)		__raw_get_cpu_var(var)
69
84
70
#endif	/* SMP */
85
#endif	/* SMP */
71
86
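__ipipe_get_cpu_var() added above is a per-CPU accessor that, with CONFIG_IPIPE_DEBUG_INTERNAL on SMP, warns when it is used in a context where CPU migration is still possible; pipeline code therefore brackets it with the hard-IRQ save helpers, exactly as the ipipe_current_domain accessor in include/linux/ipipe.h does. A sketch of that access pattern, assuming CONFIG_IPIPE; the per-CPU variable is illustrative.

	#include <linux/percpu.h>
	#include <linux/ipipe.h>

	DEFINE_PER_CPU(unsigned long, demo_counter);

	static unsigned long demo_read_counter(void)
	{
		unsigned long flags, val;

		local_irq_save_hw_smp(flags);	/* pin to this CPU wrt the pipeline */
		val = __ipipe_get_cpu_var(demo_counter);
		local_irq_restore_hw_smp(flags);

		return val;
	}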
(-)a/include/linux/hardirq.h (-16 / +20 lines)
Lines 183-206 extern void irq_enter(void); Link Here
183
 */
183
 */
184
extern void irq_exit(void);
184
extern void irq_exit(void);
185
185
186
#define nmi_enter()						\
186
#define nmi_enter()							\
187
	do {							\
187
	do {								\
188
		ftrace_nmi_enter();				\
188
		if (likely(!ipipe_test_foreign_stack())) {		\
189
		BUG_ON(in_nmi());				\
189
			ftrace_nmi_enter();				\
190
		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
190
			BUG_ON(in_nmi());				\
191
		lockdep_off();					\
191
			add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
192
		rcu_nmi_enter();				\
192
			lockdep_off();					\
193
		trace_hardirq_enter();				\
193
			rcu_nmi_enter();				\
194
			trace_hardirq_enter();				\
195
		}							\
194
	} while (0)
196
	} while (0)
195
197
196
#define nmi_exit()						\
198
#define nmi_exit()							\
197
	do {							\
199
	do {								\
198
		trace_hardirq_exit();				\
200
		if (likely(!ipipe_test_foreign_stack())) {		\
199
		rcu_nmi_exit();					\
201
			trace_hardirq_exit();				\
200
		lockdep_on();					\
202
			rcu_nmi_exit();					\
201
		BUG_ON(!in_nmi());				\
203
			lockdep_on();					\
202
		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
204
			BUG_ON(!in_nmi());				\
203
		ftrace_nmi_exit();				\
205
			sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
206
			ftrace_nmi_exit();				\
207
		}							\
204
	} while (0)
208
	} while (0)
205
209
206
#endif /* LINUX_HARDIRQ_H */
210
#endif /* LINUX_HARDIRQ_H */
(-)a/include/linux/ipipe.h (+690 lines)
Line 0 Link Here
1
/* -*- linux-c -*-
2
 * include/linux/ipipe.h
3
 *
4
 * Copyright (C) 2002-2007 Philippe Gerum.
5
 *
6
 * This program is free software; you can redistribute it and/or modify
7
 * it under the terms of the GNU General Public License as published by
8
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
9
 * USA; either version 2 of the License, or (at your option) any later
10
 * version.
11
 *
12
 * This program is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15
 * GNU General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU General Public License
18
 * along with this program; if not, write to the Free Software
19
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20
 */
21
22
#ifndef __LINUX_IPIPE_H
23
#define __LINUX_IPIPE_H
24
25
#include <linux/spinlock.h>
26
#include <linux/cache.h>
27
#include <linux/percpu.h>
28
#include <linux/mutex.h>
29
#include <linux/linkage.h>
30
#include <linux/ipipe_base.h>
31
#include <linux/ipipe_compat.h>
32
#include <asm/ipipe.h>
33
34
#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
35
36
#include <linux/cpumask.h>
37
#include <asm/system.h>
38
39
static inline int ipipe_disable_context_check(int cpu)
40
{
41
	return xchg(&per_cpu(ipipe_percpu_context_check, cpu), 0);
42
}
43
44
static inline void ipipe_restore_context_check(int cpu, int old_state)
45
{
46
	per_cpu(ipipe_percpu_context_check, cpu) = old_state;
47
}
48
49
static inline void ipipe_context_check_off(void)
50
{
51
	int cpu;
52
	for_each_online_cpu(cpu)
53
		per_cpu(ipipe_percpu_context_check, cpu) = 0;
54
}
55
56
#else	/* !CONFIG_IPIPE_DEBUG_CONTEXT */
57
58
static inline int ipipe_disable_context_check(int cpu)
59
{
60
	return 0;
61
}
62
63
static inline void ipipe_restore_context_check(int cpu, int old_state) { }
64
65
static inline void ipipe_context_check_off(void) { }
66
67
#endif	/* !CONFIG_IPIPE_DEBUG_CONTEXT */
68
69
#ifdef CONFIG_IPIPE
70
71
#define IPIPE_VERSION_STRING	IPIPE_ARCH_STRING
72
#define IPIPE_RELEASE_NUMBER	((IPIPE_MAJOR_NUMBER << 16) | \
73
				 (IPIPE_MINOR_NUMBER <<  8) | \
74
				 (IPIPE_PATCH_NUMBER))
75
76
#ifndef BROKEN_BUILTIN_RETURN_ADDRESS
77
#define __BUILTIN_RETURN_ADDRESS0 ((unsigned long)__builtin_return_address(0))
78
#define __BUILTIN_RETURN_ADDRESS1 ((unsigned long)__builtin_return_address(1))
79
#endif /* !BUILTIN_RETURN_ADDRESS */
80
81
#define IPIPE_ROOT_PRIO		100
82
#define IPIPE_ROOT_ID		0
83
#define IPIPE_ROOT_NPTDKEYS	4	/* Must be <= BITS_PER_LONG */
84
85
#define IPIPE_RESET_TIMER	0x1
86
#define IPIPE_GRAB_TIMER	0x2
87
88
/* Global domain flags */
89
#define IPIPE_SPRINTK_FLAG	0	/* Synchronous printk() allowed */
90
#define IPIPE_AHEAD_FLAG	1	/* Domain always heads the pipeline */
91
92
/* Interrupt control bits */
93
#define IPIPE_HANDLE_FLAG	0
94
#define IPIPE_PASS_FLAG		1
95
#define IPIPE_ENABLE_FLAG	2
96
#define IPIPE_DYNAMIC_FLAG	IPIPE_HANDLE_FLAG
97
#define IPIPE_STICKY_FLAG	3
98
#define IPIPE_SYSTEM_FLAG	4
99
#define IPIPE_LOCK_FLAG		5
100
#define IPIPE_WIRED_FLAG	6
101
#define IPIPE_EXCLUSIVE_FLAG	7
102
103
#define IPIPE_HANDLE_MASK	(1 << IPIPE_HANDLE_FLAG)
104
#define IPIPE_PASS_MASK		(1 << IPIPE_PASS_FLAG)
105
#define IPIPE_ENABLE_MASK	(1 << IPIPE_ENABLE_FLAG)
106
#define IPIPE_DYNAMIC_MASK	IPIPE_HANDLE_MASK
107
#define IPIPE_STICKY_MASK	(1 << IPIPE_STICKY_FLAG)
108
#define IPIPE_SYSTEM_MASK	(1 << IPIPE_SYSTEM_FLAG)
109
#define IPIPE_LOCK_MASK		(1 << IPIPE_LOCK_FLAG)
110
#define IPIPE_WIRED_MASK	(1 << IPIPE_WIRED_FLAG)
111
#define IPIPE_EXCLUSIVE_MASK	(1 << IPIPE_EXCLUSIVE_FLAG)
112
113
#define IPIPE_DEFAULT_MASK	(IPIPE_HANDLE_MASK|IPIPE_PASS_MASK)
114
#define IPIPE_STDROOT_MASK	(IPIPE_HANDLE_MASK|IPIPE_PASS_MASK|IPIPE_SYSTEM_MASK)
115
116
#define IPIPE_EVENT_SELF        0x80000000
117
118
#define IPIPE_NR_CPUS		NR_CPUS
119
120
/* This accessor assumes hw IRQs are off on SMP; allows assignment. */
121
#define __ipipe_current_domain	__ipipe_get_cpu_var(ipipe_percpu_domain)
122
/* This read-only accessor makes sure that hw IRQs are off on SMP. */
123
#define ipipe_current_domain				\
124
	({						\
125
		struct ipipe_domain *__ipd__;		\
126
		unsigned long __flags__;		\
127
		local_irq_save_hw_smp(__flags__);	\
128
		__ipd__ = __ipipe_current_domain;	\
129
		local_irq_restore_hw_smp(__flags__);	\
130
		__ipd__;				\
131
	})
132
133
#define ipipe_virtual_irq_p(irq)	((irq) >= IPIPE_VIRQ_BASE && \
134
					 (irq) < IPIPE_NR_IRQS)
135
136
#define IPIPE_SAME_HANDLER	((ipipe_irq_handler_t)(-1))
137
138
struct irq_desc;
139
140
typedef void (*ipipe_irq_ackfn_t)(unsigned irq, struct irq_desc *desc);
141
142
typedef int (*ipipe_event_handler_t)(unsigned event,
143
				     struct ipipe_domain *from,
144
				     void *data);
145
struct ipipe_domain {
146
147
	int slot;			/* Slot number in percpu domain data array. */
148
	struct list_head p_link;	/* Link in pipeline */
149
	ipipe_event_handler_t evhand[IPIPE_NR_EVENTS]; /* Event handlers. */
150
	unsigned long long evself;	/* Self-monitored event bits. */
151
152
	struct irqdesc {
153
		unsigned long control;
154
		ipipe_irq_ackfn_t acknowledge;
155
		ipipe_irq_handler_t handler;
156
		void *cookie;
157
	} ____cacheline_aligned irqs[IPIPE_NR_IRQS];
158
159
	int priority;
160
	void *pdd;
161
	unsigned long flags;
162
	unsigned domid;
163
	const char *name;
164
	struct mutex mutex;
165
};
166
167
#define IPIPE_HEAD_PRIORITY	(-1) /* For domains always heading the pipeline */
168
169
struct ipipe_domain_attr {
170
171
	unsigned domid;		/* Domain identifier -- Magic value set by caller */
172
	const char *name;	/* Domain name -- Warning: won't be dup'ed! */
173
	int priority;		/* Priority in interrupt pipeline */
174
	void (*entry) (void);	/* Domain entry point */
175
	void *pdd;		/* Per-domain (opaque) data pointer */
176
};
177
178
#define __ipipe_irq_cookie(ipd, irq)		(ipd)->irqs[irq].cookie
179
#define __ipipe_irq_handler(ipd, irq)		(ipd)->irqs[irq].handler
180
#define __ipipe_cpudata_irq_hits(ipd, cpu, irq)	ipipe_percpudom(ipd, irqall, cpu)[irq]
181
182
extern unsigned __ipipe_printk_virq;
183
184
extern unsigned long __ipipe_virtual_irq_map;
185
186
extern struct list_head __ipipe_pipeline;
187
188
extern int __ipipe_event_monitors[];
189
190
/* Private interface */
191
192
void ipipe_init_early(void);
193
194
void ipipe_init(void);
195
196
#ifdef CONFIG_PROC_FS
197
void ipipe_init_proc(void);
198
199
#ifdef CONFIG_IPIPE_TRACE
200
void __ipipe_init_tracer(void);
201
#else /* !CONFIG_IPIPE_TRACE */
202
#define __ipipe_init_tracer()       do { } while(0)
203
#endif /* CONFIG_IPIPE_TRACE */
204
205
#else	/* !CONFIG_PROC_FS */
206
#define ipipe_init_proc()	do { } while(0)
207
#endif	/* CONFIG_PROC_FS */
208
209
void __ipipe_init_stage(struct ipipe_domain *ipd);
210
211
void __ipipe_cleanup_domain(struct ipipe_domain *ipd);
212
213
void __ipipe_add_domain_proc(struct ipipe_domain *ipd);
214
215
void __ipipe_remove_domain_proc(struct ipipe_domain *ipd);
216
217
void __ipipe_flush_printk(unsigned irq, void *cookie);
218
219
void __ipipe_walk_pipeline(struct list_head *pos);
220
221
void __ipipe_pend_irq(unsigned irq, struct list_head *head);
222
223
int __ipipe_dispatch_event(unsigned event, void *data);
224
225
void __ipipe_dispatch_wired_nocheck(struct ipipe_domain *head, unsigned irq);
226
227
void __ipipe_dispatch_wired(struct ipipe_domain *head, unsigned irq);
228
229
void __ipipe_sync_stage(int dovirt);
230
231
void __ipipe_set_irq_pending(struct ipipe_domain *ipd, unsigned irq);
232
233
void __ipipe_lock_irq(struct ipipe_domain *ipd, int cpu, unsigned irq);
234
235
void __ipipe_unlock_irq(struct ipipe_domain *ipd, unsigned irq);
236
237
void __ipipe_pin_range_globally(unsigned long start, unsigned long end);
238
239
/* Must be called hw IRQs off. */
240
static inline void ipipe_irq_lock(unsigned irq)
241
{
242
	__ipipe_lock_irq(__ipipe_current_domain, ipipe_processor_id(), irq);
243
}
244
245
/* Must be called hw IRQs off. */
246
static inline void ipipe_irq_unlock(unsigned irq)
247
{
248
	__ipipe_unlock_irq(__ipipe_current_domain, irq);
249
}
250
251
#ifndef __ipipe_sync_pipeline
252
#define __ipipe_sync_pipeline(dovirt) __ipipe_sync_stage(dovirt)
253
#endif
254
255
#ifndef __ipipe_run_irqtail
256
#define __ipipe_run_irqtail() do { } while(0)
257
#endif
258
259
#define __ipipe_pipeline_head_p(ipd) (&(ipd)->p_link == __ipipe_pipeline.next)
260
261
#define __ipipe_ipending_p(p)	((p)->irqpend_himap != 0)
262
263
/*
264
 * Keep the following as a macro, so that client code could check for
265
 * the support of the invariant pipeline head optimization.
266
 */
267
#define __ipipe_pipeline_head() \
268
	list_entry(__ipipe_pipeline.next, struct ipipe_domain, p_link)
269
270
#define local_irq_enable_hw_cond()		local_irq_enable_hw()
271
#define local_irq_disable_hw_cond()		local_irq_disable_hw()
272
#define local_irq_save_hw_cond(flags)		local_irq_save_hw(flags)
273
#define local_irq_restore_hw_cond(flags)	local_irq_restore_hw(flags)
274
275
#ifdef CONFIG_SMP
276
cpumask_t __ipipe_set_irq_affinity(unsigned irq, cpumask_t cpumask);
277
int __ipipe_send_ipi(unsigned ipi, cpumask_t cpumask);
278
#define local_irq_save_hw_smp(flags)		local_irq_save_hw(flags)
279
#define local_irq_restore_hw_smp(flags)		local_irq_restore_hw(flags)
280
#else /* !CONFIG_SMP */
281
#define local_irq_save_hw_smp(flags)		do { (void)(flags); } while(0)
282
#define local_irq_restore_hw_smp(flags)		do { } while(0)
283
#endif /* CONFIG_SMP */
284
285
#define local_irq_save_full(vflags, rflags)		\
286
	do {						\
287
		local_irq_save(vflags);			\
288
		local_irq_save_hw(rflags);		\
289
	} while(0)
290
291
#define local_irq_restore_full(vflags, rflags)		\
292
	do {						\
293
		local_irq_restore_hw(rflags);		\
294
		local_irq_restore(vflags);		\
295
	} while(0)
296
297
static inline void __local_irq_restore_nosync(unsigned long x)
298
{
299
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
300
301
	if (raw_irqs_disabled_flags(x)) {
302
		set_bit(IPIPE_STALL_FLAG, &p->status);
303
		trace_hardirqs_off();
304
	} else {
305
		trace_hardirqs_on();
306
		clear_bit(IPIPE_STALL_FLAG, &p->status);
307
	}
308
}
309
310
static inline void local_irq_restore_nosync(unsigned long x)
311
{
312
	unsigned long flags;
313
	local_irq_save_hw_smp(flags);
314
	__local_irq_restore_nosync(x);
315
	local_irq_restore_hw_smp(flags);
316
}
317
318
#define __ipipe_root_domain_p	(__ipipe_current_domain == ipipe_root_domain)
319
#define ipipe_root_domain_p	(ipipe_current_domain == ipipe_root_domain)
320
321
static inline int __ipipe_event_monitored_p(int ev)
322
{
323
	if (__ipipe_event_monitors[ev] > 0)
324
		return 1;
325
326
	return (ipipe_current_domain->evself & (1LL << ev)) != 0;
327
}
328
329
#define ipipe_sigwake_notify(p)	\
330
do {					\
331
	if (((p)->flags & PF_EVNOTIFY) && __ipipe_event_monitored_p(IPIPE_EVENT_SIGWAKE)) \
332
		__ipipe_dispatch_event(IPIPE_EVENT_SIGWAKE, p);		\
333
} while(0)
334
335
#define ipipe_exit_notify(p)	\
336
do {				\
337
	if (((p)->flags & PF_EVNOTIFY) && __ipipe_event_monitored_p(IPIPE_EVENT_EXIT)) \
338
		__ipipe_dispatch_event(IPIPE_EVENT_EXIT, p);		\
339
} while(0)
340
341
#define ipipe_setsched_notify(p)	\
342
do {					\
343
	if (((p)->flags & PF_EVNOTIFY) && __ipipe_event_monitored_p(IPIPE_EVENT_SETSCHED)) \
344
		__ipipe_dispatch_event(IPIPE_EVENT_SETSCHED, p);	\
345
} while(0)
346
347
#define ipipe_schedule_notify(prev, next)				\
348
do {									\
349
	if ((((prev)->flags|(next)->flags) & PF_EVNOTIFY) &&		\
350
	    __ipipe_event_monitored_p(IPIPE_EVENT_SCHEDULE))		\
351
		__ipipe_dispatch_event(IPIPE_EVENT_SCHEDULE,next);	\
352
} while(0)
353
354
#define ipipe_trap_notify(ex, regs)					\
355
({									\
356
	unsigned long __flags__;					\
357
	int __ret__ = 0;						\
358
	local_irq_save_hw_smp(__flags__);				\
359
	if ((test_bit(IPIPE_NOSTACK_FLAG, &ipipe_this_cpudom_var(status)) || \
360
	     ((current)->flags & PF_EVNOTIFY)) &&			\
361
	    __ipipe_event_monitored_p(ex)) {				\
362
		local_irq_restore_hw_smp(__flags__);			\
363
		__ret__ = __ipipe_dispatch_event(ex, regs);		\
364
	} else								\
365
		local_irq_restore_hw_smp(__flags__);			\
366
	__ret__;							\
367
})
368
369
static inline void ipipe_init_notify(struct task_struct *p)
370
{
371
	if (__ipipe_event_monitored_p(IPIPE_EVENT_INIT))
372
		__ipipe_dispatch_event(IPIPE_EVENT_INIT, p);
373
}
374
375
struct mm_struct;
376
377
static inline void ipipe_cleanup_notify(struct mm_struct *mm)
378
{
379
	if (__ipipe_event_monitored_p(IPIPE_EVENT_CLEANUP))
380
		__ipipe_dispatch_event(IPIPE_EVENT_CLEANUP, mm);
381
}
382
383
/* Public interface */
384
385
int ipipe_register_domain(struct ipipe_domain *ipd,
386
			  struct ipipe_domain_attr *attr);
387
388
int ipipe_unregister_domain(struct ipipe_domain *ipd);
389
390
void ipipe_suspend_domain(void);
391
392
int ipipe_virtualize_irq(struct ipipe_domain *ipd,
393
			 unsigned irq,
394
			 ipipe_irq_handler_t handler,
395
			 void *cookie,
396
			 ipipe_irq_ackfn_t acknowledge,
397
			 unsigned modemask);
398
399
int ipipe_control_irq(unsigned irq,
400
		      unsigned clrmask,
401
		      unsigned setmask);
402
403
unsigned ipipe_alloc_virq(void);
404
405
int ipipe_free_virq(unsigned virq);
406
407
int ipipe_trigger_irq(unsigned irq);
408
409
static inline void __ipipe_propagate_irq(unsigned irq)
410
{
411
	struct list_head *next = __ipipe_current_domain->p_link.next;
412
	if (next == &ipipe_root.p_link) {
413
		/* Fast path: root must handle all interrupts. */
414
		__ipipe_set_irq_pending(&ipipe_root, irq);
415
		return;
416
	}
417
	__ipipe_pend_irq(irq, next);
418
}
419
420
static inline void __ipipe_schedule_irq(unsigned irq)
421
{
422
	__ipipe_pend_irq(irq, &__ipipe_current_domain->p_link);
423
}
424
425
static inline void __ipipe_schedule_irq_head(unsigned irq)
426
{
427
	__ipipe_set_irq_pending(__ipipe_pipeline_head(), irq);
428
}
429
430
static inline void __ipipe_schedule_irq_root(unsigned irq)
431
{
432
	__ipipe_set_irq_pending(&ipipe_root, irq);
433
}
434
435
static inline void ipipe_propagate_irq(unsigned irq)
436
{
437
	unsigned long flags;
438
439
	local_irq_save_hw(flags);
440
	__ipipe_propagate_irq(irq);
441
	local_irq_restore_hw(flags);
442
}
443
444
static inline void ipipe_schedule_irq(unsigned irq)
445
{
446
	unsigned long flags;
447
448
	local_irq_save_hw(flags);
449
	__ipipe_schedule_irq(irq);
450
	local_irq_restore_hw(flags);
451
}
452
453
static inline void ipipe_schedule_irq_head(unsigned irq)
454
{
455
	unsigned long flags;
456
457
	local_irq_save_hw(flags);
458
	__ipipe_schedule_irq_head(irq);
459
	local_irq_restore_hw(flags);
460
}
461
462
static inline void ipipe_schedule_irq_root(unsigned irq)
463
{
464
	unsigned long flags;
465
466
	local_irq_save_hw(flags);
467
	__ipipe_schedule_irq_root(irq);
468
	local_irq_restore_hw(flags);
469
}
470
471
void ipipe_stall_pipeline_from(struct ipipe_domain *ipd);
472
473
unsigned long ipipe_test_and_stall_pipeline_from(struct ipipe_domain *ipd);
474
475
unsigned long ipipe_test_and_unstall_pipeline_from(struct ipipe_domain *ipd);
476
477
static inline void ipipe_unstall_pipeline_from(struct ipipe_domain *ipd)
478
{
479
	ipipe_test_and_unstall_pipeline_from(ipd);
480
}
481
482
void ipipe_restore_pipeline_from(struct ipipe_domain *ipd,
483
					  unsigned long x);
484
485
static inline unsigned long ipipe_test_pipeline_from(struct ipipe_domain *ipd)
486
{
487
	return test_bit(IPIPE_STALL_FLAG, &ipipe_cpudom_var(ipd, status));
488
}
489
490
static inline void ipipe_stall_pipeline_head(void)
491
{
492
	local_irq_disable_hw();
493
	__set_bit(IPIPE_STALL_FLAG, &ipipe_head_cpudom_var(status));
494
}
495
496
static inline unsigned long ipipe_test_and_stall_pipeline_head(void)
497
{
498
	local_irq_disable_hw();
499
	return __test_and_set_bit(IPIPE_STALL_FLAG, &ipipe_head_cpudom_var(status));
500
}
501
502
void ipipe_unstall_pipeline_head(void);
503
504
void __ipipe_restore_pipeline_head(unsigned long x);
505
506
static inline void ipipe_restore_pipeline_head(unsigned long x)
507
{
508
	/* On some archs, __test_and_set_bit() might return different
509
	 * truth value than test_bit(), so we test the exclusive OR of
510
	 * both statuses, assuming that the lowest bit is always set in
511
	 * the truth value (if this is wrong, the failed optimization will
512
	 * be caught in __ipipe_restore_pipeline_head() if
513
	 * CONFIG_DEBUG_KERNEL is set). */
514
	if ((x ^ test_bit(IPIPE_STALL_FLAG, &ipipe_head_cpudom_var(status))) & 1)
515
		__ipipe_restore_pipeline_head(x);
516
}
517
518
#define ipipe_unstall_pipeline() \
519
	ipipe_unstall_pipeline_from(ipipe_current_domain)
520
521
#define ipipe_test_and_unstall_pipeline() \
522
	ipipe_test_and_unstall_pipeline_from(ipipe_current_domain)
523
524
#define ipipe_test_pipeline() \
525
	ipipe_test_pipeline_from(ipipe_current_domain)
526
527
#define ipipe_test_and_stall_pipeline() \
528
	ipipe_test_and_stall_pipeline_from(ipipe_current_domain)
529
530
#define ipipe_stall_pipeline() \
531
	ipipe_stall_pipeline_from(ipipe_current_domain)
532
533
#define ipipe_restore_pipeline(x) \
534
	ipipe_restore_pipeline_from(ipipe_current_domain, (x))
535
536
void ipipe_init_attr(struct ipipe_domain_attr *attr);
537
538
int ipipe_get_sysinfo(struct ipipe_sysinfo *sysinfo);
539
540
unsigned long ipipe_critical_enter(void (*syncfn) (void));
541
542
void ipipe_critical_exit(unsigned long flags);
543
544
static inline void ipipe_set_printk_sync(struct ipipe_domain *ipd)
545
{
546
	set_bit(IPIPE_SPRINTK_FLAG, &ipd->flags);
547
}
548
549
static inline void ipipe_set_printk_async(struct ipipe_domain *ipd)
550
{
551
	clear_bit(IPIPE_SPRINTK_FLAG, &ipd->flags);
552
}
553
554
static inline void ipipe_set_foreign_stack(struct ipipe_domain *ipd)
555
{
556
	/* Must be called hw interrupts off. */
557
	__set_bit(IPIPE_NOSTACK_FLAG, &ipipe_cpudom_var(ipd, status));
558
}
559
560
static inline void ipipe_clear_foreign_stack(struct ipipe_domain *ipd)
561
{
562
	/* Must be called hw interrupts off. */
563
	__clear_bit(IPIPE_NOSTACK_FLAG, &ipipe_cpudom_var(ipd, status));
564
}
565
566
static inline int ipipe_test_foreign_stack(void)
567
{
568
	/* Must be called hw interrupts off. */
569
	return test_bit(IPIPE_NOSTACK_FLAG, &ipipe_this_cpudom_var(status));
570
}
571
572
#ifndef ipipe_safe_current
573
#define ipipe_safe_current()					\
574
({								\
575
	struct task_struct *p;					\
576
	unsigned long flags;					\
577
	local_irq_save_hw_smp(flags);				\
578
	p = ipipe_test_foreign_stack() ? &init_task : current;	\
579
	local_irq_restore_hw_smp(flags);			\
580
	p; \
581
})
582
#endif
583
584
ipipe_event_handler_t ipipe_catch_event(struct ipipe_domain *ipd,
585
					unsigned event,
586
					ipipe_event_handler_t handler);
587
588
cpumask_t ipipe_set_irq_affinity(unsigned irq,
589
				 cpumask_t cpumask);
590
591
int ipipe_send_ipi(unsigned ipi,
592
		   cpumask_t cpumask);
593
594
int ipipe_setscheduler_root(struct task_struct *p,
595
			    int policy,
596
			    int prio);
597
598
int ipipe_reenter_root(struct task_struct *prev,
599
		       int policy,
600
		       int prio);
601
602
int ipipe_alloc_ptdkey(void);
603
604
int ipipe_free_ptdkey(int key);
605
606
int ipipe_set_ptd(int key,
607
		  void *value);
608
609
void *ipipe_get_ptd(int key);
610
611
int ipipe_disable_ondemand_mappings(struct task_struct *tsk);
612
613
static inline void ipipe_nmi_enter(void)
614
{
615
	int cpu = ipipe_processor_id();
616
617
	per_cpu(ipipe_nmi_saved_root, cpu) = ipipe_root_cpudom_var(status);
618
	__set_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
619
620
#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
621
	per_cpu(ipipe_saved_context_check_state, cpu) =
622
		ipipe_disable_context_check(cpu);
623
#endif /* CONFIG_IPIPE_DEBUG_CONTEXT */
624
}
625
626
static inline void ipipe_nmi_exit(void)
627
{
628
	int cpu = ipipe_processor_id();
629
630
#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
631
	ipipe_restore_context_check
632
		(cpu, per_cpu(ipipe_saved_context_check_state, cpu));
633
#endif /* CONFIG_IPIPE_DEBUG_CONTEXT */
634
635
	if (!test_bit(IPIPE_STALL_FLAG, &per_cpu(ipipe_nmi_saved_root, cpu)))
636
		__clear_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
637
}
638
639
#else	/* !CONFIG_IPIPE */
640
641
#define ipipe_init_early()		do { } while(0)
642
#define ipipe_init()			do { } while(0)
643
#define ipipe_suspend_domain()		do { } while(0)
644
#define ipipe_sigwake_notify(p)		do { } while(0)
645
#define ipipe_setsched_notify(p)	do { } while(0)
646
#define ipipe_init_notify(p)		do { } while(0)
647
#define ipipe_exit_notify(p)		do { } while(0)
648
#define ipipe_cleanup_notify(mm)	do { } while(0)
649
#define ipipe_trap_notify(t,r)		0
650
#define ipipe_init_proc()		do { } while(0)
651
652
static inline void __ipipe_pin_range_globally(unsigned long start,
653
					      unsigned long end)
654
{
655
}
656
657
static inline int ipipe_test_foreign_stack(void)
658
{
659
	return 0;
660
}
661
662
#define local_irq_enable_hw_cond()		do { } while(0)
663
#define local_irq_disable_hw_cond()		do { } while(0)
664
#define local_irq_save_hw_cond(flags)		do { (void)(flags); } while(0)
665
#define local_irq_restore_hw_cond(flags)	do { } while(0)
666
#define local_irq_save_hw_smp(flags)		do { (void)(flags); } while(0)
667
#define local_irq_restore_hw_smp(flags)		do { } while(0)
668
669
#define ipipe_irq_lock(irq)		do { } while(0)
670
#define ipipe_irq_unlock(irq)		do { } while(0)
671
672
#define __ipipe_root_domain_p		1
673
#define ipipe_root_domain_p		1
674
#define ipipe_safe_current		current
675
#define ipipe_processor_id()		smp_processor_id()
676
677
#define ipipe_nmi_enter()		do { } while (0)
678
#define ipipe_nmi_exit()		do { } while (0)
679
680
#define local_irq_disable_head()	local_irq_disable()
681
682
#define local_irq_save_full(vflags, rflags)	do { (void)(vflags); local_irq_save(rflags); } while(0)
683
#define local_irq_restore_full(vflags, rflags)	do { (void)(vflags); local_irq_restore(rflags); } while(0)
684
#define local_irq_restore_nosync(vflags)	local_irq_restore(vflags)
685
686
#define __ipipe_pipeline_head_p(ipd)	1
687
688
#endif	/* CONFIG_IPIPE */
689
690
#endif	/* !__LINUX_IPIPE_H */
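include/linux/ipipe.h above is the public interface a co-kernel uses: it registers a domain ahead of Linux in the pipeline, then virtualizes the interrupts it wants to see first and decides per IRQ whether to pass them down. A condensed, hypothetical registration sketch built only from the declarations above; the domain name, magic, IRQ number and handler body are illustrative, and error handling is minimal.

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/ipipe.h>

	static struct ipipe_domain demo_domain;

	static void demo_irq_handler(unsigned int irq, void *cookie)
	{
		/* ... time-critical work first, then hand the IRQ down to Linux ... */
		ipipe_propagate_irq(irq);
	}

	static int __init demo_register(void)
	{
		struct ipipe_domain_attr attr;
		int ret;

		ipipe_init_attr(&attr);
		attr.name = "demo";
		attr.domid = 0x44454d4f;		/* arbitrary magic */
		attr.priority = IPIPE_HEAD_PRIORITY;	/* head of the pipeline */

		ret = ipipe_register_domain(&demo_domain, &attr);
		if (ret)
			return ret;

		/* Take IRQ 4 in the demo domain before Linux sees it. */
		return ipipe_virtualize_irq(&demo_domain, 4, demo_irq_handler,
					    NULL, NULL, IPIPE_HANDLE_MASK);
	}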
(-)a/include/linux/ipipe_base.h (+118 lines)
Line 0 Link Here
1
/* -*- linux-c -*-
2
 * include/linux/ipipe_base.h
3
 *
4
 * Copyright (C) 2002-2007 Philippe Gerum.
5
 *               2007 Jan Kiszka.
6
 *
7
 * This program is free software; you can redistribute it and/or modify
8
 * it under the terms of the GNU General Public License as published by
9
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
10
 * USA; either version 2 of the License, or (at your option) any later
11
 * version.
12
 *
13
 * This program is distributed in the hope that it will be useful,
14
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16
 * GNU General Public License for more details.
17
 *
18
 * You should have received a copy of the GNU General Public License
19
 * along with this program; if not, write to the Free Software
20
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21
 */
22
23
#ifndef __LINUX_IPIPE_BASE_H
24
#define __LINUX_IPIPE_BASE_H
25
26
#ifdef CONFIG_IPIPE
27
28
#include <asm/ipipe_base.h>
29
30
#define __bpl_up(x)		(((x)+(BITS_PER_LONG-1)) & ~(BITS_PER_LONG-1))
31
/* Number of virtual IRQs (must be a multiple of BITS_PER_LONG) */
32
#define IPIPE_NR_VIRQS		BITS_PER_LONG
33
/* First virtual IRQ # (must be aligned on BITS_PER_LONG) */
34
#define IPIPE_VIRQ_BASE		__bpl_up(IPIPE_NR_XIRQS)
35
/* Total number of IRQ slots */
36
#define IPIPE_NR_IRQS		(IPIPE_VIRQ_BASE+IPIPE_NR_VIRQS)
37
38
#define IPIPE_IRQ_LOMAPSZ	(IPIPE_NR_IRQS / BITS_PER_LONG)
39
#if IPIPE_IRQ_LOMAPSZ > BITS_PER_LONG
40
/*
41
 * We need a 3-level mapping. This allows us to handle up to 32k IRQ
42
 * vectors on 32bit machines, 256k on 64bit ones.
43
 */
44
#define __IPIPE_3LEVEL_IRQMAP	1
45
#define IPIPE_IRQ_MDMAPSZ	(__bpl_up(IPIPE_IRQ_LOMAPSZ) / BITS_PER_LONG)
46
#else
47
/*
48
 * 2-level mapping is enough. This allows us to handle up to 1024 IRQ
49
 * vectors on 32bit machines, 4096 on 64bit ones.
50
 */
51
#define __IPIPE_2LEVEL_IRQMAP	1
52
#endif
53
54
#define IPIPE_IRQ_DOALL		0
55
#define IPIPE_IRQ_DOVIRT	1
56
57
/* Per-cpu pipeline status */
58
#define IPIPE_STALL_FLAG	0	/* Stalls a pipeline stage -- guaranteed at bit #0 */
59
#define IPIPE_SYNC_FLAG		1	/* The interrupt syncer is running for the domain */
60
#define IPIPE_NOSTACK_FLAG	2	/* Domain currently runs on a foreign stack */
61
62
#define IPIPE_STALL_MASK	(1L << IPIPE_STALL_FLAG)
63
#define IPIPE_SYNC_MASK		(1L << IPIPE_SYNC_FLAG)
64
#define IPIPE_NOSTACK_MASK	(1L << IPIPE_NOSTACK_FLAG)
65
66
typedef void (*ipipe_irq_handler_t)(unsigned int irq,
67
				    void *cookie);
68
69
extern struct ipipe_domain ipipe_root;
70
71
#define ipipe_root_domain (&ipipe_root)
72
73
void __ipipe_unstall_root(void);
74
75
void __ipipe_restore_root(unsigned long x);
76
77
#define ipipe_preempt_disable(flags)		\
78
	do {					\
79
		local_irq_save_hw(flags);	\
80
		if (__ipipe_root_domain_p)	\
81
			preempt_disable();	\
82
	} while (0)
83
84
#define ipipe_preempt_enable(flags)			\
85
	do {						\
86
		if (__ipipe_root_domain_p) {		\
87
			preempt_enable_no_resched();	\
88
			local_irq_restore_hw(flags);	\
89
			preempt_check_resched();	\
90
		} else					\
91
			local_irq_restore_hw(flags);	\
92
	} while (0)
93
 
94
#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
95
void ipipe_check_context(struct ipipe_domain *border_ipd);
96
#else /* !CONFIG_IPIPE_DEBUG_CONTEXT */
97
static inline void ipipe_check_context(struct ipipe_domain *border_ipd) { }
98
#endif /* !CONFIG_IPIPE_DEBUG_CONTEXT */
99
100
/* Generic features */
101
102
#ifdef CONFIG_GENERIC_CLOCKEVENTS
103
#define __IPIPE_FEATURE_REQUEST_TICKDEV    1
104
#endif
105
#define __IPIPE_FEATURE_DELAYED_ATOMICSW   1
106
#define __IPIPE_FEATURE_FASTPEND_IRQ       1
107
#define __IPIPE_FEATURE_TRACE_EVENT	   1
108
109
#else /* !CONFIG_IPIPE */
110
#define ipipe_preempt_disable(flags)	do { \
111
						preempt_disable(); \
112
						(void)(flags); \
113
					} while (0)
114
#define ipipe_preempt_enable(flags)	preempt_enable()
115
#define ipipe_check_context(ipd)	do { } while(0)
116
#endif	/* CONFIG_IPIPE */
117
118
#endif	/* !__LINUX_IPIPE_BASE_H */
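ipipe_preempt_disable()/ipipe_preempt_enable() defined above combine hardware IRQ masking with Linux preemption control, but only touch the preempt count when running over the root domain, so the same critical section is safe from either side of the pipeline. A short sketch; the body is illustrative.

	#include <linux/preempt.h>
	#include <linux/ipipe_base.h>

	static void demo_pipeline_safe_section(void)
	{
		unsigned long flags;

		/* Masks hw IRQs; bumps preempt_count only if in the root domain. */
		ipipe_preempt_disable(flags);
		/* ... short critical section shared by root and head domains ... */
		ipipe_preempt_enable(flags);
	}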
(-)a/include/linux/ipipe_compat.h (+54 lines)
Line 0 Link Here
1
/* -*- linux-c -*-
2
 * include/linux/ipipe_compat.h
3
 *
4
 * Copyright (C) 2007 Philippe Gerum.
5
 *
6
 * This program is free software; you can redistribute it and/or modify
7
 * it under the terms of the GNU General Public License as published by
8
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
9
 * USA; either version 2 of the License, or (at your option) any later
10
 * version.
11
 *
12
 * This program is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15
 * GNU General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU General Public License
18
 * along with this program; if not, write to the Free Software
19
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20
 */
21
22
#ifndef __LINUX_IPIPE_COMPAT_H
23
#define __LINUX_IPIPE_COMPAT_H
24
25
#ifdef CONFIG_IPIPE_COMPAT
26
/*
27
 * OBSOLETE: defined only for backward compatibility. Will be removed
28
 * in future releases, please update client code accordingly.
29
 */
30
31
#ifdef CONFIG_SMP
32
#define ipipe_declare_cpuid	int cpuid
33
#define ipipe_load_cpuid()	do { \
34
					cpuid = ipipe_processor_id();	\
35
				} while(0)
36
#define ipipe_lock_cpu(flags)	do { \
37
					local_irq_save_hw(flags); \
38
					cpuid = ipipe_processor_id(); \
39
				} while(0)
40
#define ipipe_unlock_cpu(flags)	local_irq_restore_hw(flags)
41
#define ipipe_get_cpu(flags)	ipipe_lock_cpu(flags)
42
#define ipipe_put_cpu(flags)	ipipe_unlock_cpu(flags)
43
#else /* !CONFIG_SMP */
44
#define ipipe_declare_cpuid	const int cpuid = 0
45
#define ipipe_load_cpuid()	do { } while(0)
46
#define ipipe_lock_cpu(flags)	local_irq_save_hw(flags)
47
#define ipipe_unlock_cpu(flags)	local_irq_restore_hw(flags)
48
#define ipipe_get_cpu(flags)	do { (void)(flags); } while(0)
49
#define ipipe_put_cpu(flags)	do { } while(0)
50
#endif /* CONFIG_SMP */
51
52
#endif /* CONFIG_IPIPE_COMPAT */
53
54
#endif	/* !__LINUX_IPIPE_COMPAT_H */
(-)a/include/linux/ipipe_lock.h (+144 lines)
Line 0 Link Here
1
/*   -*- linux-c -*-
2
 *   include/linux/ipipe_lock.h
3
 *
4
 *   Copyright (C) 2009 Philippe Gerum.
5
 *
6
 *   This program is free software; you can redistribute it and/or modify
7
 *   it under the terms of the GNU General Public License as published by
8
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
9
 *   USA; either version 2 of the License, or (at your option) any later
10
 *   version.
11
 *
12
 *   This program is distributed in the hope that it will be useful,
13
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
 *   GNU General Public License for more details.
16
 *
17
 *   You should have received a copy of the GNU General Public License
18
 *   along with this program; if not, write to the Free Software
19
 *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20
 */
21
22
#ifndef __LINUX_IPIPE_LOCK_H
23
#define __LINUX_IPIPE_LOCK_H
24
25
typedef struct {
26
	raw_spinlock_t bare_lock;
27
} __ipipe_spinlock_t;
28
29
#define ipipe_lock_p(lock)						\
30
	__builtin_types_compatible_p(typeof(lock), __ipipe_spinlock_t *)
31
32
#define common_lock_p(lock)						\
33
	__builtin_types_compatible_p(typeof(lock), spinlock_t *)
34
35
#define bare_lock(lock)	(&((__ipipe_spinlock_t *)(lock))->bare_lock)
36
#define std_lock(lock)	((spinlock_t *)(lock))
37
38
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
39
40
extern int __bad_spinlock_type(void);
41
#define PICK_SPINLOCK_IRQSAVE(lock, flags)				\
42
	do {								\
43
		if (ipipe_lock_p(lock))					\
44
			(flags) = __ipipe_spin_lock_irqsave(bare_lock(lock)); \
45
		else if (common_lock_p(lock))				\
46
			(flags) = _spin_lock_irqsave(std_lock(lock));	\
47
		else __bad_spinlock_type();				\
48
	} while (0)
49
50
#else /* !(CONFIG_SMP || CONFIG_DEBUG_SPINLOCK) */
51
52
#define PICK_SPINLOCK_IRQSAVE(lock, flags)				\
53
	do {								\
54
		if (ipipe_lock_p(lock))					\
55
			(flags) = __ipipe_spin_lock_irqsave(bare_lock(lock)); \
56
		else if (common_lock_p(lock))				\
57
			_spin_lock_irqsave(std_lock(lock), flags);	\
58
	} while (0)
59
60
#endif /* !(CONFIG_SMP || CONFIG_DEBUG_SPINLOCK) */
61
62
#define PICK_SPINUNLOCK_IRQRESTORE(lock, flags)				\
63
	do {								\
64
		if (ipipe_lock_p(lock))					\
65
			__ipipe_spin_unlock_irqrestore(bare_lock(lock), flags); \
66
		else if (common_lock_p(lock))				\
67
			_spin_unlock_irqrestore(std_lock(lock), flags); \
68
	} while (0)
69
70
#define PICK_SPINOP(op, lock)						\
71
	do {								\
72
		if (ipipe_lock_p(lock))					\
73
			__raw_spin##op(bare_lock(lock));		\
74
		else if (common_lock_p(lock))				\
75
			_spin##op(std_lock(lock));			\
76
	} while (0)
77
78
#define PICK_SPINOP_IRQ(op, lock)					\
79
	do {								\
80
		if (ipipe_lock_p(lock))					\
81
			__ipipe_spin##op##_irq(bare_lock(lock));	\
82
		else if (common_lock_p(lock))				\
83
			_spin##op##_irq(std_lock(lock));		\
84
	} while (0)
85
86
#define __raw_spin_lock_init(lock)					\
87
	do {								\
88
		IPIPE_DEFINE_SPINLOCK(__lock__);			\
89
		*((ipipe_spinlock_t *)lock) = __lock__;			\
90
	} while (0)
91
92
#ifdef CONFIG_IPIPE
93
94
#define ipipe_spinlock_t		__ipipe_spinlock_t
95
#define IPIPE_DEFINE_SPINLOCK(x)	ipipe_spinlock_t x = IPIPE_SPIN_LOCK_UNLOCKED
96
#define IPIPE_DECLARE_SPINLOCK(x)	extern ipipe_spinlock_t x
97
#define IPIPE_SPIN_LOCK_UNLOCKED	\
98
	(__ipipe_spinlock_t) {	.bare_lock = __RAW_SPIN_LOCK_UNLOCKED }
99
100
#define spin_lock_irqsave_cond(lock, flags) \
101
	spin_lock_irqsave(lock, flags)
102
103
#define spin_unlock_irqrestore_cond(lock, flags) \
104
	spin_unlock_irqrestore(lock, flags)
105
106
void __ipipe_spin_lock_irq(raw_spinlock_t *lock);
107
108
void __ipipe_spin_unlock_irq(raw_spinlock_t *lock);
109
110
unsigned long __ipipe_spin_lock_irqsave(raw_spinlock_t *lock);
111
112
void __ipipe_spin_unlock_irqrestore(raw_spinlock_t *lock,
113
				    unsigned long x);
114
115
void __ipipe_spin_unlock_irqbegin(ipipe_spinlock_t *lock);
116
117
void __ipipe_spin_unlock_irqcomplete(unsigned long x);
118
119
#else /* !CONFIG_IPIPE */
120
121
#define ipipe_spinlock_t		spinlock_t
122
#define IPIPE_DEFINE_SPINLOCK(x)	DEFINE_SPINLOCK(x)
123
#define IPIPE_DECLARE_SPINLOCK(x)	extern spinlock_t x
124
#define IPIPE_SPIN_LOCK_UNLOCKED        SPIN_LOCK_UNLOCKED
125
126
#define spin_lock_irqsave_cond(lock, flags)		\
127
	do {						\
128
		(void)(flags);				\
129
		spin_lock(lock);			\
130
	} while(0)
131
132
#define spin_unlock_irqrestore_cond(lock, flags)	\
133
	spin_unlock(lock)
134
135
#define __ipipe_spin_lock_irq(lock)		do { } while (0)
136
#define __ipipe_spin_unlock_irq(lock)		do { } while (0)
137
#define __ipipe_spin_lock_irqsave(lock)		0
138
#define __ipipe_spin_unlock_irqrestore(lock, x)	do { (void)(x); } while (0)
139
#define __ipipe_spin_unlock_irqbegin(lock)	do { } while (0)
140
#define __ipipe_spin_unlock_irqcomplete(x)	do { (void)(x); } while (0)
141
142
#endif /* !CONFIG_IPIPE */
143
144
#endif /* !__LINUX_IPIPE_LOCK_H */
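The PICK_SPINLOCK_* macros above dispatch on the static type of the lock with __builtin_types_compatible_p(), so a single spin_lock_irqsave() call site can end up either in the regular Linux implementation (spinlock_t) or in the pipeline-aware __ipipe_spin_lock_irqsave() (__ipipe_spinlock_t) at no runtime cost. A self-contained userspace sketch of that dispatch technique; the types and functions are illustrative, not the kernel's.

	#include <stdio.h>

	struct soft_lock { int x; };
	struct hard_lock { int x; };

	static void lock_soft(struct soft_lock *l) { (void)l; printf("soft path\n"); }
	static void lock_hard(struct hard_lock *l) { (void)l; printf("hard path\n"); }

	/* Compile-time dispatch on the pointer type, as PICK_SPINLOCK_IRQSAVE does. */
	#define demo_lock(l)							\
		do {								\
			if (__builtin_types_compatible_p(typeof(l), struct hard_lock *)) \
				lock_hard((struct hard_lock *)(void *)(l));	\
			else							\
				lock_soft((struct soft_lock *)(void *)(l));	\
		} while (0)

	int main(void)
	{
		struct soft_lock s;
		struct hard_lock h;

		demo_lock(&s);	/* prints "soft path" */
		demo_lock(&h);	/* prints "hard path" */
		return 0;
	}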
(-)a/include/linux/ipipe_percpu.h (+89 lines)
Line 0 Link Here
1
/*   -*- linux-c -*-
2
 *   include/linux/ipipe_percpu.h
3
 *
4
 *   Copyright (C) 2007 Philippe Gerum.
5
 *
6
 *   This program is free software; you can redistribute it and/or modify
7
 *   it under the terms of the GNU General Public License as published by
8
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
9
 *   USA; either version 2 of the License, or (at your option) any later
10
 *   version.
11
 *
12
 *   This program is distributed in the hope that it will be useful,
13
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
 *   GNU General Public License for more details.
16
 *
17
 *   You should have received a copy of the GNU General Public License
18
 *   along with this program; if not, write to the Free Software
19
 *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20
 */
21
22
#ifndef __LINUX_IPIPE_PERCPU_H
23
#define __LINUX_IPIPE_PERCPU_H
24
25
#include <asm/percpu.h>
26
#include <asm/ptrace.h>
27
28
struct ipipe_domain;
29
30
struct ipipe_percpu_domain_data {
31
	unsigned long status;	/* <= Must be first in struct. */
32
	unsigned long irqpend_himap;
33
#ifdef __IPIPE_3LEVEL_IRQMAP
34
	unsigned long irqpend_mdmap[IPIPE_IRQ_MDMAPSZ];
35
#endif
36
	unsigned long irqpend_lomap[IPIPE_IRQ_LOMAPSZ];
37
	unsigned long irqheld_map[IPIPE_IRQ_LOMAPSZ];
38
	unsigned long irqall[IPIPE_NR_IRQS];
39
	u64 evsync;
40
};
41
42
/*
43
 * CAREFUL: all accessors based on __raw_get_cpu_var() you may find in
44
 * this file should be used only while hw interrupts are off, to
45
 * prevent from CPU migration regardless of the running domain.
46
 */
47
#ifdef CONFIG_SMP
48
#define ipipe_percpudom_ptr(ipd, cpu)	\
49
	(&per_cpu(ipipe_percpu_darray, cpu)[(ipd)->slot])
50
#define ipipe_cpudom_ptr(ipd)	\
51
	(&__ipipe_get_cpu_var(ipipe_percpu_darray)[(ipd)->slot])
52
#else
53
DECLARE_PER_CPU(struct ipipe_percpu_domain_data *, ipipe_percpu_daddr[CONFIG_IPIPE_DOMAINS]);
54
#define ipipe_percpudom_ptr(ipd, cpu)	\
55
	(per_cpu(ipipe_percpu_daddr, cpu)[(ipd)->slot])
56
#define ipipe_cpudom_ptr(ipd)	\
57
	(__ipipe_get_cpu_var(ipipe_percpu_daddr)[(ipd)->slot])
58
#endif
59
#define ipipe_percpudom(ipd, var, cpu)	(ipipe_percpudom_ptr(ipd, cpu)->var)
60
#define ipipe_cpudom_var(ipd, var)	(ipipe_cpudom_ptr(ipd)->var)
61
62
#define IPIPE_ROOT_SLOT			0
63
#define IPIPE_HEAD_SLOT			(CONFIG_IPIPE_DOMAINS - 1)
64
65
DECLARE_PER_CPU(struct ipipe_percpu_domain_data, ipipe_percpu_darray[CONFIG_IPIPE_DOMAINS]);
66
67
DECLARE_PER_CPU(struct ipipe_domain *, ipipe_percpu_domain);
68
69
DECLARE_PER_CPU(unsigned long, ipipe_nmi_saved_root);
70
71
#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
72
DECLARE_PER_CPU(int, ipipe_percpu_context_check);
73
DECLARE_PER_CPU(int, ipipe_saved_context_check_state);
74
#endif
75
76
#define ipipe_root_cpudom_ptr()	\
77
	(&__ipipe_get_cpu_var(ipipe_percpu_darray)[IPIPE_ROOT_SLOT])
78
79
#define ipipe_root_cpudom_var(var)	ipipe_root_cpudom_ptr()->var
80
81
#define ipipe_this_cpudom_var(var)	\
82
	ipipe_cpudom_var(__ipipe_current_domain, var)
83
84
#define ipipe_head_cpudom_ptr()		\
85
	(&__ipipe_get_cpu_var(ipipe_percpu_darray)[IPIPE_HEAD_SLOT])
86
87
#define ipipe_head_cpudom_var(var)	ipipe_head_cpudom_ptr()->var
88
89
#endif	/* !__LINUX_IPIPE_PERCPU_H */
(-)a/include/linux/ipipe_tickdev.h (+58 lines)
Line 0 Link Here
1
/* -*- linux-c -*-
2
 * include/linux/ipipe_tickdev.h
3
 *
4
 * Copyright (C) 2007 Philippe Gerum.
5
 *
6
 * This program is free software; you can redistribute it and/or modify
7
 * it under the terms of the GNU General Public License as published by
8
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
9
 * USA; either version 2 of the License, or (at your option) any later
10
 * version.
11
 *
12
 * This program is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15
 * GNU General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU General Public License
18
 * along with this program; if not, write to the Free Software
19
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20
 */
21
22
#ifndef __LINUX_IPIPE_TICKDEV_H
23
#define __LINUX_IPIPE_TICKDEV_H
24
25
#if defined(CONFIG_IPIPE) && defined(CONFIG_GENERIC_CLOCKEVENTS)
26
27
#include <linux/clockchips.h>
28
29
struct tick_device;
30
31
struct ipipe_tick_device {
32
33
	void (*emul_set_mode)(enum clock_event_mode,
34
			      struct clock_event_device *cdev);
35
	int (*emul_set_tick)(unsigned long delta,
36
			     struct clock_event_device *cdev);
37
	void (*real_set_mode)(enum clock_event_mode mode,
38
			      struct clock_event_device *cdev);
39
	int (*real_set_tick)(unsigned long delta,
40
			     struct clock_event_device *cdev);
41
	struct tick_device *slave;
42
	unsigned long real_max_delta_ns;
43
	unsigned long real_mult;
44
	int real_shift;
45
};
46
47
int ipipe_request_tickdev(const char *devname,
48
			  void (*emumode)(enum clock_event_mode mode,
49
					  struct clock_event_device *cdev),
50
			  int (*emutick)(unsigned long evt,
51
					 struct clock_event_device *cdev),
52
			  int cpu, unsigned long *tmfreq);
53
54
void ipipe_release_tickdev(int cpu);
55
56
#endif /* CONFIG_IPIPE && CONFIG_GENERIC_CLOCKEVENTS */
57
58
#endif /* !__LINUX_IPIPE_TICKDEV_H */
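The declarations above are the whole interposition API; what follows is a rough sketch (not from the patch) of how a co-kernel could take over the per-CPU tick device, with placeholder emulation callbacks and a device name that is only an example:

/* Sketch only: hypothetical client of ipipe_request_tickdev(). */
#include <linux/ipipe_tickdev.h>

static void my_emul_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *cdev)
{
	/* ... switch the emulated timer between periodic/oneshot ... */
}

static int my_emul_set_tick(unsigned long delta,
			    struct clock_event_device *cdev)
{
	/* ... program the next shot ourselves ... */
	return 0;
}

static int grab_cpu_tick(int cpu)
{
	unsigned long tmfreq;
	int mode = ipipe_request_tickdev("lapic", my_emul_set_mode,
					 my_emul_set_tick, cpu, &tmfreq);

	if (mode == CLOCK_EVT_MODE_SHUTDOWN)
		return -ENODEV;	/* kernel tick device not usable */

	/* ... arm the emulated tick using tmfreq; later hand the
	 * hardware back with ipipe_release_tickdev(cpu) ... */
	return 0;
}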
(-)a/include/linux/ipipe_trace.h (+72 lines)
Line 0 Link Here
1
/* -*- linux-c -*-
2
 * include/linux/ipipe_trace.h
3
 *
4
 * Copyright (C) 2005 Luotao Fu.
5
 *               2005-2007 Jan Kiszka.
6
 *
7
 * This program is free software; you can redistribute it and/or modify
8
 * it under the terms of the GNU General Public License as published by
9
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
10
 * USA; either version 2 of the License, or (at your option) any later
11
 * version.
12
 *
13
 * This program is distributed in the hope that it will be useful,
14
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16
 * GNU General Public License for more details.
17
 *
18
 * You should have received a copy of the GNU General Public License
19
 * along with this program; if not, write to the Free Software
20
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21
 */
22
23
#ifndef _LINUX_IPIPE_TRACE_H
24
#define _LINUX_IPIPE_TRACE_H
25
26
#ifdef CONFIG_IPIPE_TRACE
27
28
#include <linux/types.h>
29
30
void ipipe_trace_begin(unsigned long v);
31
void ipipe_trace_end(unsigned long v);
32
void ipipe_trace_freeze(unsigned long v);
33
void ipipe_trace_special(unsigned char special_id, unsigned long v);
34
void ipipe_trace_pid(pid_t pid, short prio);
35
void ipipe_trace_event(unsigned char id, unsigned long delay_tsc);
36
int ipipe_trace_max_reset(void);
37
int ipipe_trace_frozen_reset(void);
38
39
#else /* !CONFIG_IPIPE_TRACE */
40
41
#define ipipe_trace_begin(v)			do { (void)(v); } while(0)
42
#define ipipe_trace_end(v)			do { (void)(v); } while(0)
43
#define ipipe_trace_freeze(v)			do { (void)(v); } while(0)
44
#define ipipe_trace_special(id, v)		do { (void)(id); (void)(v); } while(0)
45
#define ipipe_trace_pid(pid, prio)		do { (void)(pid); (void)(prio); } while(0)
46
#define ipipe_trace_event(id, delay_tsc)	do { (void)(id); (void)(delay_tsc); } while(0)
47
#define ipipe_trace_max_reset()			do { } while(0)
48
#define ipipe_trace_frozen_reset()		do { } while(0)
49
50
#endif /* !CONFIG_IPIPE_TRACE */
51
52
#ifdef CONFIG_IPIPE_TRACE_PANIC
53
void ipipe_trace_panic_freeze(void);
54
void ipipe_trace_panic_dump(void);
55
#else
56
static inline void ipipe_trace_panic_freeze(void) { }
57
static inline void ipipe_trace_panic_dump(void) { }
58
#endif
59
60
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
61
#define ipipe_trace_irq_entry(irq)	ipipe_trace_begin(irq)
62
#define ipipe_trace_irq_exit(irq)	ipipe_trace_end(irq)
63
#define ipipe_trace_irqsoff()		ipipe_trace_begin(0x80000000UL)
64
#define ipipe_trace_irqson()		ipipe_trace_end(0x80000000UL)
65
#else
66
#define ipipe_trace_irq_entry(irq)	do { (void)(irq);} while(0)
67
#define ipipe_trace_irq_exit(irq)	do { (void)(irq);} while(0)
68
#define ipipe_trace_irqsoff()		do { } while(0)
69
#define ipipe_trace_irqson()		do { } while(0)
70
#endif
71
72
#endif	/* !__LINUX_IPIPE_TRACE_H */
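As a usage illustration (not part of the patch), explicit trace points of the kind mentioned in the IPIPE_TRACE help text further down could be dropped into a driver like this; the marker value is arbitrary:

/* Sketch only: explicit I-pipe trace points around a latency-critical path. */
#include <linux/ipipe_trace.h>

static void my_latency_critical_path(int hit_problem)
{
	ipipe_trace_begin(0x1);		/* opens a traced interval */
	/* ... time-critical work ... */
	ipipe_trace_end(0x1);		/* closes the traced interval */

	if (hit_problem)
		ipipe_trace_freeze(__LINE__);	/* snapshot the log for later
						 * reading via /proc/ipipe/trace/* */
}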
(-)a/include/linux/irq.h (+14 lines)
Lines 124-129 struct irq_chip { Link Here
124
	void		(*end)(unsigned int irq);
124
	void		(*end)(unsigned int irq);
125
	int		(*set_affinity)(unsigned int irq,
125
	int		(*set_affinity)(unsigned int irq,
126
					const struct cpumask *dest);
126
					const struct cpumask *dest);
127
#ifdef CONFIG_IPIPE
128
	void		(*move)(unsigned int irq);
129
#endif /* CONFIG_IPIPE */
127
	int		(*retrigger)(unsigned int irq);
130
	int		(*retrigger)(unsigned int irq);
128
	int		(*set_type)(unsigned int irq, unsigned int flow_type);
131
	int		(*set_type)(unsigned int irq, unsigned int flow_type);
129
	int		(*set_wake)(unsigned int irq, unsigned int on);
132
	int		(*set_wake)(unsigned int irq, unsigned int on);
Lines 173-178 struct irq_2_iommu; Link Here
173
 * @name:		flow handler name for /proc/interrupts output
176
 * @name:		flow handler name for /proc/interrupts output
174
 */
177
 */
175
struct irq_desc {
178
struct irq_desc {
179
#ifdef CONFIG_IPIPE
180
	void			(*ipipe_ack)(unsigned int irq,
181
					     struct irq_desc *desc);
182
	void			(*ipipe_end)(unsigned int irq,
183
					     struct irq_desc *desc);
184
#endif /* CONFIG_IPIPE */
176
	unsigned int		irq;
185
	unsigned int		irq;
177
	struct timer_rand_state *timer_rand_state;
186
	struct timer_rand_state *timer_rand_state;
178
	unsigned int            *kstat_irqs;
187
	unsigned int            *kstat_irqs;
Lines 346-351 extern void Link Here
346
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
355
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
347
			      irq_flow_handler_t handle, const char *name);
356
			      irq_flow_handler_t handle, const char *name);
348
357
358
extern irq_flow_handler_t
359
__fixup_irq_handler(struct irq_desc *desc, irq_flow_handler_t handle,
360
		    int is_chained);
361
349
extern void
362
extern void
350
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
363
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
351
		  const char *name);
364
		  const char *name);
Lines 357-362 static inline void __set_irq_handler_unlocked(int irq, Link Here
357
	struct irq_desc *desc;
370
	struct irq_desc *desc;
358
371
359
	desc = irq_to_desc(irq);
372
	desc = irq_to_desc(irq);
373
	handler = __fixup_irq_handler(desc, handler, 0);
360
	desc->handle_irq = handler;
374
	desc->handle_irq = handler;
361
}
375
}
362
376
(-)a/include/linux/kernel.h (-2 / +6 lines)
Lines 14-19 Link Here
14
#include <linux/compiler.h>
14
#include <linux/compiler.h>
15
#include <linux/bitops.h>
15
#include <linux/bitops.h>
16
#include <linux/log2.h>
16
#include <linux/log2.h>
17
#include <linux/ipipe_base.h>
17
#include <linux/typecheck.h>
18
#include <linux/typecheck.h>
18
#include <linux/ratelimit.h>
19
#include <linux/ratelimit.h>
19
#include <linux/dynamic_debug.h>
20
#include <linux/dynamic_debug.h>
Lines 119-127 struct user; Link Here
119
120
120
#ifdef CONFIG_PREEMPT_VOLUNTARY
121
#ifdef CONFIG_PREEMPT_VOLUNTARY
121
extern int _cond_resched(void);
122
extern int _cond_resched(void);
122
# define might_resched() _cond_resched()
123
# define might_resched() do { \
124
		ipipe_check_context(ipipe_root_domain); \
125
		_cond_resched(); \
126
	} while (0)
123
#else
127
#else
124
# define might_resched() do { } while (0)
128
# define might_resched() ipipe_check_context(ipipe_root_domain)
125
#endif
129
#endif
126
130
127
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
131
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
(-)a/include/linux/preempt.h (-2 / +9 lines)
Lines 9-21 Link Here
9
#include <linux/thread_info.h>
9
#include <linux/thread_info.h>
10
#include <linux/linkage.h>
10
#include <linux/linkage.h>
11
#include <linux/list.h>
11
#include <linux/list.h>
12
#include <linux/ipipe_base.h>
12
13
13
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
14
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
14
  extern void add_preempt_count(int val);
15
  extern void add_preempt_count(int val);
15
  extern void sub_preempt_count(int val);
16
  extern void sub_preempt_count(int val);
16
#else
17
#else
17
# define add_preempt_count(val)	do { preempt_count() += (val); } while (0)
18
# define add_preempt_count(val)	do {		\
18
# define sub_preempt_count(val)	do { preempt_count() -= (val); } while (0)
19
    ipipe_check_context(ipipe_root_domain);	\
20
    preempt_count() += (val);			\
21
  } while (0)
22
# define sub_preempt_count(val)	do {		\
23
    ipipe_check_context(ipipe_root_domain);	\
24
    preempt_count() -= (val);			\
25
  } while (0)
19
#endif
26
#endif
20
27
21
#define inc_preempt_count() add_preempt_count(1)
28
#define inc_preempt_count() add_preempt_count(1)
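Both this hunk and the might_resched() change above funnel through ipipe_check_context(); below is a plausible sketch of its semantics, inferred from the IPIPE_DEBUG_CONTEXT option added later in this patch (warn when a critical Linux service is entered outside the root domain). The real definition lives in the I-pipe headers and is more involved:

/* Sketch only: approximate behaviour, not the actual header code. */
#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
static inline void ipipe_check_context(struct ipipe_domain *border_domain)
{
	/* Warn when running over a domain that is not allowed to enter
	 * this root-only service. */
	WARN_ON_ONCE(__ipipe_current_domain != border_domain);
}
#else /* !CONFIG_IPIPE_DEBUG_CONTEXT */
#define ipipe_check_context(ipd)	do { } while (0)
#endif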
(-)a/include/linux/sched.h (-2 / +30 lines)
Lines 61-66 struct sched_param { Link Here
61
#include <linux/errno.h>
61
#include <linux/errno.h>
62
#include <linux/nodemask.h>
62
#include <linux/nodemask.h>
63
#include <linux/mm_types.h>
63
#include <linux/mm_types.h>
64
#include <linux/ipipe.h>
64
65
65
#include <asm/system.h>
66
#include <asm/system.h>
66
#include <asm/page.h>
67
#include <asm/page.h>
Lines 195-200 extern unsigned long long time_sync_thresh; Link Here
195
#define TASK_DEAD		64
196
#define TASK_DEAD		64
196
#define TASK_WAKEKILL		128
197
#define TASK_WAKEKILL		128
197
#define TASK_WAKING		256
198
#define TASK_WAKING		256
199
#ifdef CONFIG_IPIPE
200
#define TASK_ATOMICSWITCH	512
201
#define TASK_NOWAKEUP		1024
202
#else  /* !CONFIG_IPIPE */
203
#define TASK_ATOMICSWITCH	0
204
#define TASK_NOWAKEUP		0
205
#endif /* CONFIG_IPIPE */
198
206
199
/* Convenience macros for the sake of set_task_state */
207
/* Convenience macros for the sake of set_task_state */
200
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
208
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
Lines 302-307 extern void trap_init(void); Link Here
302
extern void update_process_times(int user);
310
extern void update_process_times(int user);
303
extern void scheduler_tick(void);
311
extern void scheduler_tick(void);
304
312
313
#ifdef CONFIG_IPIPE
314
void update_root_process_times(struct pt_regs *regs);
315
#else  /* !CONFIG_IPIPE */
316
static inline void update_root_process_times(struct pt_regs *regs)
317
{
318
	update_process_times(user_mode(regs));
319
}
320
#endif /* CONFIG_IPIPE */
321
305
extern void sched_show_task(struct task_struct *p);
322
extern void sched_show_task(struct task_struct *p);
306
323
307
#ifdef CONFIG_DETECT_SOFTLOCKUP
324
#ifdef CONFIG_DETECT_SOFTLOCKUP
Lines 349-356 extern signed long schedule_timeout(signed long timeout); Link Here
349
extern signed long schedule_timeout_interruptible(signed long timeout);
366
extern signed long schedule_timeout_interruptible(signed long timeout);
350
extern signed long schedule_timeout_killable(signed long timeout);
367
extern signed long schedule_timeout_killable(signed long timeout);
351
extern signed long schedule_timeout_uninterruptible(signed long timeout);
368
extern signed long schedule_timeout_uninterruptible(signed long timeout);
352
asmlinkage void __schedule(void);
369
asmlinkage int __schedule(void);
353
asmlinkage void schedule(void);
370
asmlinkage int schedule(void);
354
extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
371
extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
355
372
356
struct nsproxy;
373
struct nsproxy;
Lines 475-480 extern int get_dumpable(struct mm_struct *mm); Link Here
475
#endif
492
#endif
476
					/* leave room for more dump flags */
493
					/* leave room for more dump flags */
477
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
494
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
495
#ifdef CONFIG_IPIPE
496
#define MMF_VM_PINNED		31	/* ondemand load up and COW disabled */
497
#endif
478
498
479
#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
499
#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
480
500
Lines 1496-1501 struct task_struct { Link Here
1496
#endif
1516
#endif
1497
	atomic_t fs_excl;	/* holding fs exclusive resources */
1517
	atomic_t fs_excl;	/* holding fs exclusive resources */
1498
	struct rcu_head rcu;
1518
	struct rcu_head rcu;
1519
#ifdef CONFIG_IPIPE
1520
	void *ptd[IPIPE_ROOT_NPTDKEYS];
1521
#endif
1499
1522
1500
	/*
1523
	/*
1501
	 * cache last used pipe for splice
1524
	 * cache last used pipe for splice
Lines 1736-1741 extern cputime_t task_gtime(struct task_struct *p); Link Here
1736
#define PF_EXITING	0x00000004	/* getting shut down */
1759
#define PF_EXITING	0x00000004	/* getting shut down */
1737
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1760
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1738
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1761
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1762
#ifdef CONFIG_IPIPE
1763
#define PF_EVNOTIFY	0x00000020	/* Notify other domains about internal events */
1764
#else
1765
#define PF_EVNOTIFY	0
1766
#endif /* CONFIG_IPIPE */
1739
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1767
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1740
#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1768
#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1741
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
1769
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
(-)a/include/linux/spinlock.h (-9 / +13 lines)
Lines 90-99 extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); Link Here
90
# include <linux/spinlock_up.h>
90
# include <linux/spinlock_up.h>
91
#endif
91
#endif
92
92
93
#include <linux/ipipe_lock.h>
94
93
#ifdef CONFIG_DEBUG_SPINLOCK
95
#ifdef CONFIG_DEBUG_SPINLOCK
94
  extern void __spin_lock_init(spinlock_t *lock, const char *name,
96
  extern void __spin_lock_init(spinlock_t *lock, const char *name,
95
			       struct lock_class_key *key);
97
			       struct lock_class_key *key);
96
# define spin_lock_init(lock)					\
98
# define _spin_lock_init(lock)					\
97
do {								\
99
do {								\
98
	static struct lock_class_key __key;			\
100
	static struct lock_class_key __key;			\
99
								\
101
								\
Lines 101-110 do { \ Link Here
101
} while (0)
103
} while (0)
102
104
103
#else
105
#else
104
# define spin_lock_init(lock)					\
106
# define _spin_lock_init(lock)					\
105
	do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
107
	do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
106
#endif
108
#endif
107
109
110
# define spin_lock_init(lock)	PICK_SPINOP(_lock_init, lock)
111
108
#ifdef CONFIG_DEBUG_SPINLOCK
112
#ifdef CONFIG_DEBUG_SPINLOCK
109
  extern void __rwlock_init(rwlock_t *lock, const char *name,
113
  extern void __rwlock_init(rwlock_t *lock, const char *name,
110
			    struct lock_class_key *key);
114
			    struct lock_class_key *key);
Lines 186-192 static inline void smp_mb__after_lock(void) { smp_mb(); } Link Here
186
#define read_trylock(lock)		__cond_lock(lock, _read_trylock(lock))
190
#define read_trylock(lock)		__cond_lock(lock, _read_trylock(lock))
187
#define write_trylock(lock)		__cond_lock(lock, _write_trylock(lock))
191
#define write_trylock(lock)		__cond_lock(lock, _write_trylock(lock))
188
192
189
#define spin_lock(lock)			_spin_lock(lock)
193
#define spin_lock(lock)			PICK_SPINOP(_lock, lock)
190
194
191
#ifdef CONFIG_DEBUG_LOCK_ALLOC
195
#ifdef CONFIG_DEBUG_LOCK_ALLOC
192
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
196
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
Lines 208-214 static inline void smp_mb__after_lock(void) { smp_mb(); } Link Here
208
#define spin_lock_irqsave(lock, flags)			\
212
#define spin_lock_irqsave(lock, flags)			\
209
	do {						\
213
	do {						\
210
		typecheck(unsigned long, flags);	\
214
		typecheck(unsigned long, flags);	\
211
		flags = _spin_lock_irqsave(lock);	\
215
		PICK_SPINLOCK_IRQSAVE(lock, flags);	\
212
	} while (0)
216
	} while (0)
213
#define read_lock_irqsave(lock, flags)			\
217
#define read_lock_irqsave(lock, flags)			\
214
	do {						\
218
	do {						\
Lines 240-246 static inline void smp_mb__after_lock(void) { smp_mb(); } Link Here
240
#define spin_lock_irqsave(lock, flags)			\
244
#define spin_lock_irqsave(lock, flags)			\
241
	do {						\
245
	do {						\
242
		typecheck(unsigned long, flags);	\
246
		typecheck(unsigned long, flags);	\
243
		_spin_lock_irqsave(lock, flags);	\
247
		PICK_SPINLOCK_IRQSAVE(lock, flags);	\
244
	} while (0)
248
	} while (0)
245
#define read_lock_irqsave(lock, flags)			\
249
#define read_lock_irqsave(lock, flags)			\
246
	do {						\
250
	do {						\
Lines 257-279 static inline void smp_mb__after_lock(void) { smp_mb(); } Link Here
257
261
258
#endif
262
#endif
259
263
260
#define spin_lock_irq(lock)		_spin_lock_irq(lock)
264
#define spin_lock_irq(lock)		PICK_SPINOP_IRQ(_lock, lock)
261
#define spin_lock_bh(lock)		_spin_lock_bh(lock)
265
#define spin_lock_bh(lock)		_spin_lock_bh(lock)
262
#define read_lock_irq(lock)		_read_lock_irq(lock)
266
#define read_lock_irq(lock)		_read_lock_irq(lock)
263
#define read_lock_bh(lock)		_read_lock_bh(lock)
267
#define read_lock_bh(lock)		_read_lock_bh(lock)
264
#define write_lock_irq(lock)		_write_lock_irq(lock)
268
#define write_lock_irq(lock)		_write_lock_irq(lock)
265
#define write_lock_bh(lock)		_write_lock_bh(lock)
269
#define write_lock_bh(lock)		_write_lock_bh(lock)
266
#define spin_unlock(lock)		_spin_unlock(lock)
270
#define spin_unlock(lock)		PICK_SPINOP(_unlock, lock)
267
#define read_unlock(lock)		_read_unlock(lock)
271
#define read_unlock(lock)		_read_unlock(lock)
268
#define write_unlock(lock)		_write_unlock(lock)
272
#define write_unlock(lock)		_write_unlock(lock)
269
#define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
273
#define spin_unlock_irq(lock)		PICK_SPINOP_IRQ(_unlock, lock)
270
#define read_unlock_irq(lock)		_read_unlock_irq(lock)
274
#define read_unlock_irq(lock)		_read_unlock_irq(lock)
271
#define write_unlock_irq(lock)		_write_unlock_irq(lock)
275
#define write_unlock_irq(lock)		_write_unlock_irq(lock)
272
276
273
#define spin_unlock_irqrestore(lock, flags)		\
277
#define spin_unlock_irqrestore(lock, flags)		\
274
	do {						\
278
	do {						\
275
		typecheck(unsigned long, flags);	\
279
		typecheck(unsigned long, flags);	\
276
		_spin_unlock_irqrestore(lock, flags);	\
280
		PICK_SPINUNLOCK_IRQRESTORE(lock, flags);	\
277
	} while (0)
281
	} while (0)
278
#define spin_unlock_bh(lock)		_spin_unlock_bh(lock)
282
#define spin_unlock_bh(lock)		_spin_unlock_bh(lock)
279
283
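The PICK_* macros used above come from the new <linux/ipipe_lock.h> and select, at compile time, between the stock _spin_* entry points and the pipeline-aware lock operations, depending on how the lock was declared. A schematic sketch of the dispatch idea follows; the names and the type test are illustrative, not the actual ipipe_lock.h code:

/* Sketch only: compile-time dispatch between stock and I-pipe locks. */
#define ipipe_lock_p(lock)						\
	__builtin_types_compatible_p(typeof(lock), ipipe_spinlock_t *)

#define PICK_SPINOP(op, lock)						\
do {									\
	if (ipipe_lock_p(lock))						\
		__ipipe_spin##op(lock);		/* hard, pipeline-safe */ \
	else								\
		_spin##op(lock);		/* regular kernel lock */ \
} while (0)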
(-)a/include/linux/spinlock_api_smp.h (-2 / +4 lines)
Lines 229-235 static inline int __write_trylock(rwlock_t *lock) Link Here
229
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
229
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
230
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
230
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
231
 */
231
 */
232
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
232
#if !defined(CONFIG_GENERIC_LOCKBREAK) ||   \
233
	defined(CONFIG_DEBUG_LOCK_ALLOC) || \
234
	defined(CONFIG_IPIPE)
233
235
234
static inline void __read_lock(rwlock_t *lock)
236
static inline void __read_lock(rwlock_t *lock)
235
{
237
{
Lines 250-256 static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) Link Here
250
	 * _raw_spin_lock_flags() code, because lockdep assumes
252
	 * _raw_spin_lock_flags() code, because lockdep assumes
251
	 * that interrupts are not re-enabled during lock-acquire:
253
	 * that interrupts are not re-enabled during lock-acquire:
252
	 */
254
	 */
253
#ifdef CONFIG_LOCKDEP
255
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_IPIPE)
254
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
256
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
255
#else
257
#else
256
	_raw_spin_lock_flags(lock, &flags);
258
	_raw_spin_lock_flags(lock, &flags);
(-)a/init/Kconfig (+1 lines)
Lines 78-83 config INIT_ENV_ARG_LIMIT Link Here
78
78
79
config LOCALVERSION
79
config LOCALVERSION
80
	string "Local version - append to kernel release"
80
	string "Local version - append to kernel release"
81
	default "-ipipe"
81
	help
82
	help
82
	  Append an extra string to the end of your kernel version.
83
	  Append an extra string to the end of your kernel version.
83
	  This will show up when you type uname, for example.
84
	  This will show up when you type uname, for example.
(-)a/init/main.c (-1 / +8 lines)
Lines 530-536 asmlinkage void __init start_kernel(void) Link Here
530
530
531
	cgroup_init_early();
531
	cgroup_init_early();
532
532
533
	local_irq_disable();
533
	local_irq_disable_hw();
534
	early_boot_irqs_off();
534
	early_boot_irqs_off();
535
	early_init_irq_lock_class();
535
	early_init_irq_lock_class();
536
536
Lines 565-570 asmlinkage void __init start_kernel(void) Link Here
565
	pidhash_init();
565
	pidhash_init();
566
	vfs_caches_init_early();
566
	vfs_caches_init_early();
567
	sort_main_extable();
567
	sort_main_extable();
568
	ipipe_init_early();
568
	trap_init();
569
	trap_init();
569
	mm_init();
570
	mm_init();
570
	/*
571
	/*
Lines 593-598 asmlinkage void __init start_kernel(void) Link Here
593
	softirq_init();
594
	softirq_init();
594
	timekeeping_init();
595
	timekeeping_init();
595
	time_init();
596
	time_init();
597
	/*
598
	 * We need to wait for the interrupt and time subsystems to be
599
	 * initialized before enabling the pipeline.
600
	 */
601
	ipipe_init();
596
	profile_init();
602
	profile_init();
597
	if (!irqs_disabled())
603
	if (!irqs_disabled())
598
		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
604
		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
Lines 774-779 static void __init do_basic_setup(void) Link Here
774
	init_tmpfs();
780
	init_tmpfs();
775
	driver_init();
781
	driver_init();
776
	init_irq_proc();
782
	init_irq_proc();
783
	ipipe_init_proc();
777
	do_ctors();
784
	do_ctors();
778
	do_initcalls();
785
	do_initcalls();
779
}
786
}
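start_kernel() and do_basic_setup() now call the ipipe_* hooks unconditionally, so a non-pipelined build presumably relies on empty fallbacks along these lines (sketch only; the actual stubs are defined in the I-pipe headers, not shown in this hunk):

/* Sketch only: plausible !CONFIG_IPIPE fallbacks for the calls above. */
#ifndef CONFIG_IPIPE
static inline void ipipe_init_early(void) { }
static inline void ipipe_init(void) { }
static inline void ipipe_init_proc(void) { }
#define local_irq_disable_hw()	local_irq_disable()
#endif /* !CONFIG_IPIPE */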
(-)a/kernel/Makefile (+1 lines)
Lines 83-88 obj-$(CONFIG_TREE_RCU) += rcutree.o Link Here
83
obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
83
obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
84
obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
84
obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
85
obj-$(CONFIG_RELAY) += relay.o
85
obj-$(CONFIG_RELAY) += relay.o
86
obj-$(CONFIG_IPIPE) += ipipe/
86
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
87
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
87
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
88
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
88
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
89
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
(-)a/kernel/exit.c (+35 lines)
Lines 963-968 NORET_TYPE void do_exit(long code) Link Here
963
		acct_process();
963
		acct_process();
964
	trace_sched_process_exit(tsk);
964
	trace_sched_process_exit(tsk);
965
965
966
  	ipipe_exit_notify(tsk);
966
	exit_sem(tsk);
967
	exit_sem(tsk);
967
	exit_files(tsk);
968
	exit_files(tsk);
968
	exit_fs(tsk);
969
	exit_fs(tsk);
Lines 1766-1768 SYSCALL_DEFINE3(waitpid, pid_t, pid, int Link Here
1766
}
1767
}
1767
1768
1768
#endif
1769
#endif
1770
1771
void rt_daemonize(void)
1772
{
1773
	sigset_t blocked;
1774
1775
	/*
1776
	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
1777
	 * or suspend transition begins right now.
1778
	 */
1779
	current->flags |= (PF_NOFREEZE | PF_KTHREAD);
1780
1781
	if (current->nsproxy != &init_nsproxy) {
1782
		get_nsproxy(&init_nsproxy);
1783
		switch_task_namespaces(current, &init_nsproxy);
1784
	}
1785
	set_special_pids(&init_struct_pid);
1786
	proc_clear_tty(current);
1787
1788
	/* Block and flush all signals */
1789
	sigfillset(&blocked);
1790
	sigprocmask(SIG_BLOCK, &blocked, NULL);
1791
	flush_signals(current);
1792
1793
	/* Become as one with the init task */
1794
1795
	daemonize_fs_struct();
1796
	exit_files(current);
1797
	current->files = init_task.files;
1798
	atomic_inc(&current->files->count);
1799
1800
	reparent_to_kthreadd();
1801
}
1802
1803
EXPORT_SYMBOL(rt_daemonize);
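rt_daemonize() mirrors daemonize() for threads that must stay out of the freezer's reach; a hypothetical caller (not from the patch) spawned with kernel_thread() might look like this:

/* Sketch only: hypothetical kernel daemon using rt_daemonize(). */
static int my_rt_housekeeper(void *arg)
{
	rt_daemonize();		/* detach from the spawner, block signals */

	for (;;) {
		/* ... periodic co-kernel housekeeping ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
/* typically started early with kernel_thread(my_rt_housekeeper, NULL, CLONE_KERNEL) */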
(-)a/kernel/fork.c (-1 / +8 lines)
Lines 511-516 void mmput(struct mm_struct *mm) Link Here
511
		exit_aio(mm);
511
		exit_aio(mm);
512
		ksm_exit(mm);
512
		ksm_exit(mm);
513
		exit_mmap(mm);
513
		exit_mmap(mm);
514
		ipipe_cleanup_notify(mm);
514
		set_mm_exe_file(mm, NULL);
515
		set_mm_exe_file(mm, NULL);
515
		if (!list_empty(&mm->mmlist)) {
516
		if (!list_empty(&mm->mmlist)) {
516
			spin_lock(&mmlist_lock);
517
			spin_lock(&mmlist_lock);
Lines 918-924 static void copy_flags(unsigned long clone_flags, struct task_struct *p) Link Here
918
{
919
{
919
	unsigned long new_flags = p->flags;
920
	unsigned long new_flags = p->flags;
920
921
921
	new_flags &= ~PF_SUPERPRIV;
922
	new_flags &= ~(PF_SUPERPRIV | PF_EVNOTIFY);
922
	new_flags |= PF_FORKNOEXEC;
923
	new_flags |= PF_FORKNOEXEC;
923
	new_flags |= PF_STARTING;
924
	new_flags |= PF_STARTING;
924
	p->flags = new_flags;
925
	p->flags = new_flags;
Lines 1303-1308 static struct task_struct *copy_process(unsigned long clone_flags, Link Here
1303
	write_unlock_irq(&tasklist_lock);
1304
	write_unlock_irq(&tasklist_lock);
1304
	proc_fork_connector(p);
1305
	proc_fork_connector(p);
1305
	cgroup_post_fork(p);
1306
	cgroup_post_fork(p);
1307
#ifdef CONFIG_IPIPE
1308
	memset(p->ptd, 0, sizeof(p->ptd));
1309
#endif /* CONFIG_IPIPE */
1306
	perf_event_fork(p);
1310
	perf_event_fork(p);
1307
	return p;
1311
	return p;
1308
1312
Lines 1700-1710 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) Link Here
1700
		}
1704
		}
1701
1705
1702
		if (new_mm) {
1706
		if (new_mm) {
1707
			unsigned long flags;
1703
			mm = current->mm;
1708
			mm = current->mm;
1704
			active_mm = current->active_mm;
1709
			active_mm = current->active_mm;
1705
			current->mm = new_mm;
1710
			current->mm = new_mm;
1711
			ipipe_mm_switch_protect(flags);
1706
			current->active_mm = new_mm;
1712
			current->active_mm = new_mm;
1707
			activate_mm(active_mm, new_mm);
1713
			activate_mm(active_mm, new_mm);
1714
			ipipe_mm_switch_unprotect(flags);
1708
			new_mm = mm;
1715
			new_mm = mm;
1709
		}
1716
		}
1710
1717
(-)a/kernel/ipipe/Kconfig (+35 lines)
Line 0 Link Here
1
config IPIPE
2
	bool "Interrupt pipeline"
3
	default y
4
	---help---
5
	  Activate this option if you want the interrupt pipeline to be
6
	  compiled in.
7
8
config IPIPE_DOMAINS
9
	int "Max domains"
10
	depends on IPIPE
11
	default 4
12
	---help---
13
	The maximum number of I-pipe domains to run concurrently.
14
15
config IPIPE_COMPAT
16
	bool "Maintain code compatibility with older releases"
17
	depends on IPIPE
18
	default y
19
	---help---
20
	Activate this option if you want the compatibility code to be
21
	defined, so that older I-pipe clients may use obsolete
22
	constructs. WARNING: obsolete code will eventually be
23
	deprecated in future I-pipe releases, and removed from the
24
	compatibility support as time passes. Please fix I-pipe
25
	clients to get rid of such uses as soon as possible.
26
27
config IPIPE_DELAYED_ATOMICSW
28
       bool
29
       depends on IPIPE
30
       default n
31
32
config IPIPE_UNMASKED_CONTEXT_SWITCH
33
       bool
34
       depends on IPIPE
35
       default n
(-)a/kernel/ipipe/Kconfig.debug (+97 lines)
Line 0 Link Here
1
config IPIPE_DEBUG
2
	bool "I-pipe debugging"
3
	depends on IPIPE
4
5
config IPIPE_DEBUG_CONTEXT
6
	bool "Check for illicit cross-domain calls"
7
	depends on IPIPE_DEBUG
8
	default y
9
	---help---
10
	  Enable this feature to arm checkpoints in the kernel that
11
	  verify the correct invocation context. On entry of critical
12
	  Linux services a warning is issued if the caller is not
13
	  running over the root domain.
14
15
config IPIPE_DEBUG_INTERNAL
16
	bool "Enable internal debug checks"
17
	depends on IPIPE_DEBUG
18
	default y
19
	---help---
20
	  When this feature is enabled, I-pipe will perform internal
21
	  consistency checks of its subsystems, e.g. on per-cpu variable
22
	  access.
23
24
config IPIPE_TRACE
25
	bool "Latency tracing"
26
	depends on IPIPE_DEBUG
27
	select FRAME_POINTER
28
	select KALLSYMS
29
	select PROC_FS
30
	---help---
31
	  Activate this option if you want to use per-function tracing of
32
	  the kernel. The tracer will collect data via instrumentation
33
	  features like the one below or with the help of explicit calls
34
	  to ipipe_trace_xxx(). See include/linux/ipipe_trace.h for the
35
	  in-kernel tracing API. The collected data and runtime control
36
	  are available via /proc/ipipe/trace/*.
37
38
if IPIPE_TRACE
39
40
config IPIPE_TRACE_ENABLE
41
	bool "Enable tracing on boot"
42
	default y
43
	---help---
44
	  Disable this option if you want to arm the tracer manually after
45
	  booting ("echo 1 > /proc/ipipe/trace/enable"). This can reduce
46
	  boot time on slow embedded devices due to the tracer overhead.
47
48
config IPIPE_TRACE_MCOUNT
49
	bool "Instrument function entries"
50
	default y
51
	select FUNCTION_TRACER
52
	select TRACING
53
	select CONTEXT_SWITCH_TRACER
54
	select FTRACE_MCOUNT_RECORD
55
	select DYNAMIC_FTRACE
56
	---help---
57
	  When enabled, records every kernel function entry in the tracer
58
	  log. While this slows down the system noticeably, it provides
59
	  the highest level of information about the flow of events.
60
	  However, it can be switched off in order to record only explicit
61
	  I-pipe trace points.
62
63
config IPIPE_TRACE_IRQSOFF
64
	bool "Trace IRQs-off times"
65
	default y
66
	---help---
67
	  Activate this option if I-pipe shall trace the longest path
68
	  with hard-IRQs switched off.
69
70
config IPIPE_TRACE_SHIFT
71
	int "Depth of trace log (14 => 16Kpoints, 15 => 32Kpoints)"
72
	range 10 18
73
	default 14
74
	---help---
75
	  The number of trace points to hold tracing data for each
76
	  trace path, as a power of 2.
77
78
config IPIPE_TRACE_VMALLOC
79
	bool "Use vmalloc'ed trace buffer"
80
	default y if EMBEDDED
81
	---help---
82
	  Instead of reserving static kernel data, the required buffer
83
	  is allocated via vmalloc during boot-up when this option is
84
	  enabled. This can help to start systems that are low on memory,
85
	  but it slightly degrades overall performance. Try this option
86
	  when a traced kernel hangs unexpectedly at boot time.
87
88
config IPIPE_TRACE_PANIC
89
	bool "Enable panic back traces"
90
	default y
91
	---help---
92
	  Provides services to freeze and dump a back trace in panic
93
	  situations. This is used on IPIPE_DEBUG_CONTEXT exceptions
94
	  as well as ordinary kernel oopses. You can control the number
95
	  of printed back trace points via /proc/ipipe/trace.
96
97
endif
(-)a/kernel/ipipe/Makefile (+3 lines)
Line 0 Link Here
1
2
obj-$(CONFIG_IPIPE)	+= core.o
3
obj-$(CONFIG_IPIPE_TRACE) += tracer.o
(-)a/kernel/ipipe/core.c (+1955 lines)
Line 0 Link Here
1
/* -*- linux-c -*-
2
 * linux/kernel/ipipe/core.c
3
 *
4
 * Copyright (C) 2002-2005 Philippe Gerum.
5
 *
6
 * This program is free software; you can redistribute it and/or modify
7
 * it under the terms of the GNU General Public License as published by
8
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
9
 * USA; either version 2 of the License, or (at your option) any later
10
 * version.
11
 *
12
 * This program is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15
 * GNU General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU General Public License
18
 * along with this program; if not, write to the Free Software
19
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20
 *
21
 * Architecture-independent I-PIPE core support.
22
 */
23
24
#include <linux/version.h>
25
#include <linux/module.h>
26
#include <linux/init.h>
27
#include <linux/kernel.h>
28
#include <linux/sched.h>
29
#include <linux/sched.h>
30
#include <linux/kallsyms.h>
31
#include <linux/interrupt.h>
32
#include <linux/bitops.h>
33
#include <linux/tick.h>
34
#include <linux/prefetch.h>
35
#ifdef CONFIG_PROC_FS
36
#include <linux/proc_fs.h>
37
#include <linux/seq_file.h>
38
#endif	/* CONFIG_PROC_FS */
39
#include <linux/ipipe_trace.h>
40
#include <linux/ipipe_tickdev.h>
41
#include <linux/irq.h>
42
43
static int __ipipe_ptd_key_count;
44
45
static unsigned long __ipipe_ptd_key_map;
46
47
static unsigned long __ipipe_domain_slot_map;
48
49
struct ipipe_domain ipipe_root;
50
51
#ifndef CONFIG_SMP
52
/*
53
 * Create an alias to the unique root status, so that arch-dep code
54
 * may get simple and easy access to this percpu variable.  We also
55
 * create an array of pointers to the percpu domain data; this tends
56
 * to produce better code when reaching non-root domains. We make
57
 * sure that the early boot code can safely dereference the
58
 * pointer to the root domain data by statically initializing
59
 * its value (local_irq*() routines depend on this).
60
 */
61
#if __GNUC__ >= 4
62
extern unsigned long __ipipe_root_status
63
__attribute__((alias(__stringify(__raw_get_cpu_var(ipipe_percpu_darray)))));
64
EXPORT_SYMBOL(__ipipe_root_status);
65
#else /* __GNUC__ < 4 */
66
/*
67
 * Work around a GCC 3.x issue making alias symbols unusable as
68
 * constant initializers.
69
 */
70
unsigned long *const __ipipe_root_status_addr =
71
	&__raw_get_cpu_var(ipipe_percpu_darray)[IPIPE_ROOT_SLOT].status;
72
EXPORT_SYMBOL(__ipipe_root_status_addr);
73
#endif /* __GNUC__ < 4 */
74
75
DEFINE_PER_CPU(struct ipipe_percpu_domain_data *, ipipe_percpu_daddr[CONFIG_IPIPE_DOMAINS]) =
76
{ [IPIPE_ROOT_SLOT] = (struct ipipe_percpu_domain_data *)&__raw_get_cpu_var(ipipe_percpu_darray) };
77
EXPORT_PER_CPU_SYMBOL(ipipe_percpu_daddr);
78
#endif /* !CONFIG_SMP */
79
80
DEFINE_PER_CPU(struct ipipe_percpu_domain_data, ipipe_percpu_darray[CONFIG_IPIPE_DOMAINS]) =
81
{ [IPIPE_ROOT_SLOT] = { .status = IPIPE_STALL_MASK } }; /* Root domain stalled on each CPU at startup. */
82
83
DEFINE_PER_CPU(struct ipipe_domain *, ipipe_percpu_domain) = { &ipipe_root };
84
85
DEFINE_PER_CPU(unsigned long, ipipe_nmi_saved_root); /* Copy of root status during NMI */
86
87
static IPIPE_DEFINE_SPINLOCK(__ipipe_pipelock);
88
89
LIST_HEAD(__ipipe_pipeline);
90
91
unsigned long __ipipe_virtual_irq_map;
92
93
#ifdef CONFIG_PRINTK
94
unsigned __ipipe_printk_virq;
95
#endif /* CONFIG_PRINTK */
96
97
int __ipipe_event_monitors[IPIPE_NR_EVENTS];
98
99
#ifdef CONFIG_GENERIC_CLOCKEVENTS
100
101
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
102
103
static DEFINE_PER_CPU(struct ipipe_tick_device, ipipe_tick_cpu_device);
104
105
int ipipe_request_tickdev(const char *devname,
106
			  void (*emumode)(enum clock_event_mode mode,
107
					  struct clock_event_device *cdev),
108
			  int (*emutick)(unsigned long delta,
109
					 struct clock_event_device *cdev),
110
			  int cpu, unsigned long *tmfreq)
111
{
112
	struct ipipe_tick_device *itd;
113
	struct tick_device *slave;
114
	struct clock_event_device *evtdev;
115
	unsigned long long freq;
116
	unsigned long flags;
117
	int status;
118
119
	flags = ipipe_critical_enter(NULL);
120
121
	itd = &per_cpu(ipipe_tick_cpu_device, cpu);
122
123
	if (itd->slave != NULL) {
124
		status = -EBUSY;
125
		goto out;
126
	}
127
128
	slave = &per_cpu(tick_cpu_device, cpu);
129
130
	if (strcmp(slave->evtdev->name, devname)) {
131
		/*
132
		 * No conflict so far with the current tick device,
133
		 * check whether the requested device is sane and has
134
		 * been blessed by the kernel.
135
		 */
136
		status = __ipipe_check_tickdev(devname) ?
137
			CLOCK_EVT_MODE_UNUSED : CLOCK_EVT_MODE_SHUTDOWN;
138
		goto out;
139
	}
140
141
	/*
142
	 * Our caller asks to use the same clock event device for
143
	 * ticking as we do; let's create a tick emulation device to
144
	 * interpose on the set_next_event() method, so that we may
145
	 * both manage the device in oneshot mode. Only the tick
146
	 * emulation code will actually program the clockchip hardware
147
	 * for the next shot, though.
148
	 *
149
	 * CAUTION: we still have to grab the tick device even when it
150
	 * currently runs in periodic mode, since the kernel may switch
151
	 * to oneshot dynamically (highres/no_hz tick mode).
152
	 */
153
154
	evtdev = slave->evtdev;
155
	status = evtdev->mode;
156
157
	if (status == CLOCK_EVT_MODE_SHUTDOWN)
158
		goto out;
159
160
	itd->slave = slave;
161
	itd->emul_set_mode = emumode;
162
	itd->emul_set_tick = emutick;
163
	itd->real_set_mode = evtdev->set_mode;
164
	itd->real_set_tick = evtdev->set_next_event;
165
	itd->real_max_delta_ns = evtdev->max_delta_ns;
166
	itd->real_mult = evtdev->mult;
167
	itd->real_shift = evtdev->shift;
168
	freq = (1000000000ULL * evtdev->mult) >> evtdev->shift;
169
	*tmfreq = (unsigned long)freq;
170
	evtdev->set_mode = emumode;
171
	evtdev->set_next_event = emutick;
172
	evtdev->max_delta_ns = ULONG_MAX;
173
	evtdev->mult = 1;
174
	evtdev->shift = 0;
175
out:
176
	ipipe_critical_exit(flags);
177
178
	return status;
179
}
180
181
void ipipe_release_tickdev(int cpu)
182
{
183
	struct ipipe_tick_device *itd;
184
	struct tick_device *slave;
185
	struct clock_event_device *evtdev;
186
	unsigned long flags;
187
188
	flags = ipipe_critical_enter(NULL);
189
190
	itd = &per_cpu(ipipe_tick_cpu_device, cpu);
191
192
	if (itd->slave != NULL) {
193
		slave = &per_cpu(tick_cpu_device, cpu);
194
		evtdev = slave->evtdev;
195
		evtdev->set_mode = itd->real_set_mode;
196
		evtdev->set_next_event = itd->real_set_tick;
197
		evtdev->max_delta_ns = itd->real_max_delta_ns;
198
		evtdev->mult = itd->real_mult;
199
		evtdev->shift = itd->real_shift;
200
		itd->slave = NULL;
201
	}
202
203
	ipipe_critical_exit(flags);
204
}
205
206
#endif /* CONFIG_GENERIC_CLOCKEVENTS */
207
208
void __init ipipe_init_early(void)
209
{
210
	struct ipipe_domain *ipd = &ipipe_root;
211
212
	/*
213
	 * Do the early init stuff. At this point, the kernel does not
214
	 * provide much services yet: be careful.
215
	 */
216
	__ipipe_check_platform(); /* Do platform dependent checks first. */
217
218
	/*
219
	 * A lightweight registration code for the root domain. We are
220
	 * running on the boot CPU, hw interrupts are off, and
221
	 * secondary CPUs are still lost in space.
222
	 */
223
224
	/* Reserve percpu data slot #0 for the root domain. */
225
	ipd->slot = 0;
226
	set_bit(0, &__ipipe_domain_slot_map);
227
228
	ipd->name = "Linux";
229
	ipd->domid = IPIPE_ROOT_ID;
230
	ipd->priority = IPIPE_ROOT_PRIO;
231
232
	__ipipe_init_stage(ipd);
233
234
	list_add_tail(&ipd->p_link, &__ipipe_pipeline);
235
236
	__ipipe_init_platform();
237
238
#ifdef CONFIG_PRINTK
239
	__ipipe_printk_virq = ipipe_alloc_virq();	/* Cannot fail here. */
240
	ipd->irqs[__ipipe_printk_virq].handler = &__ipipe_flush_printk;
241
	ipd->irqs[__ipipe_printk_virq].cookie = NULL;
242
	ipd->irqs[__ipipe_printk_virq].acknowledge = NULL;
243
	ipd->irqs[__ipipe_printk_virq].control = IPIPE_HANDLE_MASK;
244
#endif /* CONFIG_PRINTK */
245
}
246
247
void __init ipipe_init(void)
248
{
249
	/* Now we may engage the pipeline. */
250
	__ipipe_enable_pipeline();
251
252
	printk(KERN_INFO "I-pipe %s: pipeline enabled.\n",
253
	       IPIPE_VERSION_STRING);
254
}
255
256
void __ipipe_init_stage(struct ipipe_domain *ipd)
257
{
258
	struct ipipe_percpu_domain_data *p;
259
	unsigned long status;
260
	int cpu, n;
261
262
	for_each_online_cpu(cpu) {
263
		p = ipipe_percpudom_ptr(ipd, cpu);
264
		status = p->status;
265
		memset(p, 0, sizeof(*p));
266
		p->status = status;
267
	}
268
269
	for (n = 0; n < IPIPE_NR_IRQS; n++) {
270
		ipd->irqs[n].acknowledge = NULL;
271
		ipd->irqs[n].handler = NULL;
272
		ipd->irqs[n].control = IPIPE_PASS_MASK;	/* Pass but don't handle */
273
	}
274
275
	for (n = 0; n < IPIPE_NR_EVENTS; n++)
276
		ipd->evhand[n] = NULL;
277
278
	ipd->evself = 0LL;
279
	mutex_init(&ipd->mutex);
280
281
	__ipipe_hook_critical_ipi(ipd);
282
}
283
284
void __ipipe_cleanup_domain(struct ipipe_domain *ipd)
285
{
286
	ipipe_unstall_pipeline_from(ipd);
287
288
#ifdef CONFIG_SMP
289
	{
290
		struct ipipe_percpu_domain_data *p;
291
		int cpu;
292
293
		for_each_online_cpu(cpu) {
294
			p = ipipe_percpudom_ptr(ipd, cpu);
295
			while (__ipipe_ipending_p(p))
296
				cpu_relax();
297
		}
298
	}
299
#else
300
	__raw_get_cpu_var(ipipe_percpu_daddr)[ipd->slot] = NULL;
301
#endif
302
303
	clear_bit(ipd->slot, &__ipipe_domain_slot_map);
304
}
305
306
void __ipipe_unstall_root(void)
307
{
308
	struct ipipe_percpu_domain_data *p;
309
310
        local_irq_disable_hw();
311
312
#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
313
	/* This helps catching bad usage from assembly call sites. */
314
	BUG_ON(!__ipipe_root_domain_p);
315
#endif
316
317
	p = ipipe_root_cpudom_ptr();
318
319
        __clear_bit(IPIPE_STALL_FLAG, &p->status);
320
321
        if (unlikely(__ipipe_ipending_p(p)))
322
                __ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
323
324
        local_irq_enable_hw();
325
}
326
327
void __ipipe_restore_root(unsigned long x)
328
{
329
#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
330
	BUG_ON(!ipipe_root_domain_p);
331
#endif
332
333
	if (x)
334
		__ipipe_stall_root();
335
	else
336
		__ipipe_unstall_root();
337
}
338
339
void ipipe_stall_pipeline_from(struct ipipe_domain *ipd)
340
{
341
	unsigned long flags;
342
	/*
343
	 * We have to prevent against race on updating the status
344
	 * variable _and_ CPU migration at the same time, so disable
345
	 * hw IRQs here.
346
	 */
347
	local_irq_save_hw(flags);
348
349
	__set_bit(IPIPE_STALL_FLAG, &ipipe_cpudom_var(ipd, status));
350
351
	if (!__ipipe_pipeline_head_p(ipd))
352
		local_irq_restore_hw(flags);
353
}
354
355
unsigned long ipipe_test_and_stall_pipeline_from(struct ipipe_domain *ipd)
356
{
357
	unsigned long flags, x;
358
359
	/* See ipipe_stall_pipeline_from() */
360
	local_irq_save_hw(flags);
361
362
	x = __test_and_set_bit(IPIPE_STALL_FLAG, &ipipe_cpudom_var(ipd, status));
363
364
	if (!__ipipe_pipeline_head_p(ipd))
365
		local_irq_restore_hw(flags);
366
367
	return x;
368
}
369
370
unsigned long ipipe_test_and_unstall_pipeline_from(struct ipipe_domain *ipd)
371
{
372
	unsigned long flags, x;
373
	struct list_head *pos;
374
375
	local_irq_save_hw(flags);
376
377
	x = __test_and_clear_bit(IPIPE_STALL_FLAG, &ipipe_cpudom_var(ipd, status));
378
379
	if (ipd == __ipipe_current_domain)
380
		pos = &ipd->p_link;
381
	else
382
		pos = __ipipe_pipeline.next;
383
384
	__ipipe_walk_pipeline(pos);
385
386
	if (likely(__ipipe_pipeline_head_p(ipd)))
387
		local_irq_enable_hw();
388
	else
389
		local_irq_restore_hw(flags);
390
391
	return x;
392
}
393
394
void ipipe_restore_pipeline_from(struct ipipe_domain *ipd,
395
					  unsigned long x)
396
{
397
	if (x)
398
		ipipe_stall_pipeline_from(ipd);
399
	else
400
		ipipe_unstall_pipeline_from(ipd);
401
}
402
403
void ipipe_unstall_pipeline_head(void)
404
{
405
	struct ipipe_percpu_domain_data *p = ipipe_head_cpudom_ptr();
406
	struct ipipe_domain *head_domain;
407
408
	local_irq_disable_hw();
409
410
	__clear_bit(IPIPE_STALL_FLAG, &p->status);
411
412
	if (unlikely(__ipipe_ipending_p(p))) {
413
		head_domain = __ipipe_pipeline_head();
414
		if (likely(head_domain == __ipipe_current_domain))
415
			__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
416
		else
417
			__ipipe_walk_pipeline(&head_domain->p_link);
418
        }
419
420
	local_irq_enable_hw();
421
}
422
423
void __ipipe_restore_pipeline_head(unsigned long x)
424
{
425
	struct ipipe_percpu_domain_data *p = ipipe_head_cpudom_ptr();
426
	struct ipipe_domain *head_domain;
427
428
	local_irq_disable_hw();
429
430
	if (x) {
431
#ifdef CONFIG_DEBUG_KERNEL
432
		static int warned;
433
		if (!warned && test_and_set_bit(IPIPE_STALL_FLAG, &p->status)) {
434
			/*
435
			 * Already stalled albeit ipipe_restore_pipeline_head()
436
			 * should have detected it? Send a warning once.
437
			 */
438
			warned = 1;
439
			printk(KERN_WARNING
440
				   "I-pipe: ipipe_restore_pipeline_head() optimization failed.\n");
441
			dump_stack();
442
		}
443
#else /* !CONFIG_DEBUG_KERNEL */
444
		set_bit(IPIPE_STALL_FLAG, &p->status);
445
#endif /* CONFIG_DEBUG_KERNEL */
446
	}
447
	else {
448
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
449
		if (unlikely(__ipipe_ipending_p(p))) {
450
			head_domain = __ipipe_pipeline_head();
451
			if (likely(head_domain == __ipipe_current_domain))
452
				__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
453
			else
454
				__ipipe_walk_pipeline(&head_domain->p_link);
455
		}
456
		local_irq_enable_hw();
457
	}
458
}
459
460
void __ipipe_spin_lock_irq(raw_spinlock_t *lock)
461
{
462
	local_irq_disable_hw();
463
	__raw_spin_lock(lock);
464
	__set_bit(IPIPE_STALL_FLAG, &ipipe_this_cpudom_var(status));
465
}
466
467
void __ipipe_spin_unlock_irq(raw_spinlock_t *lock)
468
{
469
	__raw_spin_unlock(lock);
470
	__clear_bit(IPIPE_STALL_FLAG, &ipipe_this_cpudom_var(status));
471
	local_irq_enable_hw();
472
}
473
474
unsigned long __ipipe_spin_lock_irqsave(raw_spinlock_t *lock)
475
{
476
	unsigned long flags;
477
	int s;
478
479
	local_irq_save_hw(flags);
480
	__raw_spin_lock(lock);
481
	s = __test_and_set_bit(IPIPE_STALL_FLAG, &ipipe_this_cpudom_var(status));
482
483
	return raw_mangle_irq_bits(s, flags);
484
}
485
486
void __ipipe_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long x)
487
{
488
	__raw_spin_unlock(lock);
489
	if (!raw_demangle_irq_bits(&x))
490
		__clear_bit(IPIPE_STALL_FLAG, &ipipe_this_cpudom_var(status));
491
	local_irq_restore_hw(x);
492
}
493
494
void __ipipe_spin_unlock_irqbegin(ipipe_spinlock_t *lock)
495
{
496
	__raw_spin_unlock(&lock->bare_lock);
497
}
498
499
void __ipipe_spin_unlock_irqcomplete(unsigned long x)
500
{
501
	if (!raw_demangle_irq_bits(&x))
502
		__clear_bit(IPIPE_STALL_FLAG, &ipipe_this_cpudom_var(status));
503
	local_irq_restore_hw(x);
504
}
505
506
#ifdef __IPIPE_3LEVEL_IRQMAP
507
508
/* Must be called hw IRQs off. */
509
static inline void __ipipe_set_irq_held(struct ipipe_percpu_domain_data *p,
510
					unsigned int irq)
511
{
512
	__set_bit(irq, p->irqheld_map);
513
	p->irqall[irq]++;
514
}
515
516
/* Must be called hw IRQs off. */
517
void __ipipe_set_irq_pending(struct ipipe_domain *ipd, unsigned int irq)
518
{
519
	struct ipipe_percpu_domain_data *p = ipipe_cpudom_ptr(ipd);
520
	int l0b, l1b;
521
522
	l0b = irq / (BITS_PER_LONG * BITS_PER_LONG);
523
	l1b = irq / BITS_PER_LONG;
524
	prefetchw(p);
525
526
	if (likely(!test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))) {
527
		__set_bit(irq, p->irqpend_lomap);
528
		__set_bit(l1b, p->irqpend_mdmap);
529
		__set_bit(l0b, &p->irqpend_himap);
530
	} else
531
		__set_bit(irq, p->irqheld_map);
532
533
	p->irqall[irq]++;
534
}
535
536
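/*
 * Worked example for the three-level map above (illustration only, not
 * part of the original file): with BITS_PER_LONG == 32, a pending IRQ 70
 * lands in l0b = 70 / 1024 = 0 and l1b = 70 / 32 = 2, i.e. bit 6 of
 * irqpend_lomap[2], bit 2 of irqpend_mdmap[0] and bit 0 of irqpend_himap,
 * so __ipipe_next_irq() below can find it with at most three bit scans.
 */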
/* Must be called hw IRQs off. */
537
void __ipipe_lock_irq(struct ipipe_domain *ipd, int cpu, unsigned int irq)
538
{
539
	struct ipipe_percpu_domain_data *p;
540
	int l0b, l1b;
541
542
	if (unlikely(test_and_set_bit(IPIPE_LOCK_FLAG,
543
				      &ipd->irqs[irq].control)))
544
		return;
545
546
	l0b = irq / (BITS_PER_LONG * BITS_PER_LONG);
547
	l1b = irq / BITS_PER_LONG;
548
549
	p = ipipe_percpudom_ptr(ipd, cpu);
550
	if (__test_and_clear_bit(irq, p->irqpend_lomap)) {
551
		__set_bit(irq, p->irqheld_map);
552
		if (p->irqpend_lomap[l1b] == 0) {
553
			__clear_bit(l1b, p->irqpend_mdmap);
554
			if (p->irqpend_mdmap[l0b] == 0)
555
				__clear_bit(l0b, &p->irqpend_himap);
556
		}
557
	}
558
}
559
560
/* Must be called hw IRQs off. */
561
void __ipipe_unlock_irq(struct ipipe_domain *ipd, unsigned int irq)
562
{
563
	struct ipipe_percpu_domain_data *p;
564
	int l0b, l1b, cpu;
565
566
	if (unlikely(!test_and_clear_bit(IPIPE_LOCK_FLAG,
567
					 &ipd->irqs[irq].control)))
568
		return;
569
570
	l0b = irq / (BITS_PER_LONG * BITS_PER_LONG);
571
	l1b = irq / BITS_PER_LONG;
572
573
	for_each_online_cpu(cpu) {
574
		p = ipipe_percpudom_ptr(ipd, cpu);
575
		if (test_and_clear_bit(irq, p->irqheld_map)) {
576
			/* We need atomic ops here: */
577
			set_bit(irq, p->irqpend_lomap);
578
			set_bit(l1b, p->irqpend_mdmap);
579
			set_bit(l0b, &p->irqpend_himap);
580
		}
581
	}
582
}
583
584
static inline int __ipipe_next_irq(struct ipipe_percpu_domain_data *p,
585
				   int dovirt)
586
{
587
	unsigned long l0m, l1m, l2m, himask, mdmask;
588
	int l0b, l1b, l2b, vl0b, vl1b;
589
	unsigned int irq;
590
591
	if (dovirt) {
592
		/*
593
		 * All virtual IRQs are mapped by a single long word.
594
		 * There are exactly BITS_PER_LONG virqs, and they are
595
		 * always last in the interrupt map, starting at
596
		 * IPIPE_VIRQ_BASE. Therefore, we only need to test a
597
		 * single bit within the high and middle maps to check
598
		 * whether a virtual IRQ is pending (the computations
599
		 * below are constant).
600
		 */
601
		vl0b = IPIPE_VIRQ_BASE / (BITS_PER_LONG * BITS_PER_LONG);
602
		himask = (1L << vl0b);
603
		vl1b = IPIPE_VIRQ_BASE / BITS_PER_LONG;
604
		mdmask = (1L << (vl1b & (BITS_PER_LONG-1)));
605
	} else
606
		himask = mdmask = ~0L;
607
608
	l0m = p->irqpend_himap & himask;
609
	if (unlikely(l0m == 0))
610
		return -1;
611
612
	l0b = __ipipe_ffnz(l0m);
613
	l1m = p->irqpend_mdmap[l0b] & mdmask;
614
	if (unlikely(l1m == 0))
615
		return -1;
616
617
	l1b = __ipipe_ffnz(l1m) + l0b * BITS_PER_LONG;
618
	l2m = p->irqpend_lomap[l1b];
619
	if (unlikely(l2m == 0))
620
		return -1;
621
622
	l2b = __ipipe_ffnz(l2m);
623
	irq = l1b * BITS_PER_LONG + l2b;
624
625
	__clear_bit(irq, p->irqpend_lomap);
626
	if (p->irqpend_lomap[l1b] == 0) {
627
		__clear_bit(l1b, p->irqpend_mdmap);
628
		if (p->irqpend_mdmap[l0b] == 0)
629
			__clear_bit(l0b, &p->irqpend_himap);
630
	}
631
632
	return irq;
633
}
634
635
#else /* __IPIPE_2LEVEL_IRQMAP */
636
637
/* Must be called hw IRQs off. */
638
static inline void __ipipe_set_irq_held(struct ipipe_percpu_domain_data *p,
639
					unsigned int irq)
640
{
641
	__set_bit(irq, p->irqheld_map);
642
	p->irqall[irq]++;
643
}
644
645
/* Must be called hw IRQs off. */
646
void __ipipe_set_irq_pending(struct ipipe_domain *ipd, unsigned irq)
647
{
648
	struct ipipe_percpu_domain_data *p = ipipe_cpudom_ptr(ipd);
649
	int l0b = irq / BITS_PER_LONG;
650
651
	prefetchw(p);
652
	
653
	if (likely(!test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))) {
654
		__set_bit(irq, p->irqpend_lomap);
655
		__set_bit(l0b, &p->irqpend_himap);
656
	} else
657
		__set_bit(irq, p->irqheld_map);
658
659
	p->irqall[irq]++;
660
}
661
662
/* Must be called hw IRQs off. */
663
void __ipipe_lock_irq(struct ipipe_domain *ipd, int cpu, unsigned irq)
664
{
665
	struct ipipe_percpu_domain_data *p;
666
	int l0b = irq / BITS_PER_LONG;
667
668
	if (unlikely(test_and_set_bit(IPIPE_LOCK_FLAG,
669
				      &ipd->irqs[irq].control)))
670
		return;
671
672
	p = ipipe_percpudom_ptr(ipd, cpu);
673
	if (__test_and_clear_bit(irq, p->irqpend_lomap)) {
674
		__set_bit(irq, p->irqheld_map);
675
		if (p->irqpend_lomap[l0b] == 0)
676
			__clear_bit(l0b, &p->irqpend_himap);
677
	}
678
}
679
680
/* Must be called hw IRQs off. */
681
void __ipipe_unlock_irq(struct ipipe_domain *ipd, unsigned irq)
682
{
683
	struct ipipe_percpu_domain_data *p;
684
	int l0b = irq / BITS_PER_LONG, cpu;
685
686
	if (unlikely(!test_and_clear_bit(IPIPE_LOCK_FLAG,
687
					 &ipd->irqs[irq].control)))
688
		return;
689
690
	for_each_online_cpu(cpu) {
691
		p = ipipe_percpudom_ptr(ipd, cpu);
692
		if (test_and_clear_bit(irq, p->irqheld_map)) {
693
			/* We need atomic ops here: */
694
			set_bit(irq, p->irqpend_lomap);
695
			set_bit(l0b, &p->irqpend_himap);
696
		}
697
	}
698
}
699
700
static inline int __ipipe_next_irq(struct ipipe_percpu_domain_data *p,
701
				   int dovirt)
702
{
703
	unsigned long l0m, l1m, himask = ~0L;
704
	int l0b, l1b;
705
706
	himask <<= dovirt ? IPIPE_VIRQ_BASE/BITS_PER_LONG : 0;
707
708
	l0m = p->irqpend_himap & himask;
709
	if (unlikely(l0m == 0))
710
		return -1;
711
712
	l0b = __ipipe_ffnz(l0m);
713
	l1m = p->irqpend_lomap[l0b];
714
	if (unlikely(l1m == 0))
715
		return -1;
716
717
	l1b = __ipipe_ffnz(l1m);
718
	__clear_bit(l1b, &p->irqpend_lomap[l0b]);
719
	if (p->irqpend_lomap[l0b] == 0)
720
		__clear_bit(l0b, &p->irqpend_himap);
721
722
	return l0b * BITS_PER_LONG + l1b;
723
}
724
725
#endif /* __IPIPE_2LEVEL_IRQMAP */
726
727
/*
728
 * __ipipe_walk_pipeline(): Plays interrupts pending in the log. Must
729
 * be called with local hw interrupts disabled.
730
 */
731
void __ipipe_walk_pipeline(struct list_head *pos)
732
{
733
	struct ipipe_domain *this_domain = __ipipe_current_domain, *next_domain;
734
	struct ipipe_percpu_domain_data *p, *np;
735
736
	p = ipipe_cpudom_ptr(this_domain);
737
738
	while (pos != &__ipipe_pipeline) {
739
740
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
741
		np = ipipe_cpudom_ptr(next_domain);
742
743
		if (test_bit(IPIPE_STALL_FLAG, &np->status))
744
			break;	/* Stalled stage -- do not go further. */
745
746
		if (__ipipe_ipending_p(np)) {
747
			if (next_domain == this_domain)
748
				__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
749
			else {
750
751
				p->evsync = 0;
752
				__ipipe_current_domain = next_domain;
753
				ipipe_suspend_domain();	/* Sync stage and propagate interrupts. */
754
755
				if (__ipipe_current_domain == next_domain)
756
					__ipipe_current_domain = this_domain;
757
				/*
758
				 * Otherwise, something changed the current domain under our
759
				 * feet recycling the register set; do not override the new
760
				 * domain.
761
				 */
762
763
				if (__ipipe_ipending_p(p) &&
764
				    !test_bit(IPIPE_STALL_FLAG, &p->status))
765
					__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
766
			}
767
			break;
768
		} else if (next_domain == this_domain)
769
			break;
770
771
		pos = next_domain->p_link.next;
772
	}
773
}
774
775
/*
776
 * ipipe_suspend_domain() -- Suspend the current domain, switching to
777
 * the next one which has pending work down the pipeline.
778
 */
779
void ipipe_suspend_domain(void)
780
{
781
	struct ipipe_domain *this_domain, *next_domain;
782
	struct ipipe_percpu_domain_data *p;
783
	struct list_head *ln;
784
	unsigned long flags;
785
786
	local_irq_save_hw(flags);
787
788
	this_domain = next_domain = __ipipe_current_domain;
789
	p = ipipe_cpudom_ptr(this_domain);
790
	p->status &= ~(IPIPE_STALL_MASK|IPIPE_SYNC_MASK);
791
792
	if (__ipipe_ipending_p(p))
793
		goto sync_stage;
794
795
	for (;;) {
796
		ln = next_domain->p_link.next;
797
798
		if (ln == &__ipipe_pipeline)
799
			break;
800
801
		next_domain = list_entry(ln, struct ipipe_domain, p_link);
802
		p = ipipe_cpudom_ptr(next_domain);
803
804
		if (p->status & IPIPE_STALL_MASK)
805
			break;
806
807
		if (!__ipipe_ipending_p(p))
808
			continue;
809
810
		__ipipe_current_domain = next_domain;
811
sync_stage:
812
		__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
813
814
		if (__ipipe_current_domain != next_domain)
815
			/*
816
			 * Something has changed the current domain under our
817
			 * feet, recycling the register set; take note.
818
			 */
819
			this_domain = __ipipe_current_domain;
820
	}
821
822
	__ipipe_current_domain = this_domain;
823
824
	local_irq_restore_hw(flags);
825
}
826
827
828
/* ipipe_alloc_virq() -- Allocate a pipelined virtual/soft interrupt.
829
 * Virtual interrupts are handled in exactly the same way as their
830
 * hw-generated counterparts wrt pipelining.
831
 */
832
unsigned ipipe_alloc_virq(void)
833
{
834
	unsigned long flags, irq = 0;
835
	int ipos;
836
837
	spin_lock_irqsave(&__ipipe_pipelock, flags);
838
839
	if (__ipipe_virtual_irq_map != ~0) {
840
		ipos = ffz(__ipipe_virtual_irq_map);
841
		set_bit(ipos, &__ipipe_virtual_irq_map);
842
		irq = ipos + IPIPE_VIRQ_BASE;
843
	}
844
845
	spin_unlock_irqrestore(&__ipipe_pipelock, flags);
846
847
	return irq;
848
}
849
850
/*
851
 * ipipe_virtualize_irq() -- Attach a handler to a pipelined interrupt for
852
 * the current domain.
853
 */
854
int ipipe_virtualize_irq(struct ipipe_domain *ipd,
855
			 unsigned irq,
856
			 ipipe_irq_handler_t handler,
857
			 void *cookie,
858
			 ipipe_irq_ackfn_t acknowledge,
859
			 unsigned modemask)
860
{
861
	ipipe_irq_handler_t old_handler;
862
	struct irq_desc *desc;
863
	unsigned long flags;
864
	int err;
865
866
	if (irq >= IPIPE_NR_IRQS)
867
		return -EINVAL;
868
869
	if (ipd->irqs[irq].control & IPIPE_SYSTEM_MASK)
870
		return -EPERM;
871
872
	if (!test_bit(IPIPE_AHEAD_FLAG, &ipd->flags))
873
		/* Silently unwire interrupts for non-heading domains. */
874
		modemask &= ~IPIPE_WIRED_MASK;
875
876
	spin_lock_irqsave(&__ipipe_pipelock, flags);
877
878
	old_handler = ipd->irqs[irq].handler;
879
880
	if (handler != NULL) {
881
		if (handler == IPIPE_SAME_HANDLER) {
882
			handler = old_handler;
883
			cookie = ipd->irqs[irq].cookie;
884
885
			if (handler == NULL) {
886
				err = -EINVAL;
887
				goto unlock_and_exit;
888
			}
889
		} else if ((modemask & IPIPE_EXCLUSIVE_MASK) != 0 &&
890
			   old_handler != NULL) {
891
			err = -EBUSY;
892
			goto unlock_and_exit;
893
		}
894
895
		/* Wired interrupts can only be delivered to domains
896
		 * always heading the pipeline, and using dynamic
897
		 * propagation. */
898
899
		if ((modemask & IPIPE_WIRED_MASK) != 0) {
900
			if ((modemask & (IPIPE_PASS_MASK | IPIPE_STICKY_MASK)) != 0) {
901
				err = -EINVAL;
902
				goto unlock_and_exit;
903
			}
904
			modemask |= (IPIPE_HANDLE_MASK);
905
		}
906
907
		if ((modemask & IPIPE_STICKY_MASK) != 0)
908
			modemask |= IPIPE_HANDLE_MASK;
909
	} else
910
		modemask &=
911
		    ~(IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK |
912
		      IPIPE_EXCLUSIVE_MASK | IPIPE_WIRED_MASK);
913
914
	if (acknowledge == NULL && !ipipe_virtual_irq_p(irq))
915
		/*
916
		 * Acknowledge handler unspecified for a hw interrupt:
917
		 * use the Linux-defined handler instead.
918
		 */
919
		acknowledge = ipipe_root_domain->irqs[irq].acknowledge;
920
921
	ipd->irqs[irq].handler = handler;
922
	ipd->irqs[irq].cookie = cookie;
923
	ipd->irqs[irq].acknowledge = acknowledge;
924
	ipd->irqs[irq].control = modemask;
925
926
	if (irq < NR_IRQS && !ipipe_virtual_irq_p(irq)) {
927
		desc = irq_to_desc(irq);
928
		if (handler != NULL) {
929
			if (desc)
930
				__ipipe_enable_irqdesc(ipd, irq);
931
932
			if ((modemask & IPIPE_ENABLE_MASK) != 0) {
933
				if (ipd != __ipipe_current_domain) {
934
		/*
935
		 * IRQ enable/disable state is domain-sensitive, so we
936
		 * may not change it for another domain. What is
937
		 * allowed however is forcing some domain to handle an
938
		 * interrupt source, by passing the proper 'ipd'
939
		 * descriptor which thus may be different from
940
		 * __ipipe_current_domain.
941
		 */
942
					err = -EPERM;
943
					goto unlock_and_exit;
944
				}
945
				if (desc)
946
					__ipipe_enable_irq(irq);
947
			}
948
		} else if (old_handler != NULL && desc)
949
				__ipipe_disable_irqdesc(ipd, irq);
950
	}
951
952
	err = 0;
953
954
      unlock_and_exit:
955
956
	spin_unlock_irqrestore(&__ipipe_pipelock, flags);
957
958
	return err;
959
}
960
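
For illustration only (not part of the patch), here is a minimal sketch of how a client could combine ipipe_alloc_virq() and ipipe_virtualize_irq() above; the handler name and cookie are invented, and error handling is kept to the bare minimum.

/* Illustrative sketch only -- not part of the patch. */
static void my_virq_handler(unsigned irq, void *cookie)
{
	/* Runs on behalf of the domain the virq was virtualized for. */
}

static int my_virq_setup(struct ipipe_domain *ipd)
{
	unsigned virq = ipipe_alloc_virq();
	int err;

	if (virq == 0)	/* no free slot in __ipipe_virtual_irq_map */
		return -EBUSY;

	/* NULL acknowledge is fine for virqs; see ipipe_virtualize_irq(). */
	err = ipipe_virtualize_irq(ipd, virq, my_virq_handler, NULL,
				   NULL, IPIPE_HANDLE_MASK);
	if (err) {
		ipipe_free_virq(virq);
		return err;
	}

	/* The virq can now be posted with ipipe_trigger_irq(virq). */
	return virq;
}
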
961
/* ipipe_control_irq() -- Change modes of a pipelined interrupt for
962
 * the current domain. */
963
964
int ipipe_control_irq(unsigned irq, unsigned clrmask, unsigned setmask)
965
{
966
	struct ipipe_domain *ipd;
967
	unsigned long flags;
968
969
	if (irq >= IPIPE_NR_IRQS)
970
		return -EINVAL;
971
972
	spin_lock_irqsave(&__ipipe_pipelock, flags);
973
974
	ipd = __ipipe_current_domain;
975
976
	if (ipd->irqs[irq].control & IPIPE_SYSTEM_MASK) {
977
		spin_unlock_irqrestore(&__ipipe_pipelock, flags);
978
		return -EPERM;
979
	}
980
981
	if (ipd->irqs[irq].handler == NULL)
982
		setmask &= ~(IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK);
983
984
	if ((setmask & IPIPE_STICKY_MASK) != 0)
985
		setmask |= IPIPE_HANDLE_MASK;
986
987
	if ((clrmask & (IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK)) != 0)	/* If one goes, both go. */
988
		clrmask |= (IPIPE_HANDLE_MASK | IPIPE_STICKY_MASK);
989
990
	ipd->irqs[irq].control &= ~clrmask;
991
	ipd->irqs[irq].control |= setmask;
992
993
	if ((setmask & IPIPE_ENABLE_MASK) != 0)
994
		__ipipe_enable_irq(irq);
995
	else if ((clrmask & IPIPE_ENABLE_MASK) != 0)
996
		__ipipe_disable_irq(irq);
997
998
	spin_unlock_irqrestore(&__ipipe_pipelock, flags);
999
1000
	return 0;
1001
}
1002
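
As a rough usage note (the IRQ number is a mere placeholder), masking and unmasking an interrupt for the calling domain through ipipe_control_irq() could look like this sketch:

/* Illustrative sketch only -- not part of the patch. */
static void my_mask_irq(unsigned irq)
{
	/* Stop handling and disable the line for the current domain only. */
	ipipe_control_irq(irq, IPIPE_HANDLE_MASK | IPIPE_ENABLE_MASK, 0);
}

static void my_unmask_irq(unsigned irq)
{
	/* Resume handling and re-enable the line for the current domain. */
	ipipe_control_irq(irq, 0, IPIPE_HANDLE_MASK | IPIPE_ENABLE_MASK);
}
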
1003
/* __ipipe_dispatch_event() -- Low-level event dispatcher. */
1004
1005
int __ipipe_dispatch_event (unsigned event, void *data)
1006
{
1007
	extern void *ipipe_irq_handler;
	void *handler;

	if (ipipe_irq_handler != __ipipe_handle_irq &&
	    (handler = ipipe_root_domain->evhand[event])) {
		return ((int (*)(unsigned long, void *))handler)(event, data);
	} else {
1008
	struct ipipe_domain *start_domain, *this_domain, *next_domain;
1009
	struct ipipe_percpu_domain_data *np;
1010
	ipipe_event_handler_t evhand;
1011
	struct list_head *pos, *npos;
1012
	unsigned long flags;
1013
	int propagate = 1;
1014
1015
	local_irq_save_hw(flags);
1016
1017
	start_domain = this_domain = __ipipe_current_domain;
1018
1019
	list_for_each_safe(pos, npos, &__ipipe_pipeline) {
1020
		/*
1021
		 * Note: Domain migration may occur while running
1022
		 * event or interrupt handlers, in which case the
1023
		 * current register set is going to be recycled for a
1024
		 * different domain than the initiating one. We take
1025
		 * care of that by always tracking the current domain
1026
		 * descriptor upon return from those handlers.
1027
		 */
1028
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
1029
		np = ipipe_cpudom_ptr(next_domain);
1030
1031
		/*
1032
		 * Keep a cached copy of the handler's address since
1033
		 * ipipe_catch_event() may clear it under our feet.
1034
		 */
1035
		evhand = next_domain->evhand[event];
1036
1037
		if (evhand != NULL) {
1038
			__ipipe_current_domain = next_domain;
1039
			np->evsync |= (1LL << event);
1040
			local_irq_restore_hw(flags);
1041
			propagate = !evhand(event, start_domain, data);
1042
			local_irq_save_hw(flags);
1043
			/*
1044
			 * We may have a migration issue here, if the
1045
			 * current task is migrated to another CPU on
1046
			 * behalf of the invoked handler, usually when
1047
			 * a syscall event is processed. However,
1048
			 * ipipe_catch_event() will make sure that a
1049
			 * CPU that clears a handler for any given
1050
			 * event will not attempt to wait for itself
1051
			 * to clear the evsync bit for that event,
1052
			 * which practically plugs the hole, without
1053
			 * resorting to a much more complex strategy.
1054
			 */
1055
			np->evsync &= ~(1LL << event);
1056
			if (__ipipe_current_domain != next_domain)
1057
				this_domain = __ipipe_current_domain;
1058
		}
1059
1060
		/* NEVER sync the root stage here. */
1061
		if (next_domain != ipipe_root_domain &&
1062
		    __ipipe_ipending_p(np) &&
1063
		    !test_bit(IPIPE_STALL_FLAG, &np->status)) {
1064
			__ipipe_current_domain = next_domain;
1065
			__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
1066
			if (__ipipe_current_domain != next_domain)
1067
				this_domain = __ipipe_current_domain;
1068
		}
1069
1070
		__ipipe_current_domain = this_domain;
1071
1072
		if (next_domain == this_domain || !propagate)
1073
			break;
1074
	}
1075
1076
	local_irq_restore_hw(flags);
1077
1078
	return !propagate;
1079
} }
1080
1081
/*
1082
 * __ipipe_dispatch_wired -- Wired interrupt dispatcher. Wired
1083
 * interrupts are immediately and unconditionally delivered to the
1084
 * domain heading the pipeline upon receipt, and such domain must have
1085
 * been registered as an invariant head for the system (priority ==
1086
 * IPIPE_HEAD_PRIORITY). The motivation for using wired interrupts is
1087
 * to get an extra-fast dispatching path for those IRQs, by relying on
1088
 * a straightforward logic based on assumptions that must always be
1089
 * true for invariant head domains.  The following assumptions are
1090
 * made when dealing with such interrupts:
1091
 *
1092
 * 1- Wired interrupts are purely dynamic, i.e. the decision to
1093
 * propagate them down the pipeline must be done from the head domain
1094
 * ISR.
1095
 * 2- Wired interrupts cannot be shared or sticky.
1096
 * 3- The root domain cannot be an invariant pipeline head; as a
1097
 * consequence, the root domain cannot handle wired
1098
 * interrupts.
1099
 * 4- Wired interrupts must have a valid acknowledge handler for the
1100
 * head domain (if needed, see __ipipe_handle_irq).
1101
 *
1102
 * Called with hw interrupts off.
1103
 */
1104
1105
void __ipipe_dispatch_wired(struct ipipe_domain *head, unsigned irq)
1106
{
1107
	struct ipipe_percpu_domain_data *p = ipipe_cpudom_ptr(head);
1108
1109
	prefetchw(p);
1110
1111
	if (unlikely(test_bit(IPIPE_LOCK_FLAG, &head->irqs[irq].control))) {
1112
		/*
1113
		 * If we can't process this IRQ right now, we must
1114
		 * mark it as held, so that it will get played during
1115
		 * normal log sync when the corresponding interrupt
1116
		 * source is eventually unlocked.
1117
		 */
1118
		__ipipe_set_irq_held(p, irq);
1119
		return;
1120
	}
1121
1122
	if (test_bit(IPIPE_STALL_FLAG, &p->status)) {
1123
		__ipipe_set_irq_pending(head, irq);
1124
		return;
1125
	}
1126
1127
	__ipipe_dispatch_wired_nocheck(head, irq);
1128
}
1129
1130
void __ipipe_dispatch_wired_nocheck(struct ipipe_domain *head, unsigned irq) /* hw interrupts off */
1131
{
1132
	struct ipipe_percpu_domain_data *p = ipipe_cpudom_ptr(head);
1133
	struct ipipe_domain *old;
1134
1135
	prefetchw(p);
1136
1137
	old = __ipipe_current_domain;
1138
	__ipipe_current_domain = head; /* Switch to the head domain. */
1139
1140
	p->irqall[irq]++;
1141
	__set_bit(IPIPE_STALL_FLAG, &p->status);
1142
	head->irqs[irq].handler(irq, head->irqs[irq].cookie); /* Call the ISR. */
1143
	__ipipe_run_irqtail();
1144
	__clear_bit(IPIPE_STALL_FLAG, &p->status);
1145
1146
	if (__ipipe_current_domain == head) {
1147
		__ipipe_current_domain = old;
1148
		if (old == head) {
1149
			if (__ipipe_ipending_p(p))
1150
				__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
1151
			return;
1152
		}
1153
	}
1154
1155
	__ipipe_walk_pipeline(&head->p_link);
1156
}
1157
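
A hedged sketch of how a head domain might take advantage of the wired fast path above; 'rt_domain' and 'rt_timer_isr' are invented names, and the domain is assumed to have been registered with IPIPE_HEAD_PRIORITY (otherwise IPIPE_WIRED_MASK is silently dropped, as noted in ipipe_virtualize_irq()).

/* Illustrative sketch only -- not part of the patch. */
static void rt_timer_isr(unsigned irq, void *cookie)
{
	/* Reached through __ipipe_dispatch_wired(), hw interrupts off. */
}

static int wire_timer_irq(struct ipipe_domain *rt_domain, unsigned irq)
{
	/*
	 * IPIPE_WIRED_MASK implies IPIPE_HANDLE_MASK and excludes
	 * PASS/STICKY; a NULL acknowledge falls back to the
	 * Linux-defined handler for this hw interrupt.
	 */
	return ipipe_virtualize_irq(rt_domain, irq, rt_timer_isr, NULL,
				    NULL,
				    IPIPE_WIRED_MASK | IPIPE_EXCLUSIVE_MASK);
}
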
1158
/*
1159
 * __ipipe_sync_stage() -- Flush the pending IRQs for the current
1160
 * domain (and processor). This routine flushes the interrupt log
1161
 * (see "Optimistic interrupt protection" from D. Stodolsky et al. for
1162
 * more on the deferred interrupt scheme). Every interrupt that
1163
 * occurred while the pipeline was stalled gets played. WARNING:
1164
 * callers on SMP boxen should always check for CPU migration on
1165
 * return from this routine.
1166
 *
1167
 * This routine must be called with hw interrupts off.
1168
 */
1169
void __ipipe_sync_stage(int dovirt)
1170
{
1171
	struct ipipe_percpu_domain_data *p;
1172
	struct ipipe_domain *ipd;
1173
	int cpu, irq;
1174
1175
	ipd = __ipipe_current_domain;
1176
	p = ipipe_cpudom_ptr(ipd);
1177
1178
	if (__test_and_set_bit(IPIPE_SYNC_FLAG, &p->status)) {
1179
		/*
1180
		 * Some questionable code in the root domain may enter
1181
		 * busy waits for IRQs over interrupt context, so we
1182
		 * unfortunately have to allow piling up IRQs for
1183
		 * them. Non-root domains are not allowed to do this.
1184
		 */
1185
		if (ipd != ipipe_root_domain)
1186
			return;
1187
	}
1188
1189
	cpu = ipipe_processor_id();
1190
1191
	for (;;) {
1192
		irq = __ipipe_next_irq(p, dovirt);
1193
		if (irq < 0)
1194
			break;
1195
		/*
1196
		 * Make sure the compiler does not reorder
1197
		 * wrongly, so that all updates to maps are
1198
		 * done before the handler gets called.
1199
		 */
1200
		barrier();
1201
1202
		if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))
1203
			continue;
1204
1205
		__set_bit(IPIPE_STALL_FLAG, &p->status);
1206
		smp_wmb();
1207
1208
		if (ipd == ipipe_root_domain)
1209
			trace_hardirqs_off();
1210
1211
		__ipipe_run_isr(ipd, irq);
1212
		barrier();
1213
		p = ipipe_cpudom_ptr(__ipipe_current_domain);
1214
#ifdef CONFIG_SMP
1215
		{
1216
			int newcpu = ipipe_processor_id();
1217
1218
			if (newcpu != cpu) {	/* Handle CPU migration. */
1219
				/*
1220
				 * We expect any domain to clear the SYNC bit each
1221
				 * time it switches in a new task, so that preemptions
1222
				 * and/or CPU migrations (in the SMP case) over the
1223
				 * ISR do not lock out the log syncer for some
1224
				 * indefinite amount of time. In the Linux case,
1225
				 * schedule() handles this (see kernel/sched.c). For
1226
				 * this reason, we don't bother clearing it here for
1227
				 * the source CPU in the migration handling case,
1228
				 * since it must have scheduled another task in by
1229
				 * now.
1230
				 */
1231
				__set_bit(IPIPE_SYNC_FLAG, &p->status);
1232
				cpu = newcpu;
1233
			}
1234
		}
1235
#endif	/* CONFIG_SMP */
1236
#ifdef CONFIG_TRACE_IRQFLAGS
1237
		if (__ipipe_root_domain_p &&
1238
		    test_bit(IPIPE_STALL_FLAG, &p->status))
1239
			trace_hardirqs_on();
1240
#endif
1241
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
1242
	}
1243
1244
	__clear_bit(IPIPE_SYNC_FLAG, &p->status);
1245
}
1246
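
The deferred-interrupt scheme that __ipipe_sync_stage() implements (the "optimistic interrupt protection" cited above) boils down to: log interrupts while the stage is stalled, replay the log when it is unstalled. The following standalone model is only meant to illustrate that idea, not the pipeline code itself.

/* Simplified standalone model of the deferred-interrupt scheme --
 * illustration only, not part of the patch. */
#define MODEL_NR_IRQS	32

static unsigned long model_pending;	/* interrupt log: one bit per IRQ */
static int model_stalled;		/* the stage is "virtually masked" */

static void model_handle(unsigned irq)
{
	/* Would run the ISR attached to 'irq'. */
}

static void model_irq_arrival(unsigned irq)
{
	if (model_stalled) {
		__set_bit(irq, &model_pending);	/* defer: log only */
		return;
	}
	model_handle(irq);
}

static void model_unstall(void)
{
	unsigned irq;

	model_stalled = 0;
	/* Replay everything logged while the stage was stalled. */
	for (irq = 0; irq < MODEL_NR_IRQS; irq++)
		if (__test_and_clear_bit(irq, &model_pending))
			model_handle(irq);
}
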
1247
/* ipipe_register_domain() -- Link a new domain to the pipeline. */
1248
1249
int ipipe_register_domain(struct ipipe_domain *ipd,
1250
			  struct ipipe_domain_attr *attr)
1251
{
1252
	struct ipipe_percpu_domain_data *p;
1253
	struct list_head *pos = NULL;
1254
	struct ipipe_domain *_ipd;
1255
	unsigned long flags;
1256
1257
	if (!ipipe_root_domain_p) {
1258
		printk(KERN_WARNING
1259
		       "I-pipe: Only the root domain may register a new domain.\n");
1260
		return -EPERM;
1261
	}
1262
1263
	flags = ipipe_critical_enter(NULL);
1264
1265
	if (attr->priority == IPIPE_HEAD_PRIORITY) {
1266
		if (test_bit(IPIPE_HEAD_SLOT, &__ipipe_domain_slot_map)) {
1267
			ipipe_critical_exit(flags);
1268
			return -EAGAIN;	/* Cannot override current head. */
1269
		}
1270
		ipd->slot = IPIPE_HEAD_SLOT;
1271
	} else
1272
		ipd->slot = ffz(__ipipe_domain_slot_map);
1273
1274
	if (ipd->slot < CONFIG_IPIPE_DOMAINS) {
1275
		set_bit(ipd->slot, &__ipipe_domain_slot_map);
1276
		list_for_each(pos, &__ipipe_pipeline) {
1277
			_ipd = list_entry(pos, struct ipipe_domain, p_link);
1278
			if (_ipd->domid == attr->domid)
1279
				break;
1280
		}
1281
	}
1282
1283
	ipipe_critical_exit(flags);
1284
1285
	if (pos != &__ipipe_pipeline) {
1286
		if (ipd->slot < CONFIG_IPIPE_DOMAINS)
1287
			clear_bit(ipd->slot, &__ipipe_domain_slot_map);
1288
		return -EBUSY;
1289
	}
1290
1291
#ifndef CONFIG_SMP
1292
	/*
1293
	 * Set up the perdomain pointers for direct access to the
1294
	 * percpu domain data. This saves a costly multiply each time
1295
	 * we need to refer to the contents of the percpu domain data
1296
	 * array.
1297
	 */
1298
	__raw_get_cpu_var(ipipe_percpu_daddr)[ipd->slot] = &__raw_get_cpu_var(ipipe_percpu_darray)[ipd->slot];
1299
#endif
1300
1301
	ipd->name = attr->name;
1302
	ipd->domid = attr->domid;
1303
	ipd->pdd = attr->pdd;
1304
	ipd->flags = 0;
1305
1306
	if (attr->priority == IPIPE_HEAD_PRIORITY) {
1307
		ipd->priority = INT_MAX;
1308
		__set_bit(IPIPE_AHEAD_FLAG,&ipd->flags);
1309
	}
1310
	else
1311
		ipd->priority = attr->priority;
1312
1313
	__ipipe_init_stage(ipd);
1314
1315
	INIT_LIST_HEAD(&ipd->p_link);
1316
1317
#ifdef CONFIG_PROC_FS
1318
	__ipipe_add_domain_proc(ipd);
1319
#endif /* CONFIG_PROC_FS */
1320
1321
	flags = ipipe_critical_enter(NULL);
1322
1323
	list_for_each(pos, &__ipipe_pipeline) {
1324
		_ipd = list_entry(pos, struct ipipe_domain, p_link);
1325
		if (ipd->priority > _ipd->priority)
1326
			break;
1327
	}
1328
1329
	list_add_tail(&ipd->p_link, pos);
1330
1331
	ipipe_critical_exit(flags);
1332
1333
	printk(KERN_INFO "I-pipe: Domain %s registered.\n", ipd->name);
1334
1335
	if (attr->entry == NULL)
1336
		return 0;
1337
1338
	/*
1339
	 * Finally, allow the new domain to perform its initialization
1340
	 * duties.
1341
	 */
1342
	local_irq_save_hw_smp(flags);
1343
	__ipipe_current_domain = ipd;
1344
	local_irq_restore_hw_smp(flags);
1345
	attr->entry();
1346
	local_irq_save_hw(flags);
1347
	__ipipe_current_domain = ipipe_root_domain;
1348
	p = ipipe_root_cpudom_ptr();
1349
1350
	if (__ipipe_ipending_p(p) &&
1351
	    !test_bit(IPIPE_STALL_FLAG, &p->status))
1352
		__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
1353
1354
	local_irq_restore_hw(flags);
1355
1356
	return 0;
1357
}
1358
1359
/* ipipe_unregister_domain() -- Remove a domain from the pipeline. */
1360
1361
int ipipe_unregister_domain(struct ipipe_domain *ipd)
1362
{
1363
	unsigned long flags;
1364
1365
	if (!ipipe_root_domain_p) {
1366
		printk(KERN_WARNING
1367
		       "I-pipe: Only the root domain may unregister a domain.\n");
1368
		return -EPERM;
1369
	}
1370
1371
	if (ipd == ipipe_root_domain) {
1372
		printk(KERN_WARNING
1373
		       "I-pipe: Cannot unregister the root domain.\n");
1374
		return -EPERM;
1375
	}
1376
#ifdef CONFIG_SMP
1377
	{
1378
		struct ipipe_percpu_domain_data *p;
1379
		unsigned int irq;
1380
		int cpu;
1381
1382
		/*
1383
		 * In the SMP case, wait for the logged events to drain on
1384
		 * other processors before eventually removing the domain
1385
		 * from the pipeline.
1386
		 */
1387
1388
		ipipe_unstall_pipeline_from(ipd);
1389
1390
		flags = ipipe_critical_enter(NULL);
1391
1392
		for (irq = 0; irq < IPIPE_NR_IRQS; irq++) {
1393
			clear_bit(IPIPE_HANDLE_FLAG, &ipd->irqs[irq].control);
1394
			clear_bit(IPIPE_STICKY_FLAG, &ipd->irqs[irq].control);
1395
			set_bit(IPIPE_PASS_FLAG, &ipd->irqs[irq].control);
1396
		}
1397
1398
		ipipe_critical_exit(flags);
1399
1400
		for_each_online_cpu(cpu) {
1401
			p = ipipe_percpudom_ptr(ipd, cpu);
1402
			while (__ipipe_ipending_p(p))
1403
				cpu_relax();
1404
		}
1405
	}
1406
#endif	/* CONFIG_SMP */
1407
1408
	mutex_lock(&ipd->mutex);
1409
1410
#ifdef CONFIG_PROC_FS
1411
	__ipipe_remove_domain_proc(ipd);
1412
#endif /* CONFIG_PROC_FS */
1413
1414
	/*
1415
	 * Simply remove the domain from the pipeline and we are almost done.
1416
	 */
1417
1418
	flags = ipipe_critical_enter(NULL);
1419
	list_del_init(&ipd->p_link);
1420
	ipipe_critical_exit(flags);
1421
1422
	__ipipe_cleanup_domain(ipd);
1423
1424
	mutex_unlock(&ipd->mutex);
1425
1426
	printk(KERN_INFO "I-pipe: Domain %s unregistered.\n", ipd->name);
1427
1428
	return 0;
1429
}
1430
1431
/*
1432
 * ipipe_propagate_irq() -- Force a given IRQ propagation on behalf of
1433
 * a running interrupt handler to the next domain down the pipeline.
1434
 * ipipe_schedule_irq() -- Does almost the same as above, but attempts
1435
 * to pend the interrupt for the current domain first.
1436
 * Must be called hw IRQs off.
1437
 */
1438
void __ipipe_pend_irq(unsigned irq, struct list_head *head)
1439
{
1440
	struct ipipe_domain *ipd;
1441
	struct list_head *ln;
1442
1443
#ifdef CONFIG_IPIPE_DEBUG
1444
	BUG_ON(irq >= IPIPE_NR_IRQS ||
1445
	       (ipipe_virtual_irq_p(irq)
1446
		&& !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)));
1447
#endif
1448
	for (ln = head; ln != &__ipipe_pipeline; ln = ipd->p_link.next) {
1449
		ipd = list_entry(ln, struct ipipe_domain, p_link);
1450
		if (test_bit(IPIPE_HANDLE_FLAG, &ipd->irqs[irq].control)) {
1451
			__ipipe_set_irq_pending(ipd, irq);
1452
			return;
1453
		}
1454
	}
1455
}
1456
1457
/* ipipe_free_virq() -- Release a virtual/soft interrupt. */
1458
1459
int ipipe_free_virq(unsigned virq)
1460
{
1461
	if (!ipipe_virtual_irq_p(virq))
1462
		return -EINVAL;
1463
1464
	clear_bit(virq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map);
1465
1466
	return 0;
1467
}
1468
1469
void ipipe_init_attr(struct ipipe_domain_attr *attr)
1470
{
1471
	attr->name = "anon";
1472
	attr->domid = 1;
1473
	attr->entry = NULL;
1474
	attr->priority = IPIPE_ROOT_PRIO;
1475
	attr->pdd = NULL;
1476
}
1477
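
A plausible registration sequence built on ipipe_init_attr() and ipipe_register_domain() is sketched below; the domain name, id and entry hook are invented, and real clients would do their own setup from the entry callback.

/* Illustrative sketch only -- not part of the patch. */
static struct ipipe_domain my_domain;

static void my_domain_entry(void)
{
	/* Runs once, with __ipipe_current_domain == &my_domain. */
}

static int my_domain_register(void)
{
	struct ipipe_domain_attr attr;

	ipipe_init_attr(&attr);
	attr.name = "mydom";
	attr.domid = 0x4d59444d;		/* arbitrary id */
	attr.priority = IPIPE_HEAD_PRIORITY;	/* invariant pipeline head */
	attr.entry = my_domain_entry;

	return ipipe_register_domain(&my_domain, &attr);
}
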
1478
/*
1479
 * ipipe_catch_event() -- Interpose or remove an event handler for a
1480
 * given domain.
1481
 */
1482
ipipe_event_handler_t ipipe_catch_event(struct ipipe_domain *ipd,
1483
					unsigned event,
1484
					ipipe_event_handler_t handler)
1485
{
1486
	ipipe_event_handler_t old_handler;
1487
	unsigned long flags;
1488
	int self = 0, cpu;
1489
1490
	if (event & IPIPE_EVENT_SELF) {
1491
		event &= ~IPIPE_EVENT_SELF;
1492
		self = 1;
1493
	}
1494
1495
	if (event >= IPIPE_NR_EVENTS)
1496
		return NULL;
1497
1498
	flags = ipipe_critical_enter(NULL);
1499
1500
	if (!(old_handler = xchg(&ipd->evhand[event],handler)))	{
1501
		if (handler) {
1502
			if (self)
1503
				ipd->evself |= (1LL << event);
1504
			else
1505
				__ipipe_event_monitors[event]++;
1506
		}
1507
	}
1508
	else if (!handler) {
1509
		if (ipd->evself & (1LL << event))
1510
			ipd->evself &= ~(1LL << event);
1511
		else
1512
			__ipipe_event_monitors[event]--;
1513
	} else if ((ipd->evself & (1LL << event)) && !self) {
1514
			__ipipe_event_monitors[event]++;
1515
			ipd->evself &= ~(1LL << event);
1516
	} else if (!(ipd->evself & (1LL << event)) && self) {
1517
			__ipipe_event_monitors[event]--;
1518
			ipd->evself |= (1LL << event);
1519
	}
1520
1521
	ipipe_critical_exit(flags);
1522
1523
	if (!handler && ipipe_root_domain_p) {
1524
		/*
1525
		 * If we cleared a handler on behalf of the root
1526
		 * domain, we have to wait for any current invocation
1527
		 * to drain, since our caller might subsequently unmap
1528
		 * the target domain. To this aim, this code
1529
		 * synchronizes with __ipipe_dispatch_event(),
1530
		 * guaranteeing that either the dispatcher sees a null
1531
		 * handler in which case it discards the invocation
1532
		 * (which also prevents from entering a livelock), or
1533
		 * finds a valid handler and calls it. Symmetrically,
1534
		 * ipipe_catch_event() ensures that the called code
1535
		 * won't be unmapped under our feet until the event
1536
		 * synchronization flag is cleared for the given event
1537
		 * on all CPUs.
1538
		 */
1539
		preempt_disable();
1540
		cpu = smp_processor_id();
1541
		/*
1542
		 * Hack: this solves the potential migration issue
1543
		 * raised in __ipipe_dispatch_event(). This is a
1544
		 * work-around which makes the assumption that other
1545
		 * CPUs will subsequently either process at least one
1546
		 * interrupt for the target domain, or call
1547
		 * __ipipe_dispatch_event() without going through a
1548
		 * migration while running the handler at least once;
1549
		 * practically, this is safe on any normally running
1550
		 * system.
1551
		 */
1552
		ipipe_percpudom(ipd, evsync, cpu) &= ~(1LL << event);
1553
		preempt_enable();
1554
1555
		for_each_online_cpu(cpu) {
1556
			while (ipipe_percpudom(ipd, evsync, cpu) & (1LL << event))
1557
				schedule_timeout_interruptible(HZ / 50);
1558
		}
1559
	}
1560
1561
	return old_handler;
1562
}
1563
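
A short usage sketch for ipipe_catch_event(); IPIPE_EVENT_SYSCALL is assumed to be the usual event constant from linux/ipipe.h, and the handler name is invented. Returning non-zero from the handler stops propagation down the pipeline, as __ipipe_dispatch_event() showed earlier.

/* Illustrative sketch only -- not part of the patch. */
static int my_syscall_event(unsigned event, struct ipipe_domain *from,
			    void *data)
{
	/* Non-zero return => event is not propagated any further. */
	return 0;
}

static void my_catch_syscalls(struct ipipe_domain *ipd)
{
	ipipe_event_handler_t old;

	old = ipipe_catch_event(ipd, IPIPE_EVENT_SYSCALL, my_syscall_event);
	/* Passing a NULL handler later on removes the hook again. */
	(void)old;
}
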
1564
cpumask_t ipipe_set_irq_affinity (unsigned irq, cpumask_t cpumask)
1565
{
1566
#ifdef CONFIG_SMP
1567
	if (irq >= NR_IRQS) // if (irq >= IPIPE_NR_XIRQS)
1568
		/* Allow changing affinity of external IRQs only. */
1569
		return CPU_MASK_NONE;
1570
1571
	if (num_online_cpus() > 1)
1572
		return __ipipe_set_irq_affinity(irq,cpumask);
1573
#endif /* CONFIG_SMP */
1574
1575
	return CPU_MASK_NONE;
1576
}
1577
1578
int ipipe_send_ipi (unsigned ipi, cpumask_t cpumask)
1579
1580
{
1581
#ifdef CONFIG_SMP
1582
	return __ipipe_send_ipi(ipi,cpumask);
1583
#else /* !CONFIG_SMP */
1584
	return -EINVAL;
1585
#endif /* CONFIG_SMP */
1586
}
1587
1588
int ipipe_alloc_ptdkey (void)
1589
{
1590
	unsigned long flags;
1591
	int key = -1;
1592
1593
	spin_lock_irqsave(&__ipipe_pipelock,flags);
1594
1595
	if (__ipipe_ptd_key_count < IPIPE_ROOT_NPTDKEYS) {
1596
		key = ffz(__ipipe_ptd_key_map);
1597
		set_bit(key,&__ipipe_ptd_key_map);
1598
		__ipipe_ptd_key_count++;
1599
	}
1600
1601
	spin_unlock_irqrestore(&__ipipe_pipelock,flags);
1602
1603
	return key;
1604
}
1605
1606
int ipipe_free_ptdkey (int key)
1607
{
1608
	unsigned long flags;
1609
1610
	if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS)
1611
		return -EINVAL;
1612
1613
	spin_lock_irqsave(&__ipipe_pipelock,flags);
1614
1615
	if (test_and_clear_bit(key,&__ipipe_ptd_key_map))
1616
		__ipipe_ptd_key_count--;
1617
1618
	spin_unlock_irqrestore(&__ipipe_pipelock,flags);
1619
1620
	return 0;
1621
}
1622
1623
int ipipe_set_ptd (int key, void *value)
1624
1625
{
1626
	if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS)
1627
		return -EINVAL;
1628
1629
	current->ptd[key] = value;
1630
1631
	return 0;
1632
}
1633
1634
void *ipipe_get_ptd (int key)
1635
1636
{
1637
	if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS)
1638
		return NULL;
1639
1640
	return current->ptd[key];
1641
}
1642
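
The PTD helpers above form a small per-task key/value store; a plausible usage pattern, with invented names, is sketched below.

/* Illustrative sketch only -- not part of the patch. */
static int my_ptd_key = -1;

static int my_ptd_init(void)
{
	my_ptd_key = ipipe_alloc_ptdkey();
	return my_ptd_key < 0 ? -EBUSY : 0;
}

static void my_ptd_attach(void *per_task_state)
{
	/* Hang some private state off the current task... */
	ipipe_set_ptd(my_ptd_key, per_task_state);
}

static void *my_ptd_lookup(void)
{
	/* ...and read it back later from the same task's context. */
	return ipipe_get_ptd(my_ptd_key);
}

static void my_ptd_cleanup(void)
{
	ipipe_free_ptdkey(my_ptd_key);
}
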
1643
#ifdef CONFIG_PROC_FS
1644
1645
struct proc_dir_entry *ipipe_proc_root;
1646
1647
static int __ipipe_version_info_proc(char *page,
1648
				     char **start,
1649
				     off_t off, int count, int *eof, void *data)
1650
{
1651
	int len = sprintf(page, "%s\n", IPIPE_VERSION_STRING);
1652
1653
	len -= off;
1654
1655
	if (len <= off + count)
1656
		*eof = 1;
1657
1658
	*start = page + off;
1659
1660
	if(len > count)
1661
		len = count;
1662
1663
	if(len < 0)
1664
		len = 0;
1665
1666
	return len;
1667
}
1668
1669
static int __ipipe_common_info_show(struct seq_file *p, void *data)
1670
{
1671
	struct ipipe_domain *ipd = (struct ipipe_domain *)p->private;
1672
	char handling, stickiness, lockbit, exclusive, virtuality;
1673
1674
	unsigned long ctlbits;
1675
	unsigned irq;
1676
1677
	seq_printf(p, "       +----- Handling ([A]ccepted, [G]rabbed, [W]ired, [D]iscarded)\n");
1678
	seq_printf(p, "       |+---- Sticky\n");
1679
	seq_printf(p, "       ||+--- Locked\n");
1680
	seq_printf(p, "       |||+-- Exclusive\n");
1681
	seq_printf(p, "       ||||+- Virtual\n");
1682
	seq_printf(p, "[IRQ]  |||||\n");
1683
1684
	mutex_lock(&ipd->mutex);
1685
1686
	for (irq = 0; irq < IPIPE_NR_IRQS; irq++) {
1687
		/* Remember to protect against
1688
		 * ipipe_virtual_irq/ipipe_control_irq if more fields
1689
		 * get involved. */
1690
		ctlbits = ipd->irqs[irq].control;
1691
1692
		if (irq >= IPIPE_NR_XIRQS && !ipipe_virtual_irq_p(irq))
1693
			/*
1694
			 * There might be a hole between the last external
1695
			 * IRQ and the first virtual one; skip it.
1696
			 */
1697
			continue;
1698
1699
		if (ipipe_virtual_irq_p(irq)
1700
		    && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map))
1701
			/* Non-allocated virtual IRQ; skip it. */
1702
			continue;
1703
1704
		/*
1705
		 * Statuses are as follows:
1706
		 * o "accepted" means handled _and_ passed down the pipeline.
1707
		 * o "grabbed" means handled, but the interrupt might be
1708
		 * terminated _or_ passed down the pipeline depending on
1709
		 * what the domain handler asks the I-pipe for.
1710
		 * o "wired" is basically the same as "grabbed", except that
1711
		 * the interrupt is unconditionally delivered to an invariant
1712
		 * pipeline head domain.
1713
		 * o "passed" means unhandled by the domain but passed
1714
		 * down the pipeline.
1715
		 * o "discarded" means unhandled and _not_ passed down the
1716
		 * pipeline. The interrupt merely disappears from the
1717
		 * current domain down to the end of the pipeline.
1718
		 */
1719
		if (ctlbits & IPIPE_HANDLE_MASK) {
1720
			if (ctlbits & IPIPE_PASS_MASK)
1721
				handling = 'A';
1722
			else if (ctlbits & IPIPE_WIRED_MASK)
1723
				handling = 'W';
1724
			else
1725
				handling = 'G';
1726
		} else if (ctlbits & IPIPE_PASS_MASK)
1727
			/* Do not output if no major action is taken. */
1728
			continue;
1729
		else
1730
			handling = 'D';
1731
1732
		if (ctlbits & IPIPE_STICKY_MASK)
1733
			stickiness = 'S';
1734
		else
1735
			stickiness = '.';
1736
1737
		if (ctlbits & IPIPE_LOCK_MASK)
1738
			lockbit = 'L';
1739
		else
1740
			lockbit = '.';
1741
1742
		if (ctlbits & IPIPE_EXCLUSIVE_MASK)
1743
			exclusive = 'X';
1744
		else
1745
			exclusive = '.';
1746
1747
		if (ipipe_virtual_irq_p(irq))
1748
			virtuality = 'V';
1749
		else
1750
			virtuality = '.';
1751
1752
		seq_printf(p, " %3u:  %c%c%c%c%c\n",
1753
			     irq, handling, stickiness, lockbit, exclusive, virtuality);
1754
	}
1755
1756
	seq_printf(p, "[Domain info]\n");
1757
1758
	seq_printf(p, "id=0x%.8x\n", ipd->domid);
1759
1760
	if (test_bit(IPIPE_AHEAD_FLAG,&ipd->flags))
1761
		seq_printf(p, "priority=topmost\n");
1762
	else
1763
		seq_printf(p, "priority=%d\n", ipd->priority);
1764
1765
	mutex_unlock(&ipd->mutex);
1766
1767
	return 0;
1768
}
1769
1770
static int __ipipe_common_info_open(struct inode *inode, struct file *file)
1771
{
1772
	return single_open(file, __ipipe_common_info_show, PROC_I(inode)->pde->data);
1773
}
1774
1775
static struct file_operations __ipipe_info_proc_ops = {
1776
	.owner		= THIS_MODULE,
1777
	.open		= __ipipe_common_info_open,
1778
	.read		= seq_read,
1779
	.llseek		= seq_lseek,
1780
	.release	= single_release,
1781
};
1782
1783
void __ipipe_add_domain_proc(struct ipipe_domain *ipd)
1784
{
1785
	struct proc_dir_entry *e = create_proc_entry(ipd->name, 0444, ipipe_proc_root);
1786
	if (e) {
1787
		e->proc_fops = &__ipipe_info_proc_ops;
1788
		e->data = (void*) ipd;
1789
	}
1790
}
1791
1792
void __ipipe_remove_domain_proc(struct ipipe_domain *ipd)
1793
{
1794
	remove_proc_entry(ipd->name,ipipe_proc_root);
1795
}
1796
1797
void __init ipipe_init_proc(void)
1798
{
1799
	ipipe_proc_root = create_proc_entry("ipipe",S_IFDIR, 0);
1800
	create_proc_read_entry("version",0444,ipipe_proc_root,&__ipipe_version_info_proc,NULL);
1801
	__ipipe_add_domain_proc(ipipe_root_domain);
1802
1803
	__ipipe_init_tracer();
1804
}
1805
1806
#endif	/* CONFIG_PROC_FS */
1807
1808
#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
1809
1810
DEFINE_PER_CPU(int, ipipe_percpu_context_check) = { 1 };
1811
DEFINE_PER_CPU(int, ipipe_saved_context_check_state);
1812
1813
void ipipe_check_context(struct ipipe_domain *border_domain)
1814
{
1815
	struct ipipe_percpu_domain_data *p;
1816
	struct ipipe_domain *this_domain;
1817
	unsigned long flags;
1818
	int cpu;
1819

1820
	local_irq_save_hw_smp(flags);
1821
1822
	this_domain = __ipipe_current_domain;
1823
	p = ipipe_head_cpudom_ptr();
1824
	if (likely(this_domain->priority <= border_domain->priority &&
1825
		   !test_bit(IPIPE_STALL_FLAG, &p->status))) {
1826
		local_irq_restore_hw_smp(flags);
1827
		return;
1828
	}
1829

1830
	cpu = ipipe_processor_id();
1831
	if (!per_cpu(ipipe_percpu_context_check, cpu)) {
1832
		local_irq_restore_hw_smp(flags);
1833
		return;
1834
	}
1835

1836
	local_irq_restore_hw_smp(flags);
1837
1838
	ipipe_context_check_off();
1839
	ipipe_trace_panic_freeze();
1840
	ipipe_set_printk_sync(__ipipe_current_domain);
1841
1842
	if (this_domain->priority > border_domain->priority)
1843
		printk(KERN_ERR "I-pipe: Detected illicit call from domain "
1844
				"'%s'\n"
1845
		       KERN_ERR "        into a service reserved for domain "
1846
				"'%s' and below.\n",
1847
		       this_domain->name, border_domain->name);
1848
	else
1849
		printk(KERN_ERR "I-pipe: Detected stalled topmost domain, "
1850
				"probably caused by a bug.\n"
1851
				"        A critical section may have been "
1852
				"left unterminated.\n");
1853
	dump_stack();
1854
	ipipe_trace_panic_dump();
1855
}
1856
1857
EXPORT_SYMBOL(ipipe_check_context);
1858
1859
#endif /* CONFIG_IPIPE_DEBUG_CONTEXT */
1860
1861
#if defined(CONFIG_IPIPE_DEBUG_INTERNAL) && defined(CONFIG_SMP)
1862
1863
int notrace __ipipe_check_percpu_access(void)
1864
{
1865
	struct ipipe_percpu_domain_data *p;
1866
	struct ipipe_domain *this_domain;
1867
	unsigned long flags;
1868
	int ret = 0;
1869
1870
	local_irq_save_hw_notrace(flags);
1871
1872
	this_domain = __raw_get_cpu_var(ipipe_percpu_domain);
1873
1874
	/*
1875
	 * Only the root domain may implement preemptive CPU migration
1876
	 * of tasks, so anything above in the pipeline should be fine.
1877
	 */
1878
	if (this_domain->priority > IPIPE_ROOT_PRIO)
1879
		goto out;
1880
1881
	if (raw_irqs_disabled_flags(flags))
1882
		goto out;
1883
1884
	/*
1885
	 * Last chance: hw interrupts were enabled on entry while
1886
	 * running over the root domain, but the root stage might be
1887
	 * currently stalled, in which case preemption would be
1888
	 * disabled, and no migration could occur.
1889
	 */
1890
	if (this_domain == ipipe_root_domain) {
1891
		p = ipipe_root_cpudom_ptr(); 
1892
		if (test_bit(IPIPE_STALL_FLAG, &p->status))
1893
			goto out;
1894
	}
1895
	/*
1896
	 * Our caller may end up accessing the wrong per-cpu variable
1897
	 * instance due to CPU migration; tell it to complain about
1898
	 * this.
1899
	 */
1900
	ret = 1;
1901
out:
1902
	local_irq_restore_hw_notrace(flags);
1903
1904
	return ret;
1905
}
1906
1907
#endif /* CONFIG_IPIPE_DEBUG_INTERNAL && CONFIG_SMP */
1908
1909
EXPORT_SYMBOL(ipipe_virtualize_irq);
1910
EXPORT_SYMBOL(ipipe_control_irq);
1911
EXPORT_SYMBOL(ipipe_suspend_domain);
1912
EXPORT_SYMBOL(ipipe_alloc_virq);
1913
EXPORT_PER_CPU_SYMBOL(ipipe_percpu_domain);
1914
EXPORT_PER_CPU_SYMBOL(ipipe_percpu_darray);
1915
EXPORT_SYMBOL(ipipe_root);
1916
EXPORT_SYMBOL(ipipe_stall_pipeline_from);
1917
EXPORT_SYMBOL(ipipe_test_and_stall_pipeline_from);
1918
EXPORT_SYMBOL(ipipe_test_and_unstall_pipeline_from);
1919
EXPORT_SYMBOL(ipipe_restore_pipeline_from);
1920
EXPORT_SYMBOL(ipipe_unstall_pipeline_head);
1921
EXPORT_SYMBOL(__ipipe_restore_pipeline_head);
1922
EXPORT_SYMBOL(__ipipe_unstall_root);
1923
EXPORT_SYMBOL(__ipipe_restore_root);
1924
EXPORT_SYMBOL(__ipipe_spin_lock_irq);
1925
EXPORT_SYMBOL(__ipipe_spin_unlock_irq);
1926
EXPORT_SYMBOL(__ipipe_spin_lock_irqsave);
1927
EXPORT_SYMBOL(__ipipe_spin_unlock_irqrestore);
1928
EXPORT_SYMBOL(__ipipe_pipeline);
1929
EXPORT_SYMBOL(__ipipe_lock_irq);
1930
EXPORT_SYMBOL(__ipipe_unlock_irq);
1931
EXPORT_SYMBOL(ipipe_register_domain);
1932
EXPORT_SYMBOL(ipipe_unregister_domain);
1933
EXPORT_SYMBOL(ipipe_free_virq);
1934
EXPORT_SYMBOL(ipipe_init_attr);
1935
EXPORT_SYMBOL(ipipe_catch_event);
1936
EXPORT_SYMBOL(ipipe_alloc_ptdkey);
1937
EXPORT_SYMBOL(ipipe_free_ptdkey);
1938
EXPORT_SYMBOL(ipipe_set_ptd);
1939
EXPORT_SYMBOL(ipipe_get_ptd);
1940
EXPORT_SYMBOL(ipipe_set_irq_affinity);
1941
EXPORT_SYMBOL(ipipe_send_ipi);
1942
EXPORT_SYMBOL(__ipipe_pend_irq);
1943
EXPORT_SYMBOL(__ipipe_set_irq_pending);
1944
#if defined(CONFIG_IPIPE_DEBUG_INTERNAL) && defined(CONFIG_SMP)
1945
EXPORT_SYMBOL(__ipipe_check_percpu_access);
1946
#endif
1947
#ifdef CONFIG_GENERIC_CLOCKEVENTS
1948
EXPORT_SYMBOL(ipipe_request_tickdev);
1949
EXPORT_SYMBOL(ipipe_release_tickdev);
1950
#endif
1951
1952
EXPORT_SYMBOL(ipipe_critical_enter);
1953
EXPORT_SYMBOL(ipipe_critical_exit);
1954
EXPORT_SYMBOL(ipipe_trigger_irq);
1955
EXPORT_SYMBOL(ipipe_get_sysinfo);
(-)a/kernel/ipipe/tracer.c (+1441 lines)
Line 0 Link Here
1
/* -*- linux-c -*-
2
 * kernel/ipipe/tracer.c
3
 *
4
 * Copyright (C) 2005 Luotao Fu.
5
 *               2005-2008 Jan Kiszka.
6
 *
7
 * This program is free software; you can redistribute it and/or modify
8
 * it under the terms of the GNU General Public License as published by
9
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
10
 * USA; either version 2 of the License, or (at your option) any later
11
 * version.
12
 *
13
 * This program is distributed in the hope that it will be useful,
14
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16
 * GNU General Public License for more details.
17
 *
18
 * You should have received a copy of the GNU General Public License
19
 * along with this program; if not, write to the Free Software
20
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21
 */
22
23
#include <linux/kernel.h>
24
#include <linux/module.h>
25
#include <linux/version.h>
26
#include <linux/kallsyms.h>
27
#include <linux/seq_file.h>
28
#include <linux/proc_fs.h>
29
#include <linux/ctype.h>
30
#include <linux/vmalloc.h>
31
#include <linux/pid.h>
32
#include <linux/utsrelease.h>
33
#include <linux/sched.h>
34
#include <linux/ipipe.h>
35
#include <linux/ftrace.h>
36
#include <asm/uaccess.h>
37
38
#define IPIPE_TRACE_PATHS           4 /* <!> Do not lower below 3 */
39
#define IPIPE_DEFAULT_ACTIVE        0
40
#define IPIPE_DEFAULT_MAX           1
41
#define IPIPE_DEFAULT_FROZEN        2
42
43
#define IPIPE_TRACE_POINTS          (1 << CONFIG_IPIPE_TRACE_SHIFT)
44
#define WRAP_POINT_NO(point)        ((point) & (IPIPE_TRACE_POINTS-1))
45
46
#define IPIPE_DEFAULT_PRE_TRACE     10
47
#define IPIPE_DEFAULT_POST_TRACE    10
48
#define IPIPE_DEFAULT_BACK_TRACE    100
49
50
#define IPIPE_DELAY_NOTE            1000  /* in nanoseconds */
51
#define IPIPE_DELAY_WARN            10000 /* in nanoseconds */
52
53
#define IPIPE_TFLG_NMI_LOCK         0x0001
54
#define IPIPE_TFLG_NMI_HIT          0x0002
55
#define IPIPE_TFLG_NMI_FREEZE_REQ   0x0004
56
57
#define IPIPE_TFLG_HWIRQ_OFF        0x0100
58
#define IPIPE_TFLG_FREEZING         0x0200
59
#define IPIPE_TFLG_CURRDOM_SHIFT    10   /* bits 10..11: current domain */
60
#define IPIPE_TFLG_CURRDOM_MASK     0x0C00
61
#define IPIPE_TFLG_DOMSTATE_SHIFT   12   /* bits 12..15: domain stalled? */
62
#define IPIPE_TFLG_DOMSTATE_BITS    3
63
64
#define IPIPE_TFLG_DOMAIN_STALLED(point, n) \
65
	(point->flags & (1 << (n + IPIPE_TFLG_DOMSTATE_SHIFT)))
66
#define IPIPE_TFLG_CURRENT_DOMAIN(point) \
67
	((point->flags & IPIPE_TFLG_CURRDOM_MASK) >> IPIPE_TFLG_CURRDOM_SHIFT)
68
69
struct ipipe_trace_point {
70
	short type;
71
	short flags;
72
	unsigned long eip;
73
	unsigned long parent_eip;
74
	unsigned long v;
75
	unsigned long long timestamp;
76
};
77
78
struct ipipe_trace_path {
79
	volatile int flags;
80
	int dump_lock; /* separated from flags due to cross-cpu access */
81
	int trace_pos; /* next point to fill */
82
	int begin, end; /* finalised path begin and end */
83
	int post_trace; /* non-zero when in post-trace phase */
84
	unsigned long long length; /* max path length in cycles */
85
	unsigned long nmi_saved_eip; /* for deferred requests from NMIs */
86
	unsigned long nmi_saved_parent_eip;
87
	unsigned long nmi_saved_v;
88
	struct ipipe_trace_point point[IPIPE_TRACE_POINTS];
89
} ____cacheline_aligned_in_smp;
90
91
enum ipipe_trace_type
92
{
93
	IPIPE_TRACE_FUNC = 0,
94
	IPIPE_TRACE_BEGIN,
95
	IPIPE_TRACE_END,
96
	IPIPE_TRACE_FREEZE,
97
	IPIPE_TRACE_SPECIAL,
98
	IPIPE_TRACE_PID,
99
	IPIPE_TRACE_EVENT,
100
};
101
102
#define IPIPE_TYPE_MASK             0x0007
103
#define IPIPE_TYPE_BITS             3
104
105
#ifdef CONFIG_IPIPE_TRACE_VMALLOC
106
static DEFINE_PER_CPU(struct ipipe_trace_path *, trace_path);
107
#else /* !CONFIG_IPIPE_TRACE_VMALLOC */
108
static DEFINE_PER_CPU(struct ipipe_trace_path, trace_path[IPIPE_TRACE_PATHS]) =
109
	{ [0 ... IPIPE_TRACE_PATHS-1] = { .begin = -1, .end = -1 } };
110
#endif /* CONFIG_IPIPE_TRACE_VMALLOC */
111
112
int ipipe_trace_enable = 0;
113
114
static DEFINE_PER_CPU(int, active_path) = { IPIPE_DEFAULT_ACTIVE };
115
static DEFINE_PER_CPU(int, max_path) = { IPIPE_DEFAULT_MAX };
116
static DEFINE_PER_CPU(int, frozen_path) = { IPIPE_DEFAULT_FROZEN };
117
static IPIPE_DEFINE_SPINLOCK(global_path_lock);
118
static int pre_trace = IPIPE_DEFAULT_PRE_TRACE;
119
static int post_trace = IPIPE_DEFAULT_POST_TRACE;
120
static int back_trace = IPIPE_DEFAULT_BACK_TRACE;
121
static int verbose_trace = 1;
122
static unsigned long trace_overhead;
123
124
static unsigned long trigger_begin;
125
static unsigned long trigger_end;
126
127
static DEFINE_MUTEX(out_mutex);
128
static struct ipipe_trace_path *print_path;
129
#ifdef CONFIG_IPIPE_TRACE_PANIC
130
static struct ipipe_trace_path *panic_path;
131
#endif /* CONFIG_IPIPE_TRACE_PANIC */
132
static int print_pre_trace;
133
static int print_post_trace;
134
135
136
static long __ipipe_signed_tsc2us(long long tsc);
137
static void
138
__ipipe_trace_point_type(char *buf, struct ipipe_trace_point *point);
139
static void __ipipe_print_symname(struct seq_file *m, unsigned long eip);
140
141
142
static notrace void
143
__ipipe_store_domain_states(struct ipipe_trace_point *point)
144
{
145
	struct ipipe_domain *ipd;
146
	struct list_head *pos;
147
	int i = 0;
148
149
	list_for_each_prev(pos, &__ipipe_pipeline) {
150
		ipd = list_entry(pos, struct ipipe_domain, p_link);
151
152
		if (test_bit(IPIPE_STALL_FLAG, &ipipe_cpudom_var(ipd, status)))
153
			point->flags |= 1 << (i + IPIPE_TFLG_DOMSTATE_SHIFT);
154
155
		if (ipd == __ipipe_current_domain)
156
			point->flags |= i << IPIPE_TFLG_CURRDOM_SHIFT;
157
158
		if (++i > IPIPE_TFLG_DOMSTATE_BITS)
159
			break;
160
	}
161
}
162
163
static notrace int __ipipe_get_free_trace_path(int old, int cpu)
164
{
165
	int new_active = old;
166
	struct ipipe_trace_path *tp;
167
168
	do {
169
		if (++new_active == IPIPE_TRACE_PATHS)
170
			new_active = 0;
171
		tp = &per_cpu(trace_path, cpu)[new_active];
172
	} while (new_active == per_cpu(max_path, cpu) ||
173
	         new_active == per_cpu(frozen_path, cpu) ||
174
	         tp->dump_lock);
175
176
	return new_active;
177
}
178
179
static notrace void
180
__ipipe_migrate_pre_trace(struct ipipe_trace_path *new_tp,
181
                          struct ipipe_trace_path *old_tp, int old_pos)
182
{
183
	int i;
184
185
	new_tp->trace_pos = pre_trace+1;
186
187
	for (i = new_tp->trace_pos; i > 0; i--)
188
		memcpy(&new_tp->point[WRAP_POINT_NO(new_tp->trace_pos-i)],
189
		       &old_tp->point[WRAP_POINT_NO(old_pos-i)],
190
		       sizeof(struct ipipe_trace_point));
191
192
	/* mark the end (i.e. the point before point[0]) invalid */
193
	new_tp->point[IPIPE_TRACE_POINTS-1].eip = 0;
194
}
195
196
static notrace struct ipipe_trace_path *
197
__ipipe_trace_end(int cpu, struct ipipe_trace_path *tp, int pos)
198
{
199
	struct ipipe_trace_path *old_tp = tp;
200
	long active = per_cpu(active_path, cpu);
201
	unsigned long long length;
202
203
	/* do we have a new worst case? */
204
	length = tp->point[tp->end].timestamp -
205
	         tp->point[tp->begin].timestamp;
206
	if (length > per_cpu(trace_path, cpu)[per_cpu(max_path, cpu)].length) {
207
		/* we need protection here against other cpus trying
208
		   to start a proc dump */
209
		spin_lock(&global_path_lock);
210
211
		/* active path holds new worst case */
212
		tp->length = length;
213
		per_cpu(max_path, cpu) = active;
214
215
		/* find next unused trace path */
216
		active = __ipipe_get_free_trace_path(active, cpu);
217
218
		spin_unlock(&global_path_lock);
219
220
		tp = &per_cpu(trace_path, cpu)[active];
221
222
		/* migrate last entries for pre-tracing */
223
		__ipipe_migrate_pre_trace(tp, old_tp, pos);
224
	}
225
226
	return tp;
227
}
228
229
static notrace struct ipipe_trace_path *
230
__ipipe_trace_freeze(int cpu, struct ipipe_trace_path *tp, int pos)
231
{
232
	struct ipipe_trace_path *old_tp = tp;
233
	long active = per_cpu(active_path, cpu);
234
	int n;
235
236
	/* frozen paths have no core (begin=end) */
237
	tp->begin = tp->end;
238
239
	/* we need protection here against other cpus trying
240
	 * to set their frozen path or to start a proc dump */
241
	spin_lock(&global_path_lock);
242
243
	per_cpu(frozen_path, cpu) = active;
244
245
	/* find next unused trace path */
246
	active = __ipipe_get_free_trace_path(active, cpu);
247
248
	/* check if this is the first frozen path */
249
	for_each_possible_cpu(n) {
250
		if (n != cpu &&
251
		    per_cpu(trace_path, n)[per_cpu(frozen_path, n)].end >= 0)
252
			tp->end = -1;
253
	}
254
255
	spin_unlock(&global_path_lock);
256
257
	tp = &per_cpu(trace_path, cpu)[active];
258
259
	/* migrate last entries for pre-tracing */
260
	__ipipe_migrate_pre_trace(tp, old_tp, pos);
261
262
	return tp;
263
}
264
265
void notrace
266
__ipipe_trace(enum ipipe_trace_type type, unsigned long eip,
267
              unsigned long parent_eip, unsigned long v)
268
{
269
	struct ipipe_trace_path *tp, *old_tp;
270
	int pos, next_pos, begin;
271
	struct ipipe_trace_point *point;
272
	unsigned long flags;
273
	int cpu;
274
275
	local_irq_save_hw_notrace(flags);
276
277
	cpu = ipipe_processor_id();
278
 restart:
279
	tp = old_tp = &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)];
280
281
	/* here starts a race window with NMIs - caught below */
282
283
	/* check for NMI recursion */
284
	if (unlikely(tp->flags & IPIPE_TFLG_NMI_LOCK)) {
285
		tp->flags |= IPIPE_TFLG_NMI_HIT;
286
287
		/* first freeze request from NMI context? */
288
		if ((type == IPIPE_TRACE_FREEZE) &&
289
		    !(tp->flags & IPIPE_TFLG_NMI_FREEZE_REQ)) {
290
			/* save arguments and mark deferred freezing */
291
			tp->flags |= IPIPE_TFLG_NMI_FREEZE_REQ;
292
			tp->nmi_saved_eip = eip;
293
			tp->nmi_saved_parent_eip = parent_eip;
294
			tp->nmi_saved_v = v;
295
		}
296
		return; /* no need for restoring flags inside IRQ */
297
	}
298
299
	/* clear NMI events and set lock (atomically per cpu) */
300
	tp->flags = (tp->flags & ~(IPIPE_TFLG_NMI_HIT |
301
	                           IPIPE_TFLG_NMI_FREEZE_REQ))
302
	                       | IPIPE_TFLG_NMI_LOCK;
303
304
	/* check active_path again - some nasty NMI may have switched
305
	 * it meanwhile */
306
	if (unlikely(tp !=
307
		     &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)])) {
308
		/* release lock on wrong path and restart */
309
		tp->flags &= ~IPIPE_TFLG_NMI_LOCK;
310
311
		/* there is no chance that the NMI got deferred
312
		 * => no need to check for pending freeze requests */
313
		goto restart;
314
	}
315
316
	/* get the point buffer */
317
	pos = tp->trace_pos;
318
	point = &tp->point[pos];
319
320
	/* store all trace point data */
321
	point->type = type;
322
	point->flags = raw_irqs_disabled_flags(flags) ? IPIPE_TFLG_HWIRQ_OFF : 0;
323
	point->eip = eip;
324
	point->parent_eip = parent_eip;
325
	point->v = v;
326
	ipipe_read_tsc(point->timestamp);
327
328
	__ipipe_store_domain_states(point);
329
330
	/* forward to next point buffer */
331
	next_pos = WRAP_POINT_NO(pos+1);
332
	tp->trace_pos = next_pos;
333
334
	/* only mark beginning if we haven't started yet */
335
	begin = tp->begin;
336
	if (unlikely(type == IPIPE_TRACE_BEGIN) && (begin < 0))
337
		tp->begin = pos;
338
339
	/* end of critical path, start post-trace if not already started */
340
	if (unlikely(type == IPIPE_TRACE_END) &&
341
	    (begin >= 0) && !tp->post_trace)
342
		tp->post_trace = post_trace + 1;
343
344
	/* freeze only if the slot is free and we are not already freezing */
345
	if ((unlikely(type == IPIPE_TRACE_FREEZE) ||
346
	     (unlikely(eip >= trigger_begin && eip <= trigger_end) &&
347
	     type == IPIPE_TRACE_FUNC)) &&
348
	    per_cpu(trace_path, cpu)[per_cpu(frozen_path, cpu)].begin < 0 &&
349
	    !(tp->flags & IPIPE_TFLG_FREEZING)) {
350
		tp->post_trace = post_trace + 1;
351
		tp->flags |= IPIPE_TFLG_FREEZING;
352
	}
353
354
	/* enforce end of trace in case of overflow */
355
	if (unlikely(WRAP_POINT_NO(next_pos + 1) == begin)) {
356
		tp->end = pos;
357
		goto enforce_end;
358
	}
359
360
	/* stop tracing this path if we are in post-trace and
361
	 *  a) that phase is over now or
362
	 *  b) a new TRACE_BEGIN came in but we are not freezing this path */
363
	if (unlikely((tp->post_trace > 0) && ((--tp->post_trace == 0) ||
364
	             ((type == IPIPE_TRACE_BEGIN) &&
365
	              !(tp->flags & IPIPE_TFLG_FREEZING))))) {
366
		/* store the path's end (i.e. excluding post-trace) */
367
		tp->end = WRAP_POINT_NO(pos - post_trace + tp->post_trace);
368
369
 enforce_end:
370
		if (tp->flags & IPIPE_TFLG_FREEZING)
371
			tp = __ipipe_trace_freeze(cpu, tp, pos);
372
		else
373
			tp = __ipipe_trace_end(cpu, tp, pos);
374
375
		/* reset the active path, maybe already start a new one */
376
		tp->begin = (type == IPIPE_TRACE_BEGIN) ?
377
			WRAP_POINT_NO(tp->trace_pos - 1) : -1;
378
		tp->end = -1;
379
		tp->post_trace = 0;
380
		tp->flags = 0;
381
382
		/* update active_path not earlier to avoid races with NMIs */
383
		per_cpu(active_path, cpu) = tp - per_cpu(trace_path, cpu);
384
	}
385
386
	/* we still have old_tp and point,
387
	 * let's reset NMI lock and check for catches */
388
	old_tp->flags &= ~IPIPE_TFLG_NMI_LOCK;
389
	if (unlikely(old_tp->flags & IPIPE_TFLG_NMI_HIT)) {
390
		/* well, this late tagging may not immediately be visible to
391
		 * other cpus already dumping this path - a minor issue */
392
		point->flags |= IPIPE_TFLG_NMI_HIT;
393
394
		/* handle deferred freezing from NMI context */
395
		if (old_tp->flags & IPIPE_TFLG_NMI_FREEZE_REQ)
396
			__ipipe_trace(IPIPE_TRACE_FREEZE, old_tp->nmi_saved_eip,
397
			              old_tp->nmi_saved_parent_eip,
398
			              old_tp->nmi_saved_v);
399
	}
400
401
	local_irq_restore_hw_notrace(flags);
402
}
403
404
static unsigned long __ipipe_global_path_lock(void)
405
{
406
	unsigned long flags;
407
	int cpu;
408
	struct ipipe_trace_path *tp;
409
410
	spin_lock_irqsave(&global_path_lock, flags);
411
412
	cpu = ipipe_processor_id();
413
 restart:
414
	tp = &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)];
415
416
	/* here is a small race window with NMIs - caught below */
417
418
	/* clear NMI events and set lock (atomically per cpu) */
419
	tp->flags = (tp->flags & ~(IPIPE_TFLG_NMI_HIT |
420
	                           IPIPE_TFLG_NMI_FREEZE_REQ))
421
	                       | IPIPE_TFLG_NMI_LOCK;
422
423
	/* check active_path again - some nasty NMI may have switched
424
	 * it meanwhile */
425
	if (tp != &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)]) {
426
		/* release lock on wrong path and restart */
427
		tp->flags &= ~IPIPE_TFLG_NMI_LOCK;
428
429
		/* there is no chance that the NMI got deferred
430
		 * => no need to check for pending freeze requests */
431
		goto restart;
432
	}
433
434
	return flags;
435
}
436
437
static void __ipipe_global_path_unlock(unsigned long flags)
438
{
439
	int cpu;
440
	struct ipipe_trace_path *tp;
441
442
	/* release spinlock first - it's not involved in the NMI issue */
443
	__ipipe_spin_unlock_irqbegin(&global_path_lock);
444
445
	cpu = ipipe_processor_id();
446
	tp = &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)];
447
448
	tp->flags &= ~IPIPE_TFLG_NMI_LOCK;
449
450
	/* handle deferred freezing from NMI context */
451
	if (tp->flags & IPIPE_TFLG_NMI_FREEZE_REQ)
452
		__ipipe_trace(IPIPE_TRACE_FREEZE, tp->nmi_saved_eip,
453
		              tp->nmi_saved_parent_eip, tp->nmi_saved_v);
454
455
	/* See __ipipe_spin_lock_irqsave() and friends. */
456
	__ipipe_spin_unlock_irqcomplete(flags);
457
}
458
459
void notrace ipipe_trace_begin(unsigned long v)
460
{
461
	if (!ipipe_trace_enable)
462
		return;
463
	__ipipe_trace(IPIPE_TRACE_BEGIN, __BUILTIN_RETURN_ADDRESS0,
464
	              __BUILTIN_RETURN_ADDRESS1, v);
465
}
466
EXPORT_SYMBOL(ipipe_trace_begin);
467
468
void notrace ipipe_trace_end(unsigned long v)
469
{
470
	if (!ipipe_trace_enable)
471
		return;
472
	__ipipe_trace(IPIPE_TRACE_END, __BUILTIN_RETURN_ADDRESS0,
473
	              __BUILTIN_RETURN_ADDRESS1, v);
474
}
475
EXPORT_SYMBOL(ipipe_trace_end);
476
477
void notrace ipipe_trace_freeze(unsigned long v)
478
{
479
	if (!ipipe_trace_enable)
480
		return;
481
	__ipipe_trace(IPIPE_TRACE_FREEZE, __BUILTIN_RETURN_ADDRESS0,
482
	              __BUILTIN_RETURN_ADDRESS1, v);
483
}
484
EXPORT_SYMBOL(ipipe_trace_freeze);
485
486
void notrace ipipe_trace_special(unsigned char id, unsigned long v)
487
{
488
	if (!ipipe_trace_enable)
489
		return;
490
	__ipipe_trace(IPIPE_TRACE_SPECIAL | (id << IPIPE_TYPE_BITS),
491
	              __BUILTIN_RETURN_ADDRESS0,
492
	              __BUILTIN_RETURN_ADDRESS1, v);
493
}
494
EXPORT_SYMBOL(ipipe_trace_special);
495
496
void notrace ipipe_trace_pid(pid_t pid, short prio)
497
{
498
	if (!ipipe_trace_enable)
499
		return;
500
	__ipipe_trace(IPIPE_TRACE_PID | (prio << IPIPE_TYPE_BITS),
501
	              __BUILTIN_RETURN_ADDRESS0,
502
	              __BUILTIN_RETURN_ADDRESS1, pid);
503
}
504
EXPORT_SYMBOL(ipipe_trace_pid);
505
506
void notrace ipipe_trace_event(unsigned char id, unsigned long delay_tsc)
507
{
508
	if (!ipipe_trace_enable)
509
		return;
510
	__ipipe_trace(IPIPE_TRACE_EVENT | (id << IPIPE_TYPE_BITS),
511
	              __BUILTIN_RETURN_ADDRESS0,
512
	              __BUILTIN_RETURN_ADDRESS1, delay_tsc);
513
}
514
EXPORT_SYMBOL(ipipe_trace_event);
515
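
Putting the tracer entry points above together, instrumenting a code path typically brackets the section of interest with begin/end and freezes the trace when something looks wrong; the marker values and the latency threshold below are invented for the example.

/* Illustrative sketch only -- not part of the patch. */
static void my_traced_section(void)
{
	unsigned long long t0, t1;

	ipipe_trace_begin(0xcafe);		/* arbitrary marker value */

	ipipe_read_tsc(t0);
	/* ... code path being measured ... */
	ipipe_read_tsc(t1);

	/* Record the measured duration as a custom trace point. */
	ipipe_trace_special(0x10, (unsigned long)(t1 - t0));

	ipipe_trace_end(0xcafe);

	/* Freeze the current path if the section took suspiciously long. */
	if (ipipe_tsc2ns(t1 - t0) > 100000)
		ipipe_trace_freeze(0xcafe);
}
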
516
int ipipe_trace_max_reset(void)
517
{
518
	int cpu;
519
	unsigned long flags;
520
	struct ipipe_trace_path *path;
521
	int ret = 0;
522
523
	flags = __ipipe_global_path_lock();
524
525
	for_each_possible_cpu(cpu) {
526
		path = &per_cpu(trace_path, cpu)[per_cpu(max_path, cpu)];
527
528
		if (path->dump_lock) {
529
			ret = -EBUSY;
530
			break;
531
		}
532
533
		path->begin     = -1;
534
		path->end       = -1;
535
		path->trace_pos = 0;
536
		path->length    = 0;
537
	}
538
539
	__ipipe_global_path_unlock(flags);
540
541
	return ret;
542
}
543
EXPORT_SYMBOL(ipipe_trace_max_reset);
544
545
int ipipe_trace_frozen_reset(void)
546
{
547
	int cpu;
548
	unsigned long flags;
549
	struct ipipe_trace_path *path;
550
	int ret = 0;
551
552
	flags = __ipipe_global_path_lock();
553
554
	for_each_online_cpu(cpu) {
555
		path = &per_cpu(trace_path, cpu)[per_cpu(frozen_path, cpu)];
556
557
		if (path->dump_lock) {
558
			ret = -EBUSY;
559
			break;
560
		}
561
562
		path->begin = -1;
563
		path->end = -1;
564
		path->trace_pos = 0;
565
		path->length    = 0;
566
	}
567
568
	__ipipe_global_path_unlock(flags);
569
570
	return ret;
571
}
572
EXPORT_SYMBOL(ipipe_trace_frozen_reset);
573
574
static void
575
__ipipe_get_task_info(char *task_info, struct ipipe_trace_point *point,
576
                      int trylock)
577
{
578
	struct task_struct *task = NULL;
579
	char buf[8];
580
	int i;
581
	int locked = 1;
582
583
	if (trylock) {
584
		if (!read_trylock(&tasklist_lock))
585
			locked = 0;
586
	} else
587
		read_lock(&tasklist_lock);
588
589
	if (locked)
590
		task = find_task_by_pid_ns((pid_t)point->v, &init_pid_ns);
591
592
	if (task)
593
		strncpy(task_info, task->comm, 11);
594
	else
595
		strcpy(task_info, "-<?>-");
596
597
	if (locked)
598
		read_unlock(&tasklist_lock);
599
600
	for (i = strlen(task_info); i < 11; i++)
601
		task_info[i] = ' ';
602
603
	sprintf(buf, " %d ", point->type >> IPIPE_TYPE_BITS);
604
	strcpy(task_info + (11 - strlen(buf)), buf);
605
}
606
607
static void
608
__ipipe_get_event_date(char *buf,struct ipipe_trace_path *path,
609
		       struct ipipe_trace_point *point)
610
{
611
	long time;
612
	int type;
613
614
	time = __ipipe_signed_tsc2us(point->timestamp -
615
				     path->point[path->begin].timestamp + point->v);
616
	type = point->type >> IPIPE_TYPE_BITS;
617
618
	if (type == 0)
619
		/*
620
		 * Event type #0 is predefined, stands for the next
621
		 * timer tick.
622
		 */
623
		sprintf(buf, "tick@%-6ld", time);
624
	else
625
		sprintf(buf, "%3d@%-7ld", type, time);
626
}
627
628
#ifdef CONFIG_IPIPE_TRACE_PANIC
629
void ipipe_trace_panic_freeze(void)
630
{
631
	unsigned long flags;
632
	int cpu;
633
634
	if (!ipipe_trace_enable)
635
		return;
636
637
	ipipe_trace_enable = 0;
638
	local_irq_save_hw_notrace(flags);
639
640
	cpu = ipipe_processor_id();
641
642
	panic_path = &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)];
643
644
	local_irq_restore_hw(flags);
645
}
646
EXPORT_SYMBOL(ipipe_trace_panic_freeze);
647
648
void ipipe_trace_panic_dump(void)
649
{
650
	int cnt = back_trace;
651
	int start, pos;
652
	char buf[16];
653
654
	if (!panic_path)
655
		return;
656
657
	ipipe_context_check_off();
658
659
	printk("I-pipe tracer log (%d points):\n", cnt);
660
661
	start = pos = WRAP_POINT_NO(panic_path->trace_pos-1);
662
663
	while (cnt-- > 0) {
664
		struct ipipe_trace_point *point = &panic_path->point[pos];
665
		long time;
666
		char info[16];
667
		int i;
668
669
		printk(" %c",
670
		       (point->flags & IPIPE_TFLG_HWIRQ_OFF) ? '|' : ' ');
671
672
		for (i = IPIPE_TFLG_DOMSTATE_BITS; i >= 0; i--)
673
			printk("%c",
674
			       (IPIPE_TFLG_CURRENT_DOMAIN(point) == i) ?
675
				(IPIPE_TFLG_DOMAIN_STALLED(point, i) ?
676
					'#' : '+') :
677
				(IPIPE_TFLG_DOMAIN_STALLED(point, i) ?
678
					'*' : ' '));
679
680
		if (!point->eip)
681
			printk("-<invalid>-\n");
682
		else {
683
			__ipipe_trace_point_type(buf, point);
684
			printk("%s", buf);
685
686
			switch (point->type & IPIPE_TYPE_MASK) {
687
				case IPIPE_TRACE_FUNC:
688
					printk("           ");
689
					break;
690
691
				case IPIPE_TRACE_PID:
692
					__ipipe_get_task_info(info,
693
							      point, 1);
694
					printk("%s", info);
695
					break;
696
697
				case IPIPE_TRACE_EVENT:
698
					__ipipe_get_event_date(info,
699
							       panic_path, point);
700
					printk("%s", info);
701
					break;
702
703
				default:
704
					printk("0x%08lx ", point->v);
705
			}
706
707
			time = __ipipe_signed_tsc2us(point->timestamp -
708
				panic_path->point[start].timestamp);
709
			printk(" %5ld ", time);
710
711
			__ipipe_print_symname(NULL, point->eip);
712
			printk(" (");
713
			__ipipe_print_symname(NULL, point->parent_eip);
714
			printk(")\n");
715
		}
716
		pos = WRAP_POINT_NO(pos - 1);
717
	}
718
719
	panic_path = NULL;
720
}
721
EXPORT_SYMBOL(ipipe_trace_panic_dump);
722
#endif /* CONFIG_IPIPE_TRACE_PANIC */
723
724
725
/* --- /proc output --- */
726
727
static notrace int __ipipe_in_critical_trpath(long point_no)
728
{
729
	return ((WRAP_POINT_NO(point_no-print_path->begin) <
730
	         WRAP_POINT_NO(print_path->end-print_path->begin)) ||
731
	        ((print_path->end == print_path->begin) &&
732
	         (WRAP_POINT_NO(point_no-print_path->end) >
733
	          print_post_trace)));
734
}
735
736
static long __ipipe_signed_tsc2us(long long tsc)
737
{
738
	unsigned long long abs_tsc;
739
	long us;
740
741
	/* ipipe_tsc2us works on unsigned => handle sign separately */
742
	abs_tsc = (tsc >= 0) ? tsc : -tsc;
743
	us = ipipe_tsc2us(abs_tsc);
744
	if (tsc < 0)
745
		return -us;
746
	else
747
		return us;
748
}
749
750
static void
751
__ipipe_trace_point_type(char *buf, struct ipipe_trace_point *point)
752
{
753
	switch (point->type & IPIPE_TYPE_MASK) {
754
		case IPIPE_TRACE_FUNC:
755
			strcpy(buf, "func    ");
756
			break;
757
758
		case IPIPE_TRACE_BEGIN:
759
			strcpy(buf, "begin   ");
760
			break;
761
762
		case IPIPE_TRACE_END:
763
			strcpy(buf, "end     ");
764
			break;
765
766
		case IPIPE_TRACE_FREEZE:
767
			strcpy(buf, "freeze  ");
768
			break;
769
770
		case IPIPE_TRACE_SPECIAL:
771
			sprintf(buf, "(0x%02x)  ",
772
				point->type >> IPIPE_TYPE_BITS);
773
			break;
774
775
		case IPIPE_TRACE_PID:
776
			sprintf(buf, "[%5d] ", (pid_t)point->v);
777
			break;
778
779
		case IPIPE_TRACE_EVENT:
780
			sprintf(buf, "event   ");
781
			break;
782
	}
783
}
784
785
static void
786
__ipipe_print_pathmark(struct seq_file *m, struct ipipe_trace_point *point)
787
{
788
	char mark = ' ';
789
	int point_no = point - print_path->point;
790
	int i;
791
792
	if (print_path->end == point_no)
793
		mark = '<';
794
	else if (print_path->begin == point_no)
795
		mark = '>';
796
	else if (__ipipe_in_critical_trpath(point_no))
797
		mark = ':';
798
	seq_printf(m, "%c%c", mark,
799
	           (point->flags & IPIPE_TFLG_HWIRQ_OFF) ? '|' : ' ');
800
801
	if (!verbose_trace)
802
		return;
803
804
	for (i = IPIPE_TFLG_DOMSTATE_BITS; i >= 0; i--)
805
		seq_printf(m, "%c",
806
			(IPIPE_TFLG_CURRENT_DOMAIN(point) == i) ?
807
			    (IPIPE_TFLG_DOMAIN_STALLED(point, i) ?
808
				'#' : '+') :
809
			(IPIPE_TFLG_DOMAIN_STALLED(point, i) ? '*' : ' '));
810
}
811
812
static void
813
__ipipe_print_delay(struct seq_file *m, struct ipipe_trace_point *point)
814
{
815
	unsigned long delay = 0;
816
	int next;
817
	char *mark = "  ";
818
819
	next = WRAP_POINT_NO(point+1 - print_path->point);
820
821
	if (next != print_path->trace_pos)
822
		delay = ipipe_tsc2ns(print_path->point[next].timestamp -
823
		                     point->timestamp);
824
825
	if (__ipipe_in_critical_trpath(point - print_path->point)) {
826
		if (delay > IPIPE_DELAY_WARN)
827
			mark = "! ";
828
		else if (delay > IPIPE_DELAY_NOTE)
829
			mark = "+ ";
830
	}
831
	seq_puts(m, mark);
832
833
	if (verbose_trace)
834
		seq_printf(m, "%3lu.%03lu%c ", delay/1000, delay%1000,
835
		           (point->flags & IPIPE_TFLG_NMI_HIT) ? 'N' : ' ');
836
	else
837
		seq_puts(m, " ");
838
}
839
840
static void __ipipe_print_symname(struct seq_file *m, unsigned long eip)
841
{
842
	char namebuf[KSYM_NAME_LEN+1];
843
	unsigned long size, offset;
844
	const char *sym_name;
845
	char *modname;
846
847
	sym_name = kallsyms_lookup(eip, &size, &offset, &modname, namebuf);
848
849
#ifdef CONFIG_IPIPE_TRACE_PANIC
850
	if (!m) {
851
		/* panic dump */
852
		if (sym_name) {
853
			printk("%s+0x%lx", sym_name, offset);
854
			if (modname)
855
				printk(" [%s]", modname);
856
		}
857
	} else
858
#endif /* CONFIG_IPIPE_TRACE_PANIC */
859
	{
860
		if (sym_name) {
861
			if (verbose_trace) {
862
				seq_printf(m, "%s+0x%lx", sym_name, offset);
863
				if (modname)
864
					seq_printf(m, " [%s]", modname);
865
			} else
866
				seq_puts(m, sym_name);
867
		} else
868
			seq_printf(m, "<%08lx>", eip);
869
	}
870
}
871
872
static void __ipipe_print_headline(struct seq_file *m)
873
{
874
	seq_printf(m, "Calibrated minimum trace-point overhead: %lu.%03lu "
875
		   "us\n\n", trace_overhead/1000, trace_overhead%1000);
876
877
	if (verbose_trace) {
878
		const char *name[4] = { [0 ... 3] = "<unused>" };
879
		struct list_head *pos;
880
		int i = 0;
881
882
		list_for_each_prev(pos, &__ipipe_pipeline) {
883
			struct ipipe_domain *ipd =
884
				list_entry(pos, struct ipipe_domain, p_link);
885
886
			name[i] = ipd->name;
887
			if (++i > 3)
888
				break;
889
		}
890
891
		seq_printf(m,
892
		           " +----- Hard IRQs ('|': locked)\n"
893
		           " |+---- %s\n"
894
		           " ||+--- %s\n"
895
		           " |||+-- %s\n"
896
		           " ||||+- %s%s\n"
897
		           " |||||                        +---------- "
898
		               "Delay flag ('+': > %d us, '!': > %d us)\n"
899
		           " |||||                        |        +- "
900
		               "NMI noise ('N')\n"
901
		           " |||||                        |        |\n"
902
		           "      Type    User Val.   Time    Delay  Function "
903
		               "(Parent)\n",
904
		           name[3], name[2], name[1], name[0],
905
		           name[0] ? " ('*': domain stalled, '+': current, "
906
		               "'#': current+stalled)" : "",
907
		           IPIPE_DELAY_NOTE/1000, IPIPE_DELAY_WARN/1000);
908
	} else
909
		seq_printf(m,
910
		           " +--------------- Hard IRQs ('|': locked)\n"
911
		           " |             +- Delay flag "
912
		               "('+': > %d us, '!': > %d us)\n"
913
		           " |             |\n"
914
		           "  Type     Time   Function (Parent)\n",
915
		           IPIPE_DELAY_NOTE/1000, IPIPE_DELAY_WARN/1000);
916
}
917
918
static void *__ipipe_max_prtrace_start(struct seq_file *m, loff_t *pos)
919
{
920
	loff_t n = *pos;
921
922
	mutex_lock(&out_mutex);
923
924
	if (!n) {
925
		struct ipipe_trace_path *tp;
926
		unsigned long length_usecs;
927
		int points, cpu;
928
		unsigned long flags;
929
930
		/* protect against max_path/frozen_path updates while we
931
		 * haven't locked our target path, also avoid recursively
932
		 * taking global_path_lock from NMI context */
933
		flags = __ipipe_global_path_lock();
934
935
		/* find the longest of all per-cpu paths */
936
		print_path = NULL;
937
		for_each_online_cpu(cpu) {
938
			tp = &per_cpu(trace_path, cpu)[per_cpu(max_path, cpu)];
939
			if ((print_path == NULL) ||
940
			    (tp->length > print_path->length)) {
941
				print_path = tp;
942
				break;
943
			}
944
		}
945
		print_path->dump_lock = 1;
946
947
		__ipipe_global_path_unlock(flags);
948
949
		/* does this path actually contain data? */
950
		if (print_path->end == print_path->begin)
951
			return NULL;
952
953
		/* number of points inside the critical path */
954
		points = WRAP_POINT_NO(print_path->end-print_path->begin+1);
955
956
		/* pre- and post-tracing length, post-trace length was frozen
957
		   in __ipipe_trace, pre-trace may have to be reduced due to
958
		   buffer overrun */
959
		print_pre_trace  = pre_trace;
960
		print_post_trace = WRAP_POINT_NO(print_path->trace_pos -
961
		                                 print_path->end - 1);
962
		if (points+pre_trace+print_post_trace > IPIPE_TRACE_POINTS - 1)
963
			print_pre_trace = IPIPE_TRACE_POINTS - 1 - points -
964
				print_post_trace;
965
966
		length_usecs = ipipe_tsc2us(print_path->length);
967
		seq_printf(m, "I-pipe worst-case tracing service on %s/ipipe-%s\n"
968
			"------------------------------------------------------------\n",
969
			UTS_RELEASE, IPIPE_ARCH_STRING);
970
		seq_printf(m, "CPU: %d, Begin: %lld cycles, Trace Points: "
971
			"%d (-%d/+%d), Length: %lu us\n",
972
			cpu, print_path->point[print_path->begin].timestamp,
973
			points, print_pre_trace, print_post_trace, length_usecs);
974
		__ipipe_print_headline(m);
975
	}
976
977
	/* check if we are inside the trace range */
978
	if (n >= WRAP_POINT_NO(print_path->end - print_path->begin + 1 +
979
	                       print_pre_trace + print_post_trace))
980
		return NULL;
981
982
	/* return the next point to be shown */
983
	return &print_path->point[WRAP_POINT_NO(print_path->begin -
984
	                                        print_pre_trace + n)];
985
}
986
987
static void *__ipipe_prtrace_next(struct seq_file *m, void *p, loff_t *pos)
988
{
989
	loff_t n = ++*pos;
990
991
	/* check if we are inside the trace range with the next entry */
992
	if (n >= WRAP_POINT_NO(print_path->end - print_path->begin + 1 +
993
	                       print_pre_trace + print_post_trace))
994
		return NULL;
995
996
	/* return the next point to be shown */
997
	return &print_path->point[WRAP_POINT_NO(print_path->begin -
998
	                                        print_pre_trace + *pos)];
999
}
1000
1001
static void __ipipe_prtrace_stop(struct seq_file *m, void *p)
1002
{
1003
	if (print_path)
1004
		print_path->dump_lock = 0;
1005
	mutex_unlock(&out_mutex);
1006
}
1007
1008
static int __ipipe_prtrace_show(struct seq_file *m, void *p)
1009
{
1010
	long time;
1011
	struct ipipe_trace_point *point = p;
1012
	char buf[16];
1013
1014
	if (!point->eip) {
1015
		seq_puts(m, "-<invalid>-\n");
1016
		return 0;
1017
	}
1018
1019
	__ipipe_print_pathmark(m, point);
1020
	__ipipe_trace_point_type(buf, point);
1021
	seq_puts(m, buf);
1022
	if (verbose_trace)
1023
		switch (point->type & IPIPE_TYPE_MASK) {
1024
			case IPIPE_TRACE_FUNC:
1025
				seq_puts(m, "           ");
1026
				break;
1027
1028
			case IPIPE_TRACE_PID:
1029
				__ipipe_get_task_info(buf, point, 0);
1030
				seq_puts(m, buf);
1031
				break;
1032
1033
			case IPIPE_TRACE_EVENT:
1034
				__ipipe_get_event_date(buf, print_path, point);
1035
				seq_puts(m, buf);
1036
				break;
1037
1038
			default:
1039
				seq_printf(m, "0x%08lx ", point->v);
1040
		}
1041
1042
	time = __ipipe_signed_tsc2us(point->timestamp -
1043
		print_path->point[print_path->begin].timestamp);
1044
	seq_printf(m, "%5ld", time);
1045
1046
	__ipipe_print_delay(m, point);
1047
	__ipipe_print_symname(m, point->eip);
1048
	seq_puts(m, " (");
1049
	__ipipe_print_symname(m, point->parent_eip);
1050
	seq_puts(m, ")\n");
1051
1052
	return 0;
1053
}
1054
1055
static struct seq_operations __ipipe_max_ptrace_ops = {
1056
	.start = __ipipe_max_prtrace_start,
1057
	.next  = __ipipe_prtrace_next,
1058
	.stop  = __ipipe_prtrace_stop,
1059
	.show  = __ipipe_prtrace_show
1060
};
1061
1062
static int __ipipe_max_prtrace_open(struct inode *inode, struct file *file)
1063
{
1064
	return seq_open(file, &__ipipe_max_ptrace_ops);
1065
}
1066
1067
static ssize_t
1068
__ipipe_max_reset(struct file *file, const char __user *pbuffer,
1069
                  size_t count, loff_t *data)
1070
{
1071
	mutex_lock(&out_mutex);
1072
	ipipe_trace_max_reset();
1073
	mutex_unlock(&out_mutex);
1074
1075
	return count;
1076
}
1077
1078
struct file_operations __ipipe_max_prtrace_fops = {
1079
	.open       = __ipipe_max_prtrace_open,
1080
	.read       = seq_read,
1081
	.write      = __ipipe_max_reset,
1082
	.llseek     = seq_lseek,
1083
	.release    = seq_release,
1084
};
1085
1086
static void *__ipipe_frozen_prtrace_start(struct seq_file *m, loff_t *pos)
1087
{
1088
	loff_t n = *pos;
1089
1090
	mutex_lock(&out_mutex);
1091
1092
	if (!n) {
1093
		struct ipipe_trace_path *tp;
1094
		int cpu;
1095
		unsigned long flags;
1096
1097
		/* protect against max_path/frozen_path updates while we
1098
		 * haven't locked our target path, also avoid recursively
1099
		 * taking global_path_lock from NMI context */
1100
		flags = __ipipe_global_path_lock();
1101
1102
		/* find the first of all per-cpu frozen paths */
1103
		print_path = NULL;
1104
		for_each_online_cpu(cpu) {
1105
			tp = &per_cpu(trace_path, cpu)[per_cpu(frozen_path, cpu)];
1106
			if (tp->end >= 0) {
1107
				print_path = tp;
1108
				break;
1109
			}
1110
		}
1111
		if (print_path)
1112
			print_path->dump_lock = 1;
1113
1114
		__ipipe_global_path_unlock(flags);
1115
1116
		if (!print_path)
1117
			return NULL;
1118
1119
		/* back- and post-tracing length, post-trace length was frozen
1120
		   in __ipipe_trace, back-trace may have to be reduced due to
1121
		   buffer overrun */
1122
		print_pre_trace  = back_trace-1; /* subtract freeze point */
1123
		print_post_trace = WRAP_POINT_NO(print_path->trace_pos -
1124
		                                 print_path->end - 1);
1125
		if (1+pre_trace+print_post_trace > IPIPE_TRACE_POINTS - 1)
1126
			print_pre_trace = IPIPE_TRACE_POINTS - 2 -
1127
				print_post_trace;
1128
1129
		seq_printf(m, "I-pipe frozen back-tracing service on %s/ipipe-%s\n"
1130
			"------------------------------------------------------"
1131
			"------\n",
1132
			UTS_RELEASE, IPIPE_ARCH_STRING);
1133
		seq_printf(m, "CPU: %d, Freeze: %lld cycles, Trace Points: %d (+%d)\n",
1134
			cpu, print_path->point[print_path->begin].timestamp,
1135
			print_pre_trace+1, print_post_trace);
1136
		__ipipe_print_headline(m);
1137
	}
1138
1139
	/* check if we are inside the trace range */
1140
	if (n >= print_pre_trace + 1 + print_post_trace)
1141
		return NULL;
1142
1143
	/* return the next point to be shown */
1144
	return &print_path->point[WRAP_POINT_NO(print_path->begin-
1145
	                                        print_pre_trace+n)];
1146
}
1147
1148
static struct seq_operations __ipipe_frozen_ptrace_ops = {
1149
	.start = __ipipe_frozen_prtrace_start,
1150
	.next  = __ipipe_prtrace_next,
1151
	.stop  = __ipipe_prtrace_stop,
1152
	.show  = __ipipe_prtrace_show
1153
};
1154
1155
static int __ipipe_frozen_prtrace_open(struct inode *inode, struct file *file)
1156
{
1157
	return seq_open(file, &__ipipe_frozen_ptrace_ops);
1158
}
1159
1160
static ssize_t
1161
__ipipe_frozen_ctrl(struct file *file, const char __user *pbuffer,
1162
                    size_t count, loff_t *data)
1163
{
1164
	char *end, buf[16];
1165
	int val;
1166
	int n;
1167
1168
	n = (count > sizeof(buf) - 1) ? sizeof(buf) - 1 : count;
1169
1170
	if (copy_from_user(buf, pbuffer, n))
1171
		return -EFAULT;
1172
1173
	buf[n] = '\0';
1174
	val = simple_strtol(buf, &end, 0);
1175
1176
	if (((*end != '\0') && !isspace(*end)) || (val < 0))
1177
		return -EINVAL;
1178
1179
	mutex_lock(&out_mutex);
1180
	ipipe_trace_frozen_reset();
1181
	if (val > 0)
1182
		ipipe_trace_freeze(-1);
1183
	mutex_unlock(&out_mutex);
1184
1185
	return count;
1186
}
1187
1188
struct file_operations __ipipe_frozen_prtrace_fops = {
1189
	.open       = __ipipe_frozen_prtrace_open,
1190
	.read       = seq_read,
1191
	.write      = __ipipe_frozen_ctrl,
1192
	.llseek     = seq_lseek,
1193
	.release    = seq_release,
1194
};
1195
1196
static int __ipipe_rd_proc_val(char *page, char **start, off_t off,
1197
                               int count, int *eof, void *data)
1198
{
1199
	int len;
1200
1201
	len = sprintf(page, "%u\n", *(int *)data);
1202
	len -= off;
1203
	if (len <= off + count)
1204
		*eof = 1;
1205
	*start = page + off;
1206
	if (len > count)
1207
		len = count;
1208
	if (len < 0)
1209
		len = 0;
1210
1211
	return len;
1212
}
1213
1214
static int __ipipe_wr_proc_val(struct file *file, const char __user *buffer,
1215
                               unsigned long count, void *data)
1216
{
1217
	char *end, buf[16];
1218
	int val;
1219
	int n;
1220
1221
	n = (count > sizeof(buf) - 1) ? sizeof(buf) - 1 : count;
1222
1223
	if (copy_from_user(buf, buffer, n))
1224
		return -EFAULT;
1225
1226
	buf[n] = '\0';
1227
	val = simple_strtol(buf, &end, 0);
1228
1229
	if (((*end != '\0') && !isspace(*end)) || (val < 0))
1230
		return -EINVAL;
1231
1232
	mutex_lock(&out_mutex);
1233
	*(int *)data = val;
1234
	mutex_unlock(&out_mutex);
1235
1236
	return count;
1237
}
1238
1239
static int __ipipe_rd_trigger(char *page, char **start, off_t off, int count,
1240
			      int *eof, void *data)
1241
{
1242
	int len;
1243
1244
	if (!trigger_begin)
1245
		return 0;
1246
1247
	len = sprint_symbol(page, trigger_begin);
1248
	page[len++] = '\n';
1249
1250
	len -= off;
1251
	if (len <= off + count)
1252
		*eof = 1;
1253
	*start = page + off;
1254
	if (len > count)
1255
		len = count;
1256
	if (len < 0)
1257
		len = 0;
1258
1259
	return len;
1260
}
1261
1262
static int __ipipe_wr_trigger(struct file *file, const char __user *buffer,
1263
			      unsigned long count, void *data)
1264
{
1265
	char buf[KSYM_SYMBOL_LEN];
1266
	unsigned long begin, end;
1267
1268
	if (count > sizeof(buf) - 1)
1269
		count = sizeof(buf) - 1;
1270
	if (copy_from_user(buf, buffer, count))
1271
		return -EFAULT;
1272
	buf[count] = 0;
1273
	if (buf[count-1] == '\n')
1274
		buf[count-1] = 0;
1275
1276
	begin = kallsyms_lookup_name(buf);
1277
	if (!begin || !kallsyms_lookup_size_offset(begin, &end, NULL))
1278
		return -ENOENT;
1279
	end += begin - 1;
1280
1281
	mutex_lock(&out_mutex);
1282
	/* invalidate the current range before setting a new one */
1283
	trigger_end = 0;
1284
	wmb();
1285
	ipipe_trace_frozen_reset();
1286
1287
	/* set new range */
1288
	trigger_begin = begin;
1289
	wmb();
1290
	trigger_end = end;
1291
	mutex_unlock(&out_mutex);
1292
1293
	return count;
1294
}
1295
1296
#ifdef CONFIG_IPIPE_TRACE_MCOUNT
1297
static void notrace
1298
ipipe_trace_function(unsigned long ip, unsigned long parent_ip)
1299
{
1300
	if (!ipipe_trace_enable)
1301
		return;
1302
	__ipipe_trace(IPIPE_TRACE_FUNC, ip, parent_ip, 0);
1303
}
1304
1305
static struct ftrace_ops ipipe_trace_ops = {
1306
	.func = ipipe_trace_function
1307
};
1308
1309
static int __ipipe_wr_enable(struct file *file, const char __user *buffer,
1310
			     unsigned long count, void *data)
1311
{
1312
	char *end, buf[16];
1313
	int val;
1314
	int n;
1315
1316
	n = (count > sizeof(buf) - 1) ? sizeof(buf) - 1 : count;
1317
1318
	if (copy_from_user(buf, buffer, n))
1319
		return -EFAULT;
1320
1321
	buf[n] = '\0';
1322
	val = simple_strtol(buf, &end, 0);
1323
1324
	if (((*end != '\0') && !isspace(*end)) || (val < 0))
1325
		return -EINVAL;
1326
1327
	mutex_lock(&out_mutex);
1328
1329
	if (ipipe_trace_enable) {
1330
		if (!val)
1331
			unregister_ftrace_function(&ipipe_trace_ops);
1332
	} else if (val)
1333
		register_ftrace_function(&ipipe_trace_ops);
1334
1335
	ipipe_trace_enable = val;
1336
1337
	mutex_unlock(&out_mutex);
1338
1339
	return count;
1340
}
1341
#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
1342
1343
extern struct proc_dir_entry *ipipe_proc_root;
1344
1345
static struct proc_dir_entry * __init
1346
__ipipe_create_trace_proc_val(struct proc_dir_entry *trace_dir,
1347
                              const char *name, int *value_ptr)
1348
{
1349
	struct proc_dir_entry *entry;
1350
1351
	entry = create_proc_entry(name, 0644, trace_dir);
1352
	if (entry) {
1353
		entry->data = value_ptr;
1354
		entry->read_proc = __ipipe_rd_proc_val;
1355
		entry->write_proc = __ipipe_wr_proc_val;
1356
	}
1357
	return entry;
1358
}
1359
1360
void __init __ipipe_init_tracer(void)
1361
{
1362
	struct proc_dir_entry *trace_dir;
1363
	struct proc_dir_entry *entry;
1364
	unsigned long long start, end, min = ULLONG_MAX;
1365
	int i;
1366
#ifdef CONFIG_IPIPE_TRACE_VMALLOC
1367
	int cpu, path;
1368
1369
	for_each_possible_cpu(cpu) {
1370
		struct ipipe_trace_path *tp_buf;
1371
1372
		tp_buf = vmalloc_node(sizeof(struct ipipe_trace_path) *
1373
				      IPIPE_TRACE_PATHS, cpu_to_node(cpu));
1374
		if (!tp_buf) {
1375
			printk(KERN_ERR "I-pipe: "
1376
			       "insufficient memory for trace buffer.\n");
1377
			return;
1378
		}
1379
		memset(tp_buf, 0,
1380
		       sizeof(struct ipipe_trace_path) * IPIPE_TRACE_PATHS);
1381
		for (path = 0; path < IPIPE_TRACE_PATHS; path++) {
1382
			tp_buf[path].begin = -1;
1383
			tp_buf[path].end   = -1;
1384
		}
1385
		per_cpu(trace_path, cpu) = tp_buf;
1386
	}
1387
#endif /* CONFIG_IPIPE_TRACE_VMALLOC */
1388
1389
	/* Calculate minimum overhead of __ipipe_trace() */
1390
	local_irq_disable_hw();
1391
	for (i = 0; i < 100; i++) {
1392
		ipipe_read_tsc(start);
1393
		__ipipe_trace(IPIPE_TRACE_FUNC, __BUILTIN_RETURN_ADDRESS0,
1394
			      __BUILTIN_RETURN_ADDRESS1, 0);
1395
		ipipe_read_tsc(end);
1396
1397
		end -= start;
1398
		if (end < min)
1399
			min = end;
1400
	}
1401
	local_irq_enable_hw();
1402
	trace_overhead = ipipe_tsc2ns(min);
1403
1404
#ifdef CONFIG_IPIPE_TRACE_ENABLE
1405
	ipipe_trace_enable = 1;
1406
#ifdef CONFIG_IPIPE_TRACE_MCOUNT
1407
	register_ftrace_function(&ipipe_trace_ops);
1408
#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
1409
#endif /* CONFIG_IPIPE_TRACE_ENABLE */
1410
1411
	trace_dir = create_proc_entry("trace", S_IFDIR, ipipe_proc_root);
1412
1413
	entry = create_proc_entry("max", 0644, trace_dir);
1414
	if (entry)
1415
		entry->proc_fops = &__ipipe_max_prtrace_fops;
1416
1417
	entry = create_proc_entry("frozen", 0644, trace_dir);
1418
	if (entry)
1419
		entry->proc_fops = &__ipipe_frozen_prtrace_fops;
1420
1421
	entry = create_proc_entry("trigger", 0644, trace_dir);
1422
	if (entry) {
1423
		entry->read_proc = __ipipe_rd_trigger;
1424
		entry->write_proc = __ipipe_wr_trigger;
1425
	}
1426
1427
	__ipipe_create_trace_proc_val(trace_dir, "pre_trace_points",
1428
	                              &pre_trace);
1429
	__ipipe_create_trace_proc_val(trace_dir, "post_trace_points",
1430
	                              &post_trace);
1431
	__ipipe_create_trace_proc_val(trace_dir, "back_trace_points",
1432
	                              &back_trace);
1433
	__ipipe_create_trace_proc_val(trace_dir, "verbose",
1434
	                              &verbose_trace);
1435
	entry = __ipipe_create_trace_proc_val(trace_dir, "enable",
1436
					      &ipipe_trace_enable);
1437
#ifdef CONFIG_IPIPE_TRACE_MCOUNT
1438
	if (entry)
1439
		entry->write_proc = __ipipe_wr_enable;
1440
#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
1441
}
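
Illustrative usage sketch (not part of the attached patch): __ipipe_init_tracer() above registers the tracer's control files under ipipe_proc_root. Assuming that root is exposed as /proc/ipipe -- so the entries live in /proc/ipipe/trace/ -- and using "do_IRQ" purely as a placeholder symbol name for the freeze trigger, a user-space driver of this interface could look like the following:

/*
 * Illustrative only -- not kernel code and not part of the patch.
 * Assumes ipipe_proc_root is exposed as /proc/ipipe.
 */
#include <stdio.h>
#include <stdlib.h>

/* Write a value or symbol name into one of the trace control files. */
static int trace_write(const char *entry, const char *value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/ipipe/trace/%s", entry);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s\n", value);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[256];
	FILE *f;

	/* Arm a freeze trigger on a kernel symbol, then enable tracing. */
	if (trace_write("trigger", "do_IRQ") || trace_write("enable", "1")) {
		perror("ipipe trace");
		return EXIT_FAILURE;
	}

	/* Later: dump whatever back-trace was frozen around the trigger. */
	f = fopen("/proc/ipipe/trace/frozen", "r");
	if (!f) {
		perror("frozen");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	return EXIT_SUCCESS;
}
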
(-)a/kernel/irq/chip.c (+142 lines)
Lines 15-20 Link Here
15
#include <linux/module.h>
15
#include <linux/module.h>
16
#include <linux/interrupt.h>
16
#include <linux/interrupt.h>
17
#include <linux/kernel_stat.h>
17
#include <linux/kernel_stat.h>
18
#include <linux/ipipe.h>
18
19
19
#include "internals.h"
20
#include "internals.h"
20
21
Lines 459-465 handle_level_irq(unsigned int irq, struct irq_desc *desc) Link Here
459
	irqreturn_t action_ret;
460
	irqreturn_t action_ret;
460
461
461
	spin_lock(&desc->lock);
462
	spin_lock(&desc->lock);
463
#ifndef CONFIG_IPIPE
462
	mask_ack_irq(desc, irq);
464
	mask_ack_irq(desc, irq);
465
#endif
463
466
464
	if (unlikely(desc->status & IRQ_INPROGRESS))
467
	if (unlikely(desc->status & IRQ_INPROGRESS))
465
		goto out_unlock;
468
		goto out_unlock;
Lines 539-546 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) Link Here
539
542
540
	spin_lock(&desc->lock);
543
	spin_lock(&desc->lock);
541
	desc->status &= ~IRQ_INPROGRESS;
544
	desc->status &= ~IRQ_INPROGRESS;
545
#ifdef CONFIG_IPIPE
546
	desc->chip->unmask(irq);
547
out:
548
#else
542
out:
549
out:
543
	desc->chip->eoi(irq);
550
	desc->chip->eoi(irq);
551
#endif
544
552
545
	spin_unlock(&desc->lock);
553
	spin_unlock(&desc->lock);
546
}
554
}
Lines 582-589 handle_edge_irq(unsigned int irq, struct irq_desc *desc) Link Here
582
	kstat_incr_irqs_this_cpu(irq, desc);
590
	kstat_incr_irqs_this_cpu(irq, desc);
583
591
584
	/* Start handling the irq */
592
	/* Start handling the irq */
593
#ifndef CONFIG_IPIPE
585
	if (desc->chip->ack)
594
	if (desc->chip->ack)
586
		desc->chip->ack(irq);
595
		desc->chip->ack(irq);
596
#endif
587
597
588
	/* Mark the IRQ currently in progress.*/
598
	/* Mark the IRQ currently in progress.*/
589
	desc->status |= IRQ_INPROGRESS;
599
	desc->status |= IRQ_INPROGRESS;
Lines 637-644 handle_percpu_irq(unsigned int irq, struct irq_desc *desc) Link Here
637
647
638
	kstat_incr_irqs_this_cpu(irq, desc);
648
	kstat_incr_irqs_this_cpu(irq, desc);
639
649
650
#ifndef CONFIG_IPIPE
640
	if (desc->chip->ack)
651
	if (desc->chip->ack)
641
		desc->chip->ack(irq);
652
		desc->chip->ack(irq);
653
#endif /* CONFIG_IPIPE */
642
654
643
	action_ret = handle_IRQ_event(irq, desc->action);
655
	action_ret = handle_IRQ_event(irq, desc->action);
644
	if (!noirqdebug)
656
	if (!noirqdebug)
Lines 648-653 handle_percpu_irq(unsigned int irq, struct irq_desc *desc) Link Here
648
		desc->chip->eoi(irq);
660
		desc->chip->eoi(irq);
649
}
661
}
650
662
663
#ifdef CONFIG_IPIPE
664
665
void __ipipe_ack_simple_irq(unsigned irq, struct irq_desc *desc)
666
{
667
}
668
669
void __ipipe_end_simple_irq(unsigned irq, struct irq_desc *desc)
670
{
671
}
672
673
void __ipipe_ack_level_irq(unsigned irq, struct irq_desc *desc)
674
{
675
	mask_ack_irq(desc, irq);
676
}
677
678
void __ipipe_end_level_irq(unsigned irq, struct irq_desc *desc)
679
{
680
	if (desc->chip->unmask)
681
		desc->chip->unmask(irq);
682
}
683
684
void __ipipe_ack_fasteoi_irq(unsigned irq, struct irq_desc *desc)
685
{
686
	desc->chip->eoi(irq);
687
}
688
689
void __ipipe_end_fasteoi_irq(unsigned irq, struct irq_desc *desc)
690
{
691
	/*
692
	 * Non-requestable IRQs should not be masked in EOI handler.
693
	 */
694
	if (!(desc->status & IRQ_NOREQUEST))
695
		desc->chip->unmask(irq);
696
}
697
698
void __ipipe_ack_edge_irq(unsigned irq, struct irq_desc *desc)
699
{
700
	desc->chip->ack(irq);
701
}
702
703
void __ipipe_ack_percpu_irq(unsigned irq, struct irq_desc *desc)
704
{
705
	if (desc->chip->ack)
706
		desc->chip->ack(irq);
707
}
708
709
void __ipipe_end_percpu_irq(unsigned irq, struct irq_desc *desc)
710
{
711
	if (desc->chip->eoi)
712
		desc->chip->eoi(irq);
713
}
714
715
void __ipipe_end_edge_irq(unsigned irq, struct irq_desc *desc)
716
{
717
}
718
719
void __ipipe_ack_bad_irq(unsigned irq, struct irq_desc *desc)
720
{
721
	static int done;
722
723
	handle_bad_irq(irq, desc);
724
725
	if (!done) {
726
		printk(KERN_WARNING "%s: unknown flow handler for IRQ %d\n",
727
		       __FUNCTION__, irq);
728
		done = 1;
729
	}
730
}
731
732
void __ipipe_noack_irq(unsigned irq, struct irq_desc *desc)
733
{
734
}
735
736
void __ipipe_noend_irq(unsigned irq, struct irq_desc *desc)
737
{
738
}
739
740
irq_flow_handler_t
741
__fixup_irq_handler(struct irq_desc *desc, irq_flow_handler_t handle, int is_chained)
742
{
743
	if (unlikely(handle == NULL)) {
744
		desc->ipipe_ack = &__ipipe_ack_bad_irq;
745
		desc->ipipe_end = &__ipipe_noend_irq;
746
	} else {
747
		if (is_chained) {
748
			desc->ipipe_ack = handle;
749
			desc->ipipe_end = &__ipipe_noend_irq;
750
			handle = __ipipe_noack_irq;
751
		} else if (handle == &handle_simple_irq) {
752
			desc->ipipe_ack = &__ipipe_ack_simple_irq;
753
			desc->ipipe_end = &__ipipe_end_simple_irq;
754
		} else if (handle == &handle_level_irq) {
755
			desc->ipipe_ack = &__ipipe_ack_level_irq;
756
			desc->ipipe_end = &__ipipe_end_level_irq;
757
		} else if (handle == &handle_edge_irq) {
758
			desc->ipipe_ack = &__ipipe_ack_edge_irq;
759
			desc->ipipe_end = &__ipipe_end_edge_irq;
760
		} else if (handle == &handle_fasteoi_irq) {
761
			desc->ipipe_ack = &__ipipe_ack_fasteoi_irq;
762
			desc->ipipe_end = &__ipipe_end_fasteoi_irq;
763
		} else if (handle == &handle_percpu_irq) {
764
			desc->ipipe_ack = &__ipipe_ack_percpu_irq;
765
			desc->ipipe_end = &__ipipe_end_percpu_irq;
766
		} else if (desc->chip == &no_irq_chip) {
767
			desc->ipipe_ack = &__ipipe_noack_irq;
768
			desc->ipipe_end = &__ipipe_noend_irq;
769
		} else {
770
			desc->ipipe_ack = &__ipipe_ack_bad_irq;
771
			desc->ipipe_end = &__ipipe_noend_irq;
772
		}
773
	}
774
775
	/* Suppress intermediate trampoline routine. */
776
	ipipe_root_domain->irqs[desc->irq].acknowledge = desc->ipipe_ack;
777
778
	return handle;
779
}
780
781
#else /* !CONFIG_IPIPE */
782
783
irq_flow_handler_t
784
__fixup_irq_handler(struct irq_desc *desc, irq_flow_handler_t handle, int is_chained)
785
{
786
	return handle;
787
}
788
789
#endif /* !CONFIG_IPIPE */
790
651
void
791
void
652
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
792
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
653
		  const char *name)
793
		  const char *name)
Lines 679-684 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, Link Here
679
	chip_bus_lock(irq, desc);
819
	chip_bus_lock(irq, desc);
680
	spin_lock_irqsave(&desc->lock, flags);
820
	spin_lock_irqsave(&desc->lock, flags);
681
821
822
	handle = __fixup_irq_handler(desc, handle, is_chained);
823
682
	/* Uninstall? */
824
	/* Uninstall? */
683
	if (handle == handle_bad_irq) {
825
	if (handle == handle_bad_irq) {
684
		if (desc->chip != &no_irq_chip)
826
		if (desc->chip != &no_irq_chip)
(-)a/kernel/irq/handle.c (+4 lines)
Lines 462-469 unsigned int __do_IRQ(unsigned int irq) Link Here
462
		/*
462
		/*
463
		 * No locking required for CPU-local interrupts:
463
		 * No locking required for CPU-local interrupts:
464
		 */
464
		 */
465
#ifndef CONFIG_IPIPE
465
		if (desc->chip->ack)
466
		if (desc->chip->ack)
466
			desc->chip->ack(irq);
467
			desc->chip->ack(irq);
468
#endif
467
		if (likely(!(desc->status & IRQ_DISABLED))) {
469
		if (likely(!(desc->status & IRQ_DISABLED))) {
468
			action_ret = handle_IRQ_event(irq, desc->action);
470
			action_ret = handle_IRQ_event(irq, desc->action);
469
			if (!noirqdebug)
471
			if (!noirqdebug)
Lines 474-481 unsigned int __do_IRQ(unsigned int irq) Link Here
474
	}
476
	}
475
477
476
	spin_lock(&desc->lock);
478
	spin_lock(&desc->lock);
479
#ifndef CONFIG_IPIPE
477
	if (desc->chip->ack)
480
	if (desc->chip->ack)
478
		desc->chip->ack(irq);
481
		desc->chip->ack(irq);
482
#endif
479
	/*
483
	/*
480
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
484
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
481
	 * WAITING is used by probe to mark irqs that are being tested
485
	 * WAITING is used by probe to mark irqs that are being tested
(-)a/kernel/lockdep.c (-4 / +4 lines)
Lines 2318-2324 void trace_hardirqs_on_caller(unsigned long ip) Link Here
2318
	/* we'll do an OFF -> ON transition: */
2318
	/* we'll do an OFF -> ON transition: */
2319
	curr->hardirqs_enabled = 1;
2319
	curr->hardirqs_enabled = 1;
2320
2320
2321
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2321
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !irqs_disabled_hw()))
2322
		return;
2322
		return;
2323
	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2323
	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2324
		return;
2324
		return;
Lines 2361-2367 void trace_hardirqs_off_caller(unsigned long ip) Link Here
2361
	if (unlikely(!debug_locks || current->lockdep_recursion))
2361
	if (unlikely(!debug_locks || current->lockdep_recursion))
2362
		return;
2362
		return;
2363
2363
2364
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2364
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !irqs_disabled_hw()))
2365
		return;
2365
		return;
2366
2366
2367
	if (curr->hardirqs_enabled) {
2367
	if (curr->hardirqs_enabled) {
Lines 2393-2399 void trace_softirqs_on(unsigned long ip) Link Here
2393
	if (unlikely(!debug_locks))
2393
	if (unlikely(!debug_locks))
2394
		return;
2394
		return;
2395
2395
2396
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2396
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !irqs_disabled_hw()))
2397
		return;
2397
		return;
2398
2398
2399
	if (curr->softirqs_enabled) {
2399
	if (curr->softirqs_enabled) {
Lines 2427-2433 void trace_softirqs_off(unsigned long ip) Link Here
2427
	if (unlikely(!debug_locks))
2427
	if (unlikely(!debug_locks))
2428
		return;
2428
		return;
2429
2429
2430
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2430
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !irqs_disabled_hw()))
2431
		return;
2431
		return;
2432
2432
2433
	if (curr->softirqs_enabled) {
2433
	if (curr->softirqs_enabled) {
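
A note on the irqs_disabled_hw() checks introduced in the lockdep assertions above (and reused further down in lib/smp_processor_id.c): under I-pipe the regular local_irq_*() operations only toggle the root domain's virtual "stall" state, while the *_hw() variants act on the real CPU interrupt flag, so debug code has to accept either layer being disabled. The following stand-alone toy model (plain user-space C, names chosen only for illustration, not the patch's implementation) shows the two layers side by side:

/*
 * Toy model, not kernel code: "virtual" masking vs. hardware masking.
 */
#include <stdio.h>
#include <stdbool.h>

static bool root_stalled;	/* virtual mask: what irqs_disabled() reports */
static bool hw_masked;		/* real CPU flag: what irqs_disabled_hw() reports */

static void local_irq_disable(void)	{ root_stalled = true; }
static void local_irq_enable(void)	{ root_stalled = false; }
static void local_irq_disable_hw(void)	{ hw_masked = true; }
static void local_irq_enable_hw(void)	{ hw_masked = false; }

static bool irqs_disabled(void)		{ return root_stalled; }
static bool irqs_disabled_hw(void)	{ return hw_masked; }

int main(void)
{
	/* The root domain "disables interrupts": only the stall bit moves. */
	local_irq_disable();
	printf("virtual=%d hardware=%d\n", irqs_disabled(), irqs_disabled_hw());
	local_irq_enable();

	/* A higher-priority domain masks the CPU for real instead. */
	local_irq_disable_hw();
	printf("virtual=%d hardware=%d\n", irqs_disabled(), irqs_disabled_hw());
	local_irq_enable_hw();

	return 0;
}
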
(-)a/kernel/panic.c (+3 lines)
Lines 22-27 Link Here
22
#include <linux/init.h>
22
#include <linux/init.h>
23
#include <linux/nmi.h>
23
#include <linux/nmi.h>
24
#include <linux/dmi.h>
24
#include <linux/dmi.h>
25
#include <linux/ipipe_trace.h>
25
26
26
int panic_on_oops;
27
int panic_on_oops;
27
static unsigned long tainted_mask;
28
static unsigned long tainted_mask;
Lines 304-309 void oops_enter(void) Link Here
304
{
305
{
305
	tracing_off();
306
	tracing_off();
306
	/* can't trust the integrity of the kernel anymore: */
307
	/* can't trust the integrity of the kernel anymore: */
308
	ipipe_trace_panic_freeze();
309
	ipipe_disable_context_check(ipipe_processor_id());
307
	debug_locks_off();
310
	debug_locks_off();
308
	do_oops_enter_exit();
311
	do_oops_enter_exit();
309
}
312
}
(-)a/kernel/power/hibernate.c (+5 lines)
Lines 238-243 static int create_image(int platform_mode) Link Here
238
		goto Enable_cpus;
238
		goto Enable_cpus;
239
239
240
	local_irq_disable();
240
	local_irq_disable();
241
	local_irq_disable_hw_cond();
241
242
242
	error = sysdev_suspend(PMSG_FREEZE);
243
	error = sysdev_suspend(PMSG_FREEZE);
243
	if (error) {
244
	if (error) {
Lines 267-272 static int create_image(int platform_mode) Link Here
267
	 */
268
	 */
268
269
269
 Enable_irqs:
270
 Enable_irqs:
271
	local_irq_enable_hw_cond();
270
	local_irq_enable();
272
	local_irq_enable();
271
273
272
 Enable_cpus:
274
 Enable_cpus:
Lines 359-364 static int resume_target_kernel(bool platform_mode) Link Here
359
		goto Enable_cpus;
361
		goto Enable_cpus;
360
362
361
	local_irq_disable();
363
	local_irq_disable();
364
	local_irq_disable_hw_cond();
362
365
363
	error = sysdev_suspend(PMSG_QUIESCE);
366
	error = sysdev_suspend(PMSG_QUIESCE);
364
	if (error)
367
	if (error)
Lines 390-395 static int resume_target_kernel(bool platform_mode) Link Here
390
	sysdev_resume();
393
	sysdev_resume();
391
394
392
 Enable_irqs:
395
 Enable_irqs:
396
	local_irq_enable_hw_cond();
393
	local_irq_enable();
397
	local_irq_enable();
394
398
395
 Enable_cpus:
399
 Enable_cpus:
Lines 471-476 int hibernation_platform_enter(void) Link Here
471
		goto Platform_finish;
475
		goto Platform_finish;
472
476
473
	local_irq_disable();
477
	local_irq_disable();
478
	local_irq_disable_hw_cond();
474
	sysdev_suspend(PMSG_HIBERNATE);
479
	sysdev_suspend(PMSG_HIBERNATE);
475
	hibernation_ops->enter();
480
	hibernation_ops->enter();
476
	/* We should never get here */
481
	/* We should never get here */
(-)a/kernel/printk.c (+95 lines)
Lines 564-569 static int have_callable_console(void) Link Here
564
	return 0;
564
	return 0;
565
}
565
}
566
566
567
#ifdef CONFIG_IPIPE
568
569
static ipipe_spinlock_t __ipipe_printk_lock = IPIPE_SPIN_LOCK_UNLOCKED;
570
571
static int __ipipe_printk_fill;
572
573
static char __ipipe_printk_buf[__LOG_BUF_LEN];
574
575
void __ipipe_flush_printk (unsigned virq, void *cookie)
576
{
577
	char *p = __ipipe_printk_buf;
578
	int len, lmax, out = 0;
579
	unsigned long flags;
580
581
	goto start;
582
583
	do {
584
		spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
585
 start:
586
		lmax = __ipipe_printk_fill;
587
		while (out < lmax) {
588
			len = strlen(p) + 1;
589
			printk("%s",p);
590
			p += len;
591
			out += len;
592
		}
593
		spin_lock_irqsave(&__ipipe_printk_lock, flags);
594
	}
595
	while (__ipipe_printk_fill != lmax);
596
597
	__ipipe_printk_fill = 0;
598
599
	spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
600
}
601
567
/**
602
/**
568
 * printk - print a kernel message
603
 * printk - print a kernel message
569
 * @fmt: format string
604
 * @fmt: format string
Lines 588-593 static int have_callable_console(void) Link Here
588
623
589
asmlinkage int printk(const char *fmt, ...)
624
asmlinkage int printk(const char *fmt, ...)
590
{
625
{
626
	int r, fbytes, oldcount;
627
	unsigned long flags;
628
	int sprintk = 1;
629
	int cs = -1;
630
	va_list args;
631
632
	va_start(args, fmt);
633
634
	local_irq_save_hw(flags);
635
636
	if (test_bit(IPIPE_SPRINTK_FLAG, &__ipipe_current_domain->flags) ||
637
	    oops_in_progress)
638
		cs = ipipe_disable_context_check(ipipe_processor_id());
639
	else if (__ipipe_current_domain == ipipe_root_domain) {
640
		struct ipipe_domain *dom;
641
642
		list_for_each_entry(dom, &__ipipe_pipeline, p_link) {
643
			if (dom == ipipe_root_domain)
644
				break;
645
			if (test_bit(IPIPE_STALL_FLAG,
646
				     &ipipe_cpudom_var(dom, status)))
647
				sprintk = 0;
648
		}
649
	} else
650
		sprintk = 0;
651
652
	local_irq_restore_hw(flags);
653
654
	if (sprintk) {
655
		r = vprintk(fmt, args);
656
		if (cs != -1)
657
			ipipe_restore_context_check(ipipe_processor_id(), cs);
658
		goto out;
659
	}
660
661
	spin_lock_irqsave(&__ipipe_printk_lock, flags);
662
663
	oldcount = __ipipe_printk_fill;
664
	fbytes = __LOG_BUF_LEN - oldcount;
665
666
	if (fbytes > 1)	{
667
		r = vscnprintf(__ipipe_printk_buf + __ipipe_printk_fill,
668
			       fbytes, fmt, args) + 1; /* account for the null byte */
669
		__ipipe_printk_fill += r;
670
	} else
671
		r = 0;
672
673
	spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
674
675
	if (oldcount == 0)
676
		ipipe_trigger_irq(__ipipe_printk_virq);
677
out:
678
	va_end(args);
679
680
	return r;
681
}
682
#else /* !CONFIG_IPIPE */
683
asmlinkage int printk(const char *fmt, ...)
684
{
591
	va_list args;
685
	va_list args;
592
	int r;
686
	int r;
593
687
Lines 597-602 asmlinkage int printk(const char *fmt, ...) Link Here
597
691
598
	return r;
692
	return r;
599
}
693
}
694
#endif /* CONFIG_IPIPE */
600
695
601
/* cpu currently holding logbuf_lock */
696
/* cpu currently holding logbuf_lock */
602
static volatile unsigned int printk_cpu = UINT_MAX;
697
static volatile unsigned int printk_cpu = UINT_MAX;
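
The printk() variant added above stages messages from contexts that must not enter the regular console path (a stalled root domain or a non-root domain) into __ipipe_printk_buf, then triggers the printk virtual IRQ so __ipipe_flush_printk() replays them later from the root domain. A stripped-down, stand-alone sketch of that staging scheme follows; it is illustrative only, the buffer name and size are arbitrary, and the virtual IRQ is modeled as a direct function call:

/*
 * Illustrative only -- not kernel code. The real implementation
 * serializes both sides with __ipipe_printk_lock and re-checks the fill
 * level after each flush pass in case new messages were staged meanwhile.
 */
#include <stdio.h>
#include <string.h>

#define LOG_BUF_LEN 1024

static char staged_buf[LOG_BUF_LEN];
static int staged_fill;

/* Producer side: called where vprintk() must not run. */
static int staged_log(const char *msg)
{
	int len = strlen(msg) + 1;	/* keep the NUL as a record separator */

	if (staged_fill + len > LOG_BUF_LEN)
		return 0;		/* drop on overflow, like the patch does */
	memcpy(staged_buf + staged_fill, msg, len);
	staged_fill += len;
	return len;
}

/* Consumer side: runs later from a safe context (the virq handler). */
static void flush_staged_log(void)
{
	int out = 0;

	while (out < staged_fill) {
		const char *p = staged_buf + out;

		fputs(p, stdout);	/* the real handler calls printk("%s", p) */
		out += strlen(p) + 1;
	}
	staged_fill = 0;
}

int main(void)
{
	staged_log("message staged from a stalled/non-root context\n");
	staged_log("another deferred line\n");
	flush_staged_log();	/* in the patch this is driven by the printk virq */
	return 0;
}
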
(-)a/kernel/sched.c (-9 / +111 lines)
Lines 2344-2349 static int try_to_wake_up(struct task_struct *p, unsigned int state, Link Here
2344
			  int wake_flags)
2344
			  int wake_flags)
2345
{
2345
{
2346
	int cpu, orig_cpu, this_cpu, success = 0;
2346
	int cpu, orig_cpu, this_cpu, success = 0;
2347
	unsigned int old_state;
2347
	unsigned long flags;
2348
	unsigned long flags;
2348
	struct rq *rq, *orig_rq;
2349
	struct rq *rq, *orig_rq;
2349
2350
Lines 2355-2361 static int try_to_wake_up(struct task_st Link Here
2355
	smp_wmb();
2356
	smp_wmb();
2356
	rq = orig_rq = task_rq_lock(p, &flags);
2357
	rq = orig_rq = task_rq_lock(p, &flags);
2357
	update_rq_clock(rq);
2358
	update_rq_clock(rq);
2358
	if (!(p->state & state))
2359
	old_state = p->state;
2360
 	if (!(old_state & state) ||
2361
	    (old_state & (TASK_NOWAKEUP|TASK_ATOMICSWITCH)))
2359
		goto out;
2362
		goto out;
2360
2363
2361
	if (p->se.on_rq)
2364
	if (p->se.on_rq)
Lines 2840-2861 asmlinkage void schedule_tail(struct tas Link Here
2840
#endif
2843
#endif
2841
	if (current->set_child_tid)
2844
	if (current->set_child_tid)
2842
		put_user(task_pid_vnr(current), current->set_child_tid);
2845
		put_user(task_pid_vnr(current), current->set_child_tid);
2846
2847
 	ipipe_init_notify(current);
2843
}
2848
}
2844
2849
2845
/*
2850
/*
2846
 * context_switch - switch to the new MM and the new
2851
 * context_switch - switch to the new MM and the new
2847
 * thread's register state.
2852
 * thread's register state.
2848
 */
2853
 */
2849
static inline void
2854
int
2850
context_switch(struct rq *rq, struct task_struct *prev,
2855
context_switch(struct rq *rq, struct task_struct *prev,
2851
	       struct task_struct *next)
2856
	       struct task_struct *next)
2852
{
2857
{
2853
	struct mm_struct *mm, *oldmm;
2858
	struct mm_struct *mm, *oldmm;
2854
2859
2855
	prepare_task_switch(rq, prev, next);
2856
	trace_sched_switch(rq, prev, next);
2857
	mm = next->mm;
2860
	mm = next->mm;
2858
	oldmm = prev->active_mm;
2861
	oldmm = prev->active_mm;
2862
2863
if (!rq) {
2864
	switch_mm(oldmm, next->active_mm, next);
2865
	if (!mm) enter_lazy_tlb(oldmm, next);
2866
} else {
2867
	prepare_task_switch(rq, prev, next);
2868
	trace_sched_switch(rq, prev, next);
2859
	/*
2869
	/*
2860
	 * For paravirt, this is coupled with an exit in switch_to to
2870
	 * For paravirt, this is coupled with an exit in switch_to to
2861
	 * combine the page table reload and the switch backend into
2871
	 * combine the page table reload and the switch backend into
Lines 2883-2893 context_switch(struct rq *rq, struct tas Link Here
2883
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
2893
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
2884
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2894
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2885
#endif
2895
#endif
2886
2896
}
2897
#ifdef CONFIG_IPIPE
2898
	next->ptd[IPIPE_ROOT_NPTDKEYS - 1] = prev;
2899
#endif /* CONFIG_IPIPE */
2887
	/* Here we just switch the register state and the stack. */
2900
	/* Here we just switch the register state and the stack. */
2888
	switch_to(prev, next, prev);
2901
	switch_to(prev, next, prev);
2889
2902
2890
	barrier();
2903
	barrier();
2904
2905
if (unlikely(rq)) {
2906
#if 1 // def CONFIG_IPIPE_DELAYED_ATOMICSW
2907
	current->state &= ~TASK_ATOMICSWITCH;
2908
#else
2909
	prev->state &= ~TASK_ATOMICSWITCH;
2910
#endif
2911
	if (task_hijacked(prev))
2912
		return 1; __ipipe_dispatch_event(IPIPE_FIRST_EVENT - 2, 0);
2913
2891
	/*
2914
	/*
2892
	 * this_rq must be evaluated again because prev may have moved
2915
	 * this_rq must be evaluated again because prev may have moved
2893
	 * CPUs since it called schedule(), thus the 'rq' on its stack
2916
	 * CPUs since it called schedule(), thus the 'rq' on its stack
Lines 2895-2900 context_switch(struct rq *rq, struct tas Link Here
2895
	 */
2918
	 */
2896
	finish_task_switch(this_rq(), prev);
2919
	finish_task_switch(this_rq(), prev);
2897
}
2920
}
2921
	return 0;
2922
}
2923
2924
EXPORT_SYMBOL(context_switch);
2898
2925
2899
/*
2926
/*
2900
 * nr_running, nr_uninterruptible and nr_context_switches:
2927
 * nr_running, nr_uninterruptible and nr_context_switches:
Lines 5300-5305 notrace unsigned long get_parent_ip(unsi Link Here
5300
5327
5301
void __kprobes add_preempt_count(int val)
5328
void __kprobes add_preempt_count(int val)
5302
{
5329
{
5330
 	ipipe_check_context(ipipe_root_domain);
5303
#ifdef CONFIG_DEBUG_PREEMPT
5331
#ifdef CONFIG_DEBUG_PREEMPT
5304
	/*
5332
	/*
5305
	 * Underflow?
5333
	 * Underflow?
Lines 5322-5327 EXPORT_SYMBOL(add_preempt_count); Link Here
5322
5350
5323
void __kprobes sub_preempt_count(int val)
5351
void __kprobes sub_preempt_count(int val)
5324
{
5352
{
5353
 	ipipe_check_context(ipipe_root_domain);
5325
#ifdef CONFIG_DEBUG_PREEMPT
5354
#ifdef CONFIG_DEBUG_PREEMPT
5326
	/*
5355
	/*
5327
	 * Underflow?
5356
	 * Underflow?
Lines 5370-5375 static noinline void __schedule_bug(stru Link Here
5370
 */
5399
 */
5371
static inline void schedule_debug(struct task_struct *prev)
5400
static inline void schedule_debug(struct task_struct *prev)
5372
{
5401
{
5402
	ipipe_check_context(ipipe_root_domain);
5373
	/*
5403
	/*
5374
	 * Test if we are atomic. Since do_exit() needs to call into
5404
	 * Test if we are atomic. Since do_exit() needs to call into
5375
	 * schedule() atomically, we ignore that path for now.
5405
	 * schedule() atomically, we ignore that path for now.
Lines 5448-5454 pick_next_task(struct rq *rq) Link Here
5448
/*
5478
/*
5449
 * schedule() is the main scheduler function.
5479
 * schedule() is the main scheduler function.
5450
 */
5480
 */
5451
asmlinkage void __sched schedule(void)
5481
asmlinkage int __sched schedule(void)
5452
{
5482
{
5453
	struct task_struct *prev, *next;
5483
	struct task_struct *prev, *next;
5454
	unsigned long *switch_count;
5484
	unsigned long *switch_count;
Lines 5462-5467 need_resched: Link Here
5462
	rcu_sched_qs(cpu);
5492
	rcu_sched_qs(cpu);
5463
	prev = rq->curr;
5493
	prev = rq->curr;
5464
	switch_count = &prev->nivcsw;
5494
	switch_count = &prev->nivcsw;
5495
 	if (unlikely(prev->state & TASK_ATOMICSWITCH))
5496
		/* Pop one disable level -- one still remains. */
5497
		preempt_enable();
5465
5498
5466
	release_kernel_lock(prev);
5499
	release_kernel_lock(prev);
5467
need_resched_nonpreemptible:
5500
need_resched_nonpreemptible:
Lines 5499-5513 need_resched_nonpreemptible: Link Here
5499
		rq->curr = next;
5532
		rq->curr = next;
5500
		++*switch_count;
5533
		++*switch_count;
5501
5534
5502
		context_switch(rq, prev, next); /* unlocks the rq */
5535
		if (context_switch(rq, prev, next)) /* unlocks the rq */
5536
 			return 1; /* task hijacked by higher domain */
5503
		/*
5537
		/*
5504
		 * the context switch might have flipped the stack from under
5538
		 * the context switch might have flipped the stack from under
5505
		 * us, hence refresh the local variables.
5539
		 * us, hence refresh the local variables.
5506
		 */
5540
		 */
5507
		cpu = smp_processor_id();
5541
		cpu = smp_processor_id();
5508
		rq = cpu_rq(cpu);
5542
		rq = cpu_rq(cpu);
5509
	} else
5543
	} else {
5544
 		prev->state &= ~TASK_ATOMICSWITCH;
5510
		spin_unlock_irq(&rq->lock);
5545
		spin_unlock_irq(&rq->lock);
5546
	}
5511
5547
5512
	post_schedule(rq);
5548
	post_schedule(rq);
5513
5549
Lines 5517-5522 need_resched_nonpreemptible: Link Here
5517
	preempt_enable_no_resched();
5553
	preempt_enable_no_resched();
5518
	if (need_resched())
5554
	if (need_resched())
5519
		goto need_resched;
5555
		goto need_resched;
5556
5557
	return 0;
5520
}
5558
}
5521
EXPORT_SYMBOL(schedule);
5559
EXPORT_SYMBOL(schedule);
5522
5560
Lines 5600-5606 asmlinkage void __sched preempt_schedule Link Here
5600
5638
5601
	do {
5639
	do {
5602
		add_preempt_count(PREEMPT_ACTIVE);
5640
		add_preempt_count(PREEMPT_ACTIVE);
5603
		schedule();
5641
		if (schedule())
5642
			return;
5604
		sub_preempt_count(PREEMPT_ACTIVE);
5643
		sub_preempt_count(PREEMPT_ACTIVE);
5605
5644
5606
		/*
5645
		/*
Lines 6371-6376 recheck: Link Here
6371
	oldprio = p->prio;
6410
	oldprio = p->prio;
6372
	prev_class = p->sched_class;
6411
	prev_class = p->sched_class;
6373
	__setscheduler(rq, p, policy, param->sched_priority);
6412
	__setscheduler(rq, p, policy, param->sched_priority);
6413
  	ipipe_setsched_notify(p);
6374
6414
6375
	if (running)
6415
	if (running)
6376
		p->sched_class->set_curr_task(rq);
6416
		p->sched_class->set_curr_task(rq);
Lines 7018-7023 void __cpuinit init_idle(struct task_str Link Here
7018
#else
7058
#else
7019
	task_thread_info(idle)->preempt_count = 0;
7059
	task_thread_info(idle)->preempt_count = 0;
7020
#endif
7060
#endif
7061
	ipipe_check_context(ipipe_root_domain);
7021
	/*
7062
	/*
7022
	 * The idle tasks have their own, simple scheduling class:
7063
	 * The idle tasks have their own, simple scheduling class:
7023
	 */
7064
	 */
Lines 10958-10960 void synchronize_sched_expedited(void) Link Here
10958
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
10999
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
10959
11000
10960
#endif /* #else #ifndef CONFIG_SMP */
11001
#endif /* #else #ifndef CONFIG_SMP */
11002
11003
#ifdef CONFIG_IPIPE
11004
11005
int ipipe_setscheduler_root(struct task_struct *p, int policy, int prio)
11006
{
11007
	const struct sched_class *prev_class = p->sched_class;
11008
	int oldprio, on_rq, running;
11009
	unsigned long flags;
11010
	struct rq *rq;
11011
11012
	spin_lock_irqsave(&p->pi_lock, flags);
11013
	rq = __task_rq_lock(p);
11014
	update_rq_clock(rq);
11015
	on_rq = p->se.on_rq;
11016
	running = task_current(rq, p);
11017
	if (on_rq)
11018
		deactivate_task(rq, p, 0);
11019
	if (running)
11020
		p->sched_class->put_prev_task(rq, p);
11021
11022
	p->sched_reset_on_fork = 0;
11023
11024
	oldprio = p->prio;
11025
	__setscheduler(rq, p, policy, prio);
11026
	ipipe_setsched_notify(p);
11027
11028
	if (running)
11029
		p->sched_class->set_curr_task(rq);
11030
	if (on_rq) {
11031
		activate_task(rq, p, 0);
11032
11033
		check_class_changed(rq, p, prev_class, oldprio, running);
11034
	}
11035
	__task_rq_unlock(rq);
11036
	spin_unlock_irqrestore(&p->pi_lock, flags);
11037
11038
	rt_mutex_adjust_pi(p);
11039
11040
	return 0;
11041
}
11042
EXPORT_SYMBOL_GPL(ipipe_setscheduler_root);
11043
11044
int ipipe_reenter_root(struct task_struct *prev, int policy, int prio)
11045
{
11046
	struct rq *rq = this_rq();
11047
11048
	finish_task_switch(rq, prev);
11049
11050
	post_schedule(rq);
11051
11052
	(void)reacquire_kernel_lock(current);
11053
	preempt_enable_no_resched();
11054
11055
	if (current->policy != policy || current->rt_priority != prio)
11056
		return ipipe_setscheduler_root(current, policy, prio);
11057
11058
	return 0;
11059
}
11060
EXPORT_SYMBOL_GPL(ipipe_reenter_root);
11061
11062
#endif /* CONFIG_IPIPE */
(-)a/kernel/signal.c (+1 lines)
Lines 518-523 void signal_wake_up(struct task_struct *t, int resume) Link Here
518
	unsigned int mask;
518
	unsigned int mask;
519
519
520
	set_tsk_thread_flag(t, TIF_SIGPENDING);
520
	set_tsk_thread_flag(t, TIF_SIGPENDING);
521
	ipipe_sigwake_notify(t); /* TIF_SIGPENDING must be set first. */
521
522
522
	/*
523
	/*
523
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
524
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
(-)a/kernel/spinlock.c (-1 / +3 lines)
Lines 50-56 EXPORT_SYMBOL(_write_trylock); Link Here
50
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
50
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
51
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
51
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
52
 */
52
 */
53
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
53
#if !defined(CONFIG_GENERIC_LOCKBREAK) || \
54
  defined(CONFIG_DEBUG_LOCK_ALLOC) || \
55
  defined(CONFIG_IPIPE)
54
56
55
#ifndef _read_lock
57
#ifndef _read_lock
56
void __lockfunc _read_lock(rwlock_t *lock)
58
void __lockfunc _read_lock(rwlock_t *lock)
(-)a/kernel/time/tick-common.c (-1 / +5 lines)
Lines 69-75 static void tick_periodic(int cpu) Link Here
69
		write_sequnlock(&xtime_lock);
69
		write_sequnlock(&xtime_lock);
70
	}
70
	}
71
71
72
	update_process_times(user_mode(get_irq_regs()));
72
	update_root_process_times(get_irq_regs());
73
	profile_tick(CPU_PROFILING);
73
	profile_tick(CPU_PROFILING);
74
}
74
}
75
75
Lines 177-182 static void tick_setup_device(struct tick_device *td, Link Here
177
177
178
	td->evtdev = newdev;
178
	td->evtdev = newdev;
179
179
180
	/* I-pipe: derive global tick IRQ from CPU 0 */
181
	if (cpu == 0)
182
		ipipe_update_tick_evtdev(newdev);
183
180
	/*
184
	/*
181
	 * When the device is not per cpu, pin the interrupt to the
185
	 * When the device is not per cpu, pin the interrupt to the
182
	 * current cpu:
186
	 * current cpu:
(-)a/kernel/time/tick-sched.c (-2 / +2 lines)
Lines 549-555 static void tick_nohz_handler(struct clock_event_device *dev) Link Here
549
		ts->idle_jiffies++;
549
		ts->idle_jiffies++;
550
	}
550
	}
551
551
552
	update_process_times(user_mode(regs));
552
	update_root_process_times(regs);
553
	profile_tick(CPU_PROFILING);
553
	profile_tick(CPU_PROFILING);
554
554
555
	while (tick_nohz_reprogram(ts, now)) {
555
	while (tick_nohz_reprogram(ts, now)) {
Lines 700-706 static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) Link Here
700
			touch_softlockup_watchdog();
700
			touch_softlockup_watchdog();
701
			ts->idle_jiffies++;
701
			ts->idle_jiffies++;
702
		}
702
		}
703
		update_process_times(user_mode(regs));
703
		update_root_process_times(regs);
704
		profile_tick(CPU_PROFILING);
704
		profile_tick(CPU_PROFILING);
705
	}
705
	}
706
706
(-)a/kernel/timer.c (+19 lines)
Lines 1204-1209 void update_process_times(int user_tick) Link Here
1204
	run_posix_cpu_timers(p);
1204
	run_posix_cpu_timers(p);
1205
}
1205
}
1206
1206
1207
#ifdef CONFIG_IPIPE
1208
1209
void update_root_process_times(struct pt_regs *regs)
1210
{
1211
	int cpu, user_tick = user_mode(regs);
1212
1213
	if (__ipipe_root_tick_p(regs)) {
1214
		update_process_times(user_tick);
1215
		return;
1216
	}
1217
1218
	run_local_timers();
1219
	cpu = smp_processor_id();
1220
	rcu_check_callbacks(cpu, user_tick);
1221
	run_posix_cpu_timers(current);
1222
}
1223
1224
#endif
1225
1207
/*
1226
/*
1208
 * This function runs timers and the timer-tq in bottom half context.
1227
 * This function runs timers and the timer-tq in bottom half context.
1209
 */
1228
 */
(-)a/kernel/trace/ftrace.c (-4 / +14 lines)
Lines 28-33 Link Here
28
#include <linux/ctype.h>
28
#include <linux/ctype.h>
29
#include <linux/list.h>
29
#include <linux/list.h>
30
#include <linux/hash.h>
30
#include <linux/hash.h>
31
#include <linux/ipipe.h>
31
32
32
#include <trace/events/sched.h>
33
#include <trace/events/sched.h>
33
34
Lines 1142-1147 static int __ftrace_modify_code(void *data) Link Here
1142
1143
1143
static void ftrace_run_update_code(int command)
1144
static void ftrace_run_update_code(int command)
1144
{
1145
{
1146
#ifdef CONFIG_IPIPE
1147
	unsigned long flags;
1148
#endif /* CONFIG_IPIPE */
1145
	int ret;
1149
	int ret;
1146
1150
1147
	ret = ftrace_arch_code_modify_prepare();
1151
	ret = ftrace_arch_code_modify_prepare();
Lines 1149-1155 static void ftrace_run_update_code(int command) Link Here
1149
	if (ret)
1153
	if (ret)
1150
		return;
1154
		return;
1151
1155
1156
#ifdef CONFIG_IPIPE
1157
	flags = ipipe_critical_enter(NULL);
1158
	__ftrace_modify_code(&command);
1159
	ipipe_critical_exit(flags);
1160
#else  /* !CONFIG_IPIPE */
1152
	stop_machine(__ftrace_modify_code, &command, NULL);
1161
	stop_machine(__ftrace_modify_code, &command, NULL);
1162
#endif /* !CONFIG_IPIPE */
1153
1163
1154
	ret = ftrace_arch_code_modify_post_process();
1164
	ret = ftrace_arch_code_modify_post_process();
1155
	FTRACE_WARN_ON(ret);
1165
	FTRACE_WARN_ON(ret);
Lines 2648-2656 static int ftrace_convert_nops(struct module *mod, Link Here
2648
	}
2658
	}
2649
2659
2650
	/* disable interrupts to prevent kstop machine */
2660
	/* disable interrupts to prevent kstop machine */
2651
	local_irq_save(flags);
2661
	local_irq_save_hw_notrace(flags);
2652
	ftrace_update_code(mod);
2662
	ftrace_update_code(mod);
2653
	local_irq_restore(flags);
2663
	local_irq_restore_hw_notrace(flags);
2654
	mutex_unlock(&ftrace_lock);
2664
	mutex_unlock(&ftrace_lock);
2655
2665
2656
	return 0;
2666
	return 0;
Lines 2729-2737 void __init ftrace_init(void) Link Here
2729
	/* Keep the ftrace pointer to the stub */
2739
	/* Keep the ftrace pointer to the stub */
2730
	addr = (unsigned long)ftrace_stub;
2740
	addr = (unsigned long)ftrace_stub;
2731
2741
2732
	local_irq_save(flags);
2742
	local_irq_save_hw_notrace(flags);
2733
	ftrace_dyn_arch_init(&addr);
2743
	ftrace_dyn_arch_init(&addr);
2734
	local_irq_restore(flags);
2744
	local_irq_restore_hw_notrace(flags);
2735
2745
2736
	/* ftrace_dyn_arch_init places the return code in addr */
2746
	/* ftrace_dyn_arch_init places the return code in addr */
2737
	if (addr)
2747
	if (addr)
(-)a/lib/Kconfig.debug (+2 lines)
Lines 136-141 config DEBUG_SECTION_MISMATCH Link Here
136
	  - Enable verbose reporting from modpost to help solving
136
	  - Enable verbose reporting from modpost to help solving
137
	    the section mismatches reported.
137
	    the section mismatches reported.
138
138
139
source "kernel/ipipe/Kconfig.debug"
140
139
config DEBUG_KERNEL
141
config DEBUG_KERNEL
140
	bool "Kernel debugging"
142
	bool "Kernel debugging"
141
	help
143
	help
(-)a/lib/bust_spinlocks.c (+2 lines)
Lines 13-18 Link Here
13
#include <linux/wait.h>
13
#include <linux/wait.h>
14
#include <linux/vt_kern.h>
14
#include <linux/vt_kern.h>
15
#include <linux/console.h>
15
#include <linux/console.h>
16
#include <linux/ipipe_trace.h>
16
17
17
18
18
void __attribute__((weak)) bust_spinlocks(int yes)
19
void __attribute__((weak)) bust_spinlocks(int yes)
Lines 24-29 void __attribute__((weak)) bust_spinlocks(int yes) Link Here
24
		unblank_screen();
25
		unblank_screen();
25
#endif
26
#endif
26
		console_unblank();
27
		console_unblank();
28
  		ipipe_trace_panic_dump();
27
		if (--oops_in_progress == 0)
29
		if (--oops_in_progress == 0)
28
			wake_up_klogd();
30
			wake_up_klogd();
29
	}
31
	}
(-)a/lib/ioremap.c (-2 / +2 lines)
Lines 85-92 int ioremap_page_range(unsigned long addr, Link Here
85
		if (err)
85
		if (err)
86
			break;
86
			break;
87
	} while (pgd++, addr = next, addr != end);
87
	} while (pgd++, addr = next, addr != end);
88
88
	__ipipe_pin_range_globally(start, end);
89
	flush_cache_vmap(start, end);
89
 	flush_cache_vmap(start, end);
90
90
91
	return err;
91
	return err;
92
}
92
}
(-)a/lib/smp_processor_id.c (-1 / +4 lines)
Lines 12-21 notrace unsigned int debug_smp_processor_id(void) Link Here
12
	unsigned long preempt_count = preempt_count();
12
	unsigned long preempt_count = preempt_count();
13
	int this_cpu = raw_smp_processor_id();
13
	int this_cpu = raw_smp_processor_id();
14
14
15
	if (!ipipe_root_domain_p)
16
		goto out;
17
15
	if (likely(preempt_count))
18
	if (likely(preempt_count))
16
		goto out;
19
		goto out;
17
20
18
	if (irqs_disabled())
21
	if (irqs_disabled() || irqs_disabled_hw())
19
		goto out;
22
		goto out;
20
23
21
	/*
24
	/*
(-)a/mm/memory.c (-31 / +83 lines)
Lines 56-61 Link Here
56
#include <linux/kallsyms.h>
56
#include <linux/kallsyms.h>
57
#include <linux/swapops.h>
57
#include <linux/swapops.h>
58
#include <linux/elf.h>
58
#include <linux/elf.h>
59
#include <linux/vmalloc.h>
59
60
60
#include <asm/io.h>
61
#include <asm/io.h>
61
#include <asm/pgalloc.h>
62
#include <asm/pgalloc.h>
Lines 566-571 out:
 	return pfn_to_page(pfn);
 }
 
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
+{
+	/*
+	 * If the source page was a PFN mapping, we don't have
+	 * a "struct page" for it. We do a best-effort copy by
+	 * just copying from the original user address. If that
+	 * fails, we just zero-fill it. Live with it.
+	 */
+	if (unlikely(!src)) {
+		void *kaddr = kmap_atomic(dst, KM_USER0);
+		void __user *uaddr = (void __user *)(va & PAGE_MASK);
+
+		/*
+		 * This really shouldn't fail, because the page is there
+		 * in the page tables. But it might just be unreadable,
+		 * in which case we just give up and fill the result with
+		 * zeroes.
+		 */
+		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
+			memset(kaddr, 0, PAGE_SIZE);
+		kunmap_atomic(kaddr, KM_USER0);
+		flush_dcache_page(dst);
+	} else
+		copy_user_highpage(dst, src, va, vma);
+}
+
 /*
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range
Lines 574-581 out:
 
 static inline void
 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
-		unsigned long addr, int *rss)
+	     pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
+	     unsigned long addr, int *rss, struct page *uncow_page)
 {
 	unsigned long vm_flags = vma->vm_flags;
 	pte_t pte = *src_pte;
Lines 614-619 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * in the parent and the child
 	 */
 	if (is_cow_mapping(vm_flags)) {
+#ifdef CONFIG_IPIPE
+		if (uncow_page) {
+			struct page *old_page = vm_normal_page(vma, addr, pte);
+			cow_user_page(uncow_page, old_page, addr, vma);
+			pte = mk_pte(uncow_page, vma->vm_page_prot);
+
+			if (vm_flags & VM_SHARED)
+				pte = pte_mkclean(pte);
+			pte = pte_mkold(pte);
+
+			page_add_new_anon_rmap(uncow_page, vma, addr);
+			rss[!!PageAnon(uncow_page)]++;
+			goto out_set_pte;
+		}
+#endif /* CONFIG_IPIPE */
 		ptep_set_wrprotect(src_mm, addr, src_pte);
 		pte = pte_wrprotect(pte);
 	}
Lines 645-657 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pte_t *src_pte, *dst_pte;
 	spinlock_t *src_ptl, *dst_ptl;
 	int progress = 0;
+	struct page *uncow_page = NULL;
 	int rss[2];
 
+#ifdef CONFIG_IPIPE
+	int do_cow_break = 0;
+again:
+	if (do_cow_break) {
+		uncow_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+		if (!uncow_page)
+			return -ENOMEM;
+		do_cow_break = 0;
+	}
+#else
 again:
+#endif
 	rss[1] = rss[0] = 0;
 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
-	if (!dst_pte)
+	if (!dst_pte) {
+		if (uncow_page)
+			page_cache_release(uncow_page);
 		return -ENOMEM;
+	}
 	src_pte = pte_offset_map_nested(src_pmd, addr);
 	src_ptl = pte_lockptr(src_mm, src_pmd);
 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
Lines 674-680 again:
 			progress++;
 			continue;
 		}
-		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
+#ifdef CONFIG_IPIPE
+		if (likely(uncow_page == NULL) && likely(pte_present(*src_pte))) {
+			if (is_cow_mapping(vma->vm_flags) &&
+			    test_bit(MMF_VM_PINNED, &src_mm->flags) &&
+			    ((vma->vm_flags|src_mm->def_flags) & VM_LOCKED)) {
+				arch_leave_lazy_mmu_mode();
+				spin_unlock(src_ptl);
+				pte_unmap_nested(src_pte);
+				add_mm_rss(dst_mm, rss[0], rss[1]);
+				pte_unmap_unlock(dst_pte, dst_ptl);
+				cond_resched();
+				do_cow_break = 1;
+				goto again;
+			}
+		}
+#endif
+		copy_one_pte(dst_mm, src_mm, dst_pte,
+			     src_pte, vma, addr, rss, uncow_page);
+		uncow_page = NULL;
 		progress += 8;
 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
 
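Taken together, the copy_one_pte()/copy_pte_range() changes make fork() break copy-on-write up front for address spaces flagged MMF_VM_PINNED (a flag this patch introduces elsewhere, typically set once the co-kernel has committed to keeping a process's memory fault-free) whose memory is VM_LOCKED: the child receives private copies of those pages via the uncow_page path while fork() runs, instead of both processes being write-protected and copied later from the page-fault handler. Without this, the first write a real-time thread performs on an mlock()ed page after fork() still takes a minor fault on the write-protected range, which is exactly the kind of latency pinned memory is supposed to exclude. A small userspace demonstration of that stock behaviour, plain C and not part of the patch:

/* cowdemo.c -- count the minor faults a parent takes on its first write
 * to mlock()ed memory after fork().  Build: gcc -O2 -o cowdemo cowdemo.c */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/wait.h>

static long minor_faults(void)
{
	struct rusage ru;

	getrusage(RUSAGE_SELF, &ru);
	return ru.ru_minflt;
}

int main(void)
{
	size_t len = 1 << 20;	/* 1 MiB = 256 pages on x86 */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	long before;
	pid_t pid;

	if (buf == MAP_FAILED) { perror("mmap"); return 1; }
	memset(buf, 0xa5, len);			/* fault everything in */
	if (mlock(buf, len)) { perror("mlock"); return 1; }

	pid = fork();				/* write-protects the COW range */
	if (pid == 0)
		_exit(0);
	waitpid(pid, NULL, 0);

	before = minor_faults();
	memset(buf, 0x5a, len);			/* rewrite the locked range */
	printf("minor faults on first write after fork: %ld\n",
	       minor_faults() - before);
	return 0;
}

On a mainline kernel this typically reports roughly one minor fault per page in the buffer; with the hunks above, an MMF_VM_PINNED address space pays that copying cost inside fork() instead of on the real-time write path.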
Lines 1941-1972 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
-{
-	/*
-	 * If the source page was a PFN mapping, we don't have
-	 * a "struct page" for it. We do a best-effort copy by
-	 * just copying from the original user address. If that
-	 * fails, we just zero-fill it. Live with it.
-	 */
-	if (unlikely(!src)) {
-		void *kaddr = kmap_atomic(dst, KM_USER0);
-		void __user *uaddr = (void __user *)(va & PAGE_MASK);
-
-		/*
-		 * This really shouldn't fail, because the page is there
-		 * in the page tables. But it might just be unreadable,
-		 * in which case we just give up and fill the result with
-		 * zeroes.
-		 */
-		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
-			memset(kaddr, 0, PAGE_SIZE);
-		kunmap_atomic(kaddr, KM_USER0);
-		flush_dcache_page(dst);
-	} else
-		copy_user_highpage(dst, src, va, vma);
-}
-
 /*
  * This routine handles present pages, when users try to write
  * to a shared page. It is done by copying the page to a new address
Lines 3377-3379 void might_fault(void)
 }
 EXPORT_SYMBOL(might_fault);
 #endif
+
+#ifdef CONFIG_IPIPE
+
+static inline int ipipe_pin_
