/* -*- linux-c -*-
 * linux/arch/x86/kernel/ipipe.c
 *
 * Copyright (C) 2002-2007 Philippe Gerum.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 * USA; either version 2 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Architecture-dependent I-PIPE support for x86.
 */

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/kprobes.h>
#include <asm/unistd.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/desc.h>
#include <asm/io.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <asm/mpspec.h>
#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
#endif /* CONFIG_X86_IO_APIC */
#include <asm/apic.h>
#endif /* CONFIG_X86_LOCAL_APIC */
#include <asm/traps.h>

int __ipipe_tick_irq = 0;	/* Legacy timer */

DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);

DEFINE_PER_CPU(unsigned long, __ipipe_cr2);
EXPORT_PER_CPU_SYMBOL_GPL(__ipipe_cr2);

#ifdef CONFIG_SMP

static cpumask_t __ipipe_cpu_sync_map;

static cpumask_t __ipipe_cpu_lock_map;

static unsigned long __ipipe_critical_lock;

static IPIPE_DEFINE_SPINLOCK(__ipipe_cpu_barrier);

static atomic_t __ipipe_critical_count = ATOMIC_INIT(0);

static void (*__ipipe_cpu_sync) (void);

#endif /* CONFIG_SMP */

/*
 * ipipe_trigger_irq() -- Push the interrupt to the front of the
 * pipeline, just as if it had actually been received from a hw
 * source. Also works for virtual interrupts.
 */
int ipipe_trigger_irq(unsigned int irq)
{
	struct pt_regs regs;
	unsigned long flags;

#ifdef CONFIG_IPIPE_DEBUG
	if (irq >= IPIPE_NR_IRQS)
		return -EINVAL;
	if (ipipe_virtual_irq_p(irq)) {
		if (!test_bit(irq - IPIPE_VIRQ_BASE,
			      &__ipipe_virtual_irq_map))
			return -EINVAL;
	} else if (irq_to_desc(irq) == NULL)
		return -EINVAL;
#endif
	local_irq_save_hw(flags);
	regs.flags = flags;
	regs.orig_ax = irq;	/* Positive value - IRQ won't be acked */
	regs.cs = __KERNEL_CS;
	__ipipe_handle_irq(&regs);
	local_irq_restore_hw(flags);

	return 1;
}
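
/*
 * Usage sketch: a client domain would typically allocate a virtual
 * IRQ, attach a handler to it, then inject it through
 * ipipe_trigger_irq(); my_virq_handler below is only a placeholder.
 *
 *	unsigned int virq = ipipe_alloc_virq();
 *
 *	ipipe_virtualize_irq(ipipe_root_domain, virq,
 *			     &my_virq_handler, NULL, NULL,
 *			     IPIPE_HANDLE_MASK);
 *	ipipe_trigger_irq(virq);
 */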

int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
{
	info->ncpus = num_online_cpus();
	info->cpufreq = ipipe_cpu_freq();
	info->archdep.tmirq = __ipipe_tick_irq;
#ifdef CONFIG_X86_TSC
	info->archdep.tmfreq = ipipe_cpu_freq();
#else /* !CONFIG_X86_TSC */
	info->archdep.tmfreq = CLOCK_TICK_RATE;
#endif /* CONFIG_X86_TSC */

	return 0;
}

#ifdef CONFIG_X86_UV
asmlinkage void uv_bau_message_interrupt(struct pt_regs *regs);
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
asmlinkage void smp_threshold_interrupt(void);
#endif
#ifdef CONFIG_X86_NEW_MCE
asmlinkage void smp_mce_self_interrupt(void);
#endif

static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
{
	desc->ipipe_ack(irq, desc);
}
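
/*
 * __ipipe_enable_irqdesc() below simply clears IRQ_DISABLED in the
 * generic descriptor when an IRQ gets virtualized for a domain, so
 * that the line is not left masked behind the pipeline's back.
 */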

void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
	irq_to_desc(irq)->status &= ~IRQ_DISABLED;
}

#ifdef CONFIG_X86_LOCAL_APIC

static void __ipipe_noack_apic(unsigned irq, struct irq_desc *desc)
{
}

static void __ipipe_ack_apic(unsigned irq, struct irq_desc *desc)
{
	__ack_APIC_irq();
}

static void __ipipe_null_handler(unsigned irq, void *cookie)
{
}

#endif /* CONFIG_X86_LOCAL_APIC */

/* __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
   interrupts are off, and secondary CPUs are still lost in space. */

void __init __ipipe_enable_pipeline(void)
{
	unsigned int vector, irq;

#ifdef CONFIG_X86_LOCAL_APIC

	/* Map the APIC system vectors. */

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(LOCAL_TIMER_VECTOR),
			     (ipipe_irq_handler_t)&smp_apic_timer_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(SPURIOUS_APIC_VECTOR),
			     (ipipe_irq_handler_t)&smp_spurious_interrupt,
			     NULL,
			     &__ipipe_noack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(ERROR_APIC_VECTOR),
			     (ipipe_irq_handler_t)&smp_error_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR0),
			     &__ipipe_null_handler,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR1),
			     &__ipipe_null_handler,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR2),
			     &__ipipe_null_handler,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR3),
			     &__ipipe_null_handler,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

#ifdef CONFIG_X86_THERMAL_VECTOR
	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(THERMAL_APIC_VECTOR),
			     (ipipe_irq_handler_t)&smp_thermal_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);
#endif /* CONFIG_X86_THERMAL_VECTOR */

#ifdef CONFIG_X86_MCE_THRESHOLD
	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(THRESHOLD_APIC_VECTOR),
			     (ipipe_irq_handler_t)&smp_threshold_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);
#endif /* CONFIG_X86_MCE_THRESHOLD */

#ifdef CONFIG_X86_NEW_MCE
	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(MCE_SELF_VECTOR),
			     (ipipe_irq_handler_t)&smp_mce_self_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);
#endif /* CONFIG_X86_NEW_MCE */

#ifdef CONFIG_X86_UV
	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(UV_BAU_MESSAGE),
			     (ipipe_irq_handler_t)&uv_bau_message_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);
#endif /* CONFIG_X86_UV */

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(GENERIC_INTERRUPT_VECTOR),
			     (ipipe_irq_handler_t)&smp_generic_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

#ifdef CONFIG_PERF_COUNTERS
	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(LOCAL_PENDING_VECTOR),
			     (ipipe_irq_handler_t)&perf_pending_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);
#endif /* CONFIG_PERF_COUNTERS */

#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_SMP
	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(RESCHEDULE_VECTOR),
			     (ipipe_irq_handler_t)&smp_reschedule_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	for (vector = INVALIDATE_TLB_VECTOR_START;
	     vector <= INVALIDATE_TLB_VECTOR_END; ++vector)
		ipipe_virtualize_irq(ipipe_root_domain,
				     ipipe_apic_vector_irq(vector),
				     (ipipe_irq_handler_t)&smp_invalidate_interrupt,
				     NULL,
				     &__ipipe_ack_apic,
				     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(CALL_FUNCTION_VECTOR),
			     (ipipe_irq_handler_t)&smp_call_function_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(CALL_FUNCTION_SINGLE_VECTOR),
			     (ipipe_irq_handler_t)&smp_call_function_single_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     IRQ_MOVE_CLEANUP_VECTOR,
			     (ipipe_irq_handler_t)&smp_irq_move_cleanup_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(REBOOT_VECTOR),
			     (ipipe_irq_handler_t)&smp_reboot_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);
#else
	(void)vector;
#endif /* CONFIG_SMP */

	/* Finally, virtualize the remaining ISA and IO-APIC
	 * interrupts. Interrupts which have already been virtualized
	 * will just beget a silent -EPERM error since
	 * IPIPE_SYSTEM_MASK has been passed for them; that's ok. */

	for (irq = 0; irq < NR_IRQS; irq++)
		/*
		 * Fails for IPIPE_CRITICAL_IPI and IRQ_MOVE_CLEANUP_VECTOR,
		 * but that's ok.
		 */
		ipipe_virtualize_irq(ipipe_root_domain,
				     irq,
				     (ipipe_irq_handler_t)&do_IRQ,
				     NULL,
				     &__ipipe_ack_irq,
				     IPIPE_STDROOT_MASK);

#ifdef CONFIG_X86_LOCAL_APIC
	/* Allow these vectors to be reprogrammed later on. */
	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI0].control &= ~IPIPE_SYSTEM_MASK;
	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI1].control &= ~IPIPE_SYSTEM_MASK;
	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI2].control &= ~IPIPE_SYSTEM_MASK;
	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI3].control &= ~IPIPE_SYSTEM_MASK;
#endif /* CONFIG_X86_LOCAL_APIC */
}

#ifdef CONFIG_SMP

cpumask_t __ipipe_set_irq_affinity(unsigned irq, cpumask_t cpumask)
{
	cpumask_t oldmask;

	if (irq_to_desc(irq)->chip->set_affinity == NULL)
		return CPU_MASK_NONE;

	if (cpus_empty(cpumask))
		return CPU_MASK_NONE;	/* Return mask value -- no change. */

	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return CPU_MASK_NONE;	/* Error -- bad mask value or non-routable IRQ. */

	cpumask_copy(&oldmask, irq_to_desc(irq)->affinity);
	irq_to_desc(irq)->chip->set_affinity(irq, &cpumask);

	return oldmask;
}
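
/*
 * Usage sketch: migrate an IRQ to CPU #0, then restore the previous
 * affinity. The empty-mask check covers the error case, since
 * CPU_MASK_NONE is returned when no change was applied.
 *
 *	cpumask_t oldmask = __ipipe_set_irq_affinity(irq, cpumask_of_cpu(0));
 *	...
 *	if (!cpus_empty(oldmask))
 *		__ipipe_set_irq_affinity(irq, oldmask);
 */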

int __ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
{
	unsigned long flags;
	int self;

	if (ipi != IPIPE_SERVICE_IPI0 &&
	    ipi != IPIPE_SERVICE_IPI1 &&
	    ipi != IPIPE_SERVICE_IPI2 &&
	    ipi != IPIPE_SERVICE_IPI3)
		return -EINVAL;

	local_irq_save_hw(flags);

	self = cpu_isset(ipipe_processor_id(), cpumask);
	cpu_clear(ipipe_processor_id(), cpumask);

	if (!cpus_empty(cpumask))
		apic->send_IPI_mask(&cpumask, ipipe_apic_irq_vector(ipi));

	if (self)
		ipipe_trigger_irq(ipi);

	local_irq_restore_hw(flags);

	return 0;
}
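
/*
 * Usage sketch: raise service IPI0 on all online CPUs, assuming the
 * receiving domain has virtualized IPIPE_SERVICE_IPI0. The current
 * CPU is removed from the hardware broadcast and, when it was part of
 * the mask, gets the interrupt locally via ipipe_trigger_irq().
 *
 *	__ipipe_send_ipi(IPIPE_SERVICE_IPI0, cpu_online_map);
 */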

/* Always called with hw interrupts off. */

void __ipipe_do_critical_sync(unsigned irq, void *cookie)
{
	int cpu = ipipe_processor_id();

	cpu_set(cpu, __ipipe_cpu_sync_map);

	/*
	 * Now we are in sync with the lock requestor running on
	 * another CPU. Enter a spinning wait until it releases the
	 * global lock.
	 */
	spin_lock(&__ipipe_cpu_barrier);

	/* Got it. Now get out. */

	if (__ipipe_cpu_sync)
		/* Call the sync routine if any. */
		__ipipe_cpu_sync();

	spin_unlock(&__ipipe_cpu_barrier);

	cpu_clear(cpu, __ipipe_cpu_sync_map);
}

void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd)
{
	ipd->irqs[IPIPE_CRITICAL_IPI].acknowledge = &__ipipe_ack_apic;
	ipd->irqs[IPIPE_CRITICAL_IPI].handler = &__ipipe_do_critical_sync;
	ipd->irqs[IPIPE_CRITICAL_IPI].cookie = NULL;
	/* Immediately handle in the current domain but *never* pass */
	ipd->irqs[IPIPE_CRITICAL_IPI].control =
		IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SYSTEM_MASK;
}

#endif /* CONFIG_SMP */

/*
 * ipipe_critical_enter() -- Grab the superlock excluding all CPUs but
 * the current one from a critical section. This lock is used when we
 * must enforce a global critical section for a single CPU in a
 * possibly SMP system, whatever context the CPUs are running in.
 */
unsigned long ipipe_critical_enter(void (*syncfn) (void))
{
	unsigned long flags;

	local_irq_save_hw(flags);

#ifdef CONFIG_SMP
	if (unlikely(num_online_cpus() == 1))
		return flags;

	{
		int cpu = ipipe_processor_id();
		cpumask_t lock_map;

		if (!cpu_test_and_set(cpu, __ipipe_cpu_lock_map)) {
			while (test_and_set_bit(0, &__ipipe_critical_lock)) {
				int n = 0;
				do {
					cpu_relax();
				} while (++n < cpu);
			}

			spin_lock(&__ipipe_cpu_barrier);

			__ipipe_cpu_sync = syncfn;

			/* Send the sync IPI to all processors but the current one. */
			apic->send_IPI_allbutself(IPIPE_CRITICAL_VECTOR);

			cpus_andnot(lock_map, cpu_online_map, __ipipe_cpu_lock_map);

			while (!cpus_equal(__ipipe_cpu_sync_map, lock_map))
				cpu_relax();
		}

		atomic_inc(&__ipipe_critical_count);
	}
#endif /* CONFIG_SMP */

	return flags;
}

/* ipipe_critical_exit() -- Release the superlock. */

void ipipe_critical_exit(unsigned long flags)
{
#ifdef CONFIG_SMP
	if (num_online_cpus() == 1)
		goto out;

	if (atomic_dec_and_test(&__ipipe_critical_count)) {
		spin_unlock(&__ipipe_cpu_barrier);

		while (!cpus_empty(__ipipe_cpu_sync_map))
			cpu_relax();

		cpu_clear(ipipe_processor_id(), __ipipe_cpu_lock_map);
		clear_bit(0, &__ipipe_critical_lock);
		smp_mb__after_clear_bit();
	}
out:
#endif /* CONFIG_SMP */

	local_irq_restore_hw(flags);
}
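
/*
 * Usage sketch: exclude all other CPUs while updating some globally
 * visible state; they spin until ipipe_critical_exit() is called,
 * then each runs the optional sync routine passed to
 * ipipe_critical_enter(). __resync_this_cpu() is only a placeholder.
 *
 *	unsigned long flags;
 *
 *	flags = ipipe_critical_enter(&__resync_this_cpu);
 *	... update the global state while other CPUs are held ...
 *	ipipe_critical_exit(flags);
 */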

static inline void __fixup_if(int s, struct pt_regs *regs)
{
	/*
	 * Have the saved hw state look like the domain stall bit, so
	 * that __ipipe_unstall_iret_root() restores the proper
	 * pipeline state for the root stage upon exit.
	 */
	if (s)
		regs->flags &= ~X86_EFLAGS_IF;
	else
		regs->flags |= X86_EFLAGS_IF;
}

#ifdef CONFIG_X86_32

/*
 * Check the stall bit of the root domain to make sure the existing
 * preemption opportunity upon in-kernel resumption could be
 * exploited. In case a rescheduling could take place, the root stage
 * is stalled before the hw interrupts are re-enabled. This routine
 * must be called with hw interrupts off.
 */

asmlinkage int __ipipe_kpreempt_root(struct pt_regs regs)
{
	if (test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
		/* Root stage is stalled: rescheduling denied. */
		return 0;

	__ipipe_stall_root();
	trace_hardirqs_off();
	local_irq_enable_hw_notrace();

	return 1;	/* Ok, may reschedule now. */
}

asmlinkage void __ipipe_unstall_iret_root(struct pt_regs regs)
{
	struct ipipe_percpu_domain_data *p;

	/* Emulate IRET's handling of the interrupt flag. */

	local_irq_disable_hw();

	p = ipipe_root_cpudom_ptr();

	/*
	 * Restore the software state as it used to be on kernel
	 * entry. CAUTION: NMIs must *not* return through this
	 * emulation.
	 */
	if (raw_irqs_disabled_flags(regs.flags)) {
		if (!__test_and_set_bit(IPIPE_STALL_FLAG, &p->status))
			trace_hardirqs_off();
		regs.flags |= X86_EFLAGS_IF;
	} else {
		if (test_bit(IPIPE_STALL_FLAG, &p->status)) {
			trace_hardirqs_on();
			__clear_bit(IPIPE_STALL_FLAG, &p->status);
		}
		/*
		 * We could have received and logged interrupts while
		 * stalled in the syscall path: play the log now to
		 * release any pending event. The SYNC_BIT prevents
		 * infinite recursion in case of flooding.
		 */
		if (unlikely(__ipipe_ipending_p(p)))
			__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
	}
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
	ipipe_trace_end(0x8000000D);
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
}

#else /* !CONFIG_X86_32 */

#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule_irq(void);

void __ipipe_preempt_schedule_irq(void)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;
	/*
	 * We have no IRQ state fixup on entry to exceptions in
	 * x86_64, so we have to stall the root stage before
	 * rescheduling.
	 */
	BUG_ON(!irqs_disabled_hw());
	local_irq_save(flags);
	local_irq_enable_hw();
	preempt_schedule_irq();	/* Ok, may reschedule now. */
	local_irq_disable_hw();

	/*
	 * Flush any pending interrupt that may have been logged
	 * between the point where preempt_schedule_irq() stalled the
	 * root stage before returning to us, and now.
	 */
	p = ipipe_root_cpudom_ptr();
	if (unlikely(__ipipe_ipending_p(p))) {
		add_preempt_count(PREEMPT_ACTIVE);
		trace_hardirqs_on();
		clear_bit(IPIPE_STALL_FLAG, &p->status);
		__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
		sub_preempt_count(PREEMPT_ACTIVE);
	}

	__local_irq_restore_nosync(flags);
}

#endif /* CONFIG_PREEMPT */

#endif /* !CONFIG_X86_32 */

void __ipipe_halt_root(void)
{
	struct ipipe_percpu_domain_data *p;

	/* Emulate sti+hlt sequence over the root domain. */

	local_irq_disable_hw();

	p = ipipe_root_cpudom_ptr();

	trace_hardirqs_on();
	clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (unlikely(__ipipe_ipending_p(p))) {
		__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
		local_irq_enable_hw();
	} else {
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
		ipipe_trace_end(0x8000000E);
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
		asm volatile("sti; hlt": : :"memory");
	}
}

static void do_machine_check_vector(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_MCE
#ifdef CONFIG_X86_32
	extern void (*machine_check_vector)(struct pt_regs *, long error_code);
	machine_check_vector(regs, error_code);
#else
	do_machine_check(regs, error_code);
#endif
#endif /* CONFIG_X86_MCE */
}

/* Work around genksyms's issue with over-qualification in decls. */

typedef void dotraplinkage __ipipe_exhandler(struct pt_regs *, long);

typedef __ipipe_exhandler *__ipipe_exptr;

static __ipipe_exptr __ipipe_std_extable[] = {

	[ex_do_divide_error] = &do_divide_error,
	[ex_do_overflow] = &do_overflow,
	[ex_do_bounds] = &do_bounds,
	[ex_do_invalid_op] = &do_invalid_op,
	[ex_do_coprocessor_segment_overrun] = &do_coprocessor_segment_overrun,
	[ex_do_invalid_TSS] = &do_invalid_TSS,
	[ex_do_segment_not_present] = &do_segment_not_present,
	[ex_do_stack_segment] = &do_stack_segment,
	[ex_do_general_protection] = do_general_protection,
	[ex_do_page_fault] = (__ipipe_exptr)&do_page_fault,
	[ex_do_spurious_interrupt_bug] = &do_spurious_interrupt_bug,
	[ex_do_coprocessor_error] = &do_coprocessor_error,
	[ex_do_alignment_check] = &do_alignment_check,
	[ex_machine_check_vector] = &do_machine_check_vector,
	[ex_do_simd_coprocessor_error] = &do_simd_coprocessor_error,
	[ex_do_device_not_available] = &do_device_not_available,
#ifdef CONFIG_X86_32
	[ex_do_iret_error] = &do_iret_error,
#endif
};

#ifdef CONFIG_KGDB
#include <linux/kgdb.h>

static int __ipipe_xlate_signo[] = {

	[ex_do_divide_error] = SIGFPE,
	[ex_do_debug] = SIGTRAP,
	[2] = -1,
	[ex_do_int3] = SIGTRAP,
	[ex_do_overflow] = SIGSEGV,
	[ex_do_bounds] = SIGSEGV,
	[ex_do_invalid_op] = SIGILL,
	[ex_do_device_not_available] = -1,
	[8] = -1,
	[ex_do_coprocessor_segment_overrun] = SIGFPE,
	[ex_do_invalid_TSS] = SIGSEGV,
	[ex_do_segment_not_present] = SIGBUS,
	[ex_do_stack_segment] = SIGBUS,
	[ex_do_general_protection] = SIGSEGV,
	[ex_do_page_fault] = SIGSEGV,
	[ex_do_spurious_interrupt_bug] = -1,
	[ex_do_coprocessor_error] = -1,
	[ex_do_alignment_check] = SIGBUS,
	[ex_machine_check_vector] = -1,
	[ex_do_simd_coprocessor_error] = -1,
	[20 ... 31] = -1,
#ifdef CONFIG_X86_32
	[ex_do_iret_error] = SIGSEGV,
#endif
};
#endif /* CONFIG_KGDB */

int __ipipe_handle_exception(struct pt_regs *regs, long error_code, int vector)
{
	bool root_entry = false;
	unsigned long flags = 0;
	unsigned long cr2 = 0;

	if (ipipe_root_domain_p) {
		root_entry = true;

		local_save_flags(flags);
		/*
		 * Replicate hw interrupt state into the virtual mask
		 * before calling the I-pipe event handler over the
		 * root domain. Also required later when calling the
		 * Linux exception handler.
		 */
		if (irqs_disabled_hw())
			local_irq_disable();
	}
#ifdef CONFIG_KGDB
	/* catch exception KGDB is interested in over non-root domains */
	else if (__ipipe_xlate_signo[vector] >= 0 &&
		 !kgdb_handle_exception(vector, __ipipe_xlate_signo[vector],
					error_code, regs))
		return 1;
#endif /* CONFIG_KGDB */

	if (vector == ex_do_page_fault)
		cr2 = native_read_cr2();

	if (unlikely(ipipe_trap_notify(vector, regs))) {
		if (root_entry)
			local_irq_restore_nosync(flags);
		return 1;
	}

	if (likely(ipipe_root_domain_p)) {
		/*
		 * In case we faulted in the iret path, regs.flags do not
		 * match the root domain state. The fault handler or the
		 * low-level return code may evaluate it. Fix this up, either
		 * with the root state sampled on entry or, if we migrated to
		 * root, with the current state.
		 */
		__fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
					raw_irqs_disabled(), regs);
	} else {
		/* Detect unhandled faults over non-root domains. */
		struct ipipe_domain *ipd = ipipe_current_domain;

		/* Switch to root so that Linux can handle the fault cleanly. */
		__ipipe_current_domain = ipipe_root_domain;

		ipipe_trace_panic_freeze();

		/* Always warn about user land and unfixable faults. */
		if ((error_code & 4) || !search_exception_tables(instruction_pointer(regs))) {
			printk(KERN_ERR "BUG: Unhandled exception over domain"
			       " %s at 0x%lx - switching to ROOT\n",
			       ipd->name, instruction_pointer(regs));
			dump_stack();
			ipipe_trace_panic_dump();
#ifdef CONFIG_IPIPE_DEBUG
		/* Also report fixable ones when debugging is enabled. */
		} else {
			printk(KERN_WARNING "WARNING: Fixable exception over "
			       "domain %s at 0x%lx - switching to ROOT\n",
			       ipd->name, instruction_pointer(regs));
			dump_stack();
			ipipe_trace_panic_dump();
#endif /* CONFIG_IPIPE_DEBUG */
		}
	}

	if (vector == ex_do_page_fault)
		write_cr2(cr2);

	__ipipe_std_extable[vector](regs, error_code);

	/*
	 * Relevant for 64-bit: Restore root domain state as the low-level
	 * return code will not align it to regs.flags.
	 */
	if (root_entry)
		local_irq_restore_nosync(flags);

	return 0;
}

int __ipipe_divert_exception(struct pt_regs *regs, int vector)
{
	bool root_entry = false;
	unsigned long flags = 0;

	if (ipipe_root_domain_p) {
		root_entry = true;

		local_save_flags(flags);

		if (irqs_disabled_hw()) {
			/*
			 * Same root state handling as in
			 * __ipipe_handle_exception.
			 */
			local_irq_disable();
		}
	}
#ifdef CONFIG_KGDB
	/* catch int1 and int3 over non-root domains */
	else {
#ifdef CONFIG_X86_32
		if (vector != ex_do_device_not_available)
#endif
		{
			unsigned int condition = 0;

			if (vector == 1)
				get_debugreg(condition, 6);
			if (!kgdb_handle_exception(vector, SIGTRAP, condition, regs))
				return 1;
		}
	}
#endif /* CONFIG_KGDB */

	if (unlikely(ipipe_trap_notify(vector, regs))) {
		if (root_entry)
			local_irq_restore_nosync(flags);
		return 1;
	}

	/* see __ipipe_handle_exception */
	if (likely(ipipe_root_domain_p))
		__fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
					raw_irqs_disabled(), regs);
	/*
	 * No need to restore root state in the 64-bit case: the Linux
	 * handler and the return code will take care of it.
	 */

	return 0;
}

int __ipipe_syscall_root(struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;
	int ret;

	/*
	 * This routine either returns:
	 * 0 -- if the syscall is to be passed to Linux;
	 * >0 -- if the syscall should not be passed to Linux, and no
	 * tail work should be performed;
	 * <0 -- if the syscall should not be passed to Linux but the
	 * tail work has to be performed (for handling signals etc).
	 */

	if (!__ipipe_syscall_watched_p(current, regs->orig_ax) ||
	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
		return 0;

	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
	if (!ipipe_root_domain_p) {
#ifdef CONFIG_X86_64
		local_irq_disable_hw();
#endif
		return 1;
	}

	local_irq_save_hw(flags);
	p = ipipe_root_cpudom_ptr();
#ifdef CONFIG_X86_32
	/*
	 * Fix-up only required on 32-bit as only here the IRET return code
	 * will evaluate the flags.
	 */
	__fixup_if(test_bit(IPIPE_STALL_FLAG, &p->status), regs);
#endif
	/*
	 * If allowed, sync pending VIRQs before _TIF_NEED_RESCHED is
	 * tested.
	 */
	if (__ipipe_ipending_p(p))
		__ipipe_sync_pipeline(IPIPE_IRQ_DOVIRT);
#ifdef CONFIG_X86_64
	if (!ret)
#endif
		local_irq_restore_hw(flags);

	return -ret;
}

/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are off on entry.
 */
int __ipipe_handle_irq(struct pt_regs *regs)
{
	struct ipipe_domain *this_domain, *next_domain;
	unsigned int vector = regs->orig_ax, irq;
	struct list_head *head, *pos;
	int m_ack;

	if ((long)regs->orig_ax < 0) {
		vector = ~vector;
#ifdef CONFIG_X86_LOCAL_APIC
		if (vector >= FIRST_SYSTEM_VECTOR)
			irq = ipipe_apic_vector_irq(vector);
#ifdef CONFIG_SMP
		else if (vector == IRQ_MOVE_CLEANUP_VECTOR)
			irq = vector;
#endif /* CONFIG_SMP */
		else
#endif /* CONFIG_X86_LOCAL_APIC */
			irq = __get_cpu_var(vector_irq)[vector];
		m_ack = 0;
	} else { /* This is a self-triggered one. */
		irq = vector;
		m_ack = 1;
	}

	this_domain = ipipe_current_domain;

	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge)
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
			__ipipe_dispatch_wired(next_domain, irq);
			goto finalize_nosync;
		}
	}

	/* Ack the interrupt. */

	pos = head;

	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && next_domain->irqs[irq].acknowledge) {
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
		goto finalize_nosync;

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline.
	 */

	__ipipe_walk_pipeline(head);

finalize_nosync:

	/*
	 * Given our deferred dispatching model for regular IRQs, we
	 * only record CPU regs for the last timer interrupt, so that
	 * the timer handler charges CPU times properly. It is assumed
	 * that other interrupt handlers don't actually care for such
	 * information.
	 */

	if (irq == __ipipe_tick_irq) {
		struct pt_regs *tick_regs = &__raw_get_cpu_var(__ipipe_tick_regs);
		tick_regs->flags = regs->flags;
		tick_regs->cs = regs->cs;
		tick_regs->ip = regs->ip;
		tick_regs->bp = regs->bp;
#ifdef CONFIG_X86_64
		tick_regs->ss = regs->ss;
		tick_regs->sp = regs->sp;
#endif
		if (!ipipe_root_domain_p)
			tick_regs->flags &= ~X86_EFLAGS_IF;
	}

	if (!ipipe_root_domain_p ||
	    test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
		return 0;

#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
	/*
	 * Prevent a spurious rescheduling from being triggered on
	 * preemptible kernels along the way out through
	 * ret_from_intr.
	 */
	if ((long)regs->orig_ax < 0)
		__set_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
#endif /* CONFIG_X86_32 && CONFIG_SMP */

	return 1;
}
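
/*
 * __ipipe_check_tickdev() below tells the pipeline whether the named
 * clock event device may be used as a tick source; on x86 only the
 * local APIC timer requires an extra sanity check.
 */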

int __ipipe_check_tickdev(const char *devname)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!strcmp(devname, "lapic"))
		return __ipipe_check_lapic();
#endif

	return 1;
}

void *ipipe_irq_handler = __ipipe_handle_irq;
EXPORT_SYMBOL(ipipe_irq_handler);
EXPORT_SYMBOL(io_apic_irqs);
EXPORT_PER_CPU_SYMBOL(__ipipe_tick_regs);
__attribute__((regparm(3))) void do_notify_resume(struct pt_regs *, void *, __u32);
EXPORT_SYMBOL(do_notify_resume);
extern void *sys_call_table;
EXPORT_SYMBOL(sys_call_table);
#ifdef CONFIG_X86_32
extern void ret_from_intr(void);
EXPORT_SYMBOL(ret_from_intr);
extern spinlock_t i8259A_lock;
extern struct desc_struct idt_table[];
#else
extern ipipe_spinlock_t i8259A_lock;
extern gate_desc idt_table[];
#endif
EXPORT_PER_CPU_SYMBOL(vector_irq);
EXPORT_SYMBOL(idt_table);
EXPORT_SYMBOL(i8259A_lock);
EXPORT_SYMBOL(__ipipe_sync_stage);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(find_task_by_pid_ns);

EXPORT_SYMBOL(__ipipe_tick_irq);

EXPORT_SYMBOL_GPL(irq_to_desc);
struct task_struct *__switch_to(struct task_struct *prev_p,
				struct task_struct *next_p);
EXPORT_SYMBOL_GPL(__switch_to);
EXPORT_SYMBOL_GPL(show_stack);

EXPORT_PER_CPU_SYMBOL_GPL(init_tss);
#ifdef CONFIG_SMP
EXPORT_PER_CPU_SYMBOL_GPL(cpu_tlbstate);
#endif /* CONFIG_SMP */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
EXPORT_SYMBOL(tasklist_lock);
#endif /* CONFIG_SMP || CONFIG_DEBUG_SPINLOCK */

#if defined(CONFIG_CC_STACKPROTECTOR) && defined(CONFIG_X86_64)
EXPORT_PER_CPU_SYMBOL_GPL(irq_stack_union);
#endif

EXPORT_SYMBOL(__ipipe_halt_root);