Lines 58-83
@@ -58,26 +58,57 @@
  * conflicts. we try to rely on the kernel's provided interfaces when possible,
  * but need additional flushing on earlier kernels.
  */
-#if defined(KERNEL_2_4)
-/* wrap CACHE_FLUSH so we can pass it to smp_call_function */
-static void cache_flush(void *p)
-{
-    CACHE_FLUSH();
-}
-#endif
-
 /*
  * 2.4 kernels handle flushing in the change_page_attr() call, but kernels
  * earlier than 2.4.27 don't flush on cpus that support Self Snoop, so we
  * manually flush on these kernels (actually, we go ahead and flush on all
  * 2.4 kernels, as it's possible some others may be missing this fix and
  * we'd prefer to be a little slower flushing caches than hanging the
- * system. 2.6 kernels split the flushing out to a seperate call,
- * global_flush_tlb(), so we rely on that.
+ * system.
+ * 2.6 kernels split the flushing out to a seperate call,
+ * global_flush_tlb(), so we rely on that. however, there are some 2.6
+ * x86_64 kernels that do not properly flush. for now, we'll flush on all
+ * potential kernels, as it's slightly slower, but safer.
  */
+#if defined(KERNEL_2_4) || (defined(KERNEL_2_6) && defined(NVCPU_X86_64))
+#define NV_CPA_NEEDS_FLUSHING 1
+#endif
+
+#if defined(NV_CPA_NEEDS_FLUSHING)
+static void cache_flush(void *p)
+{
+    unsigned long reg0, reg1;
+
+    CACHE_FLUSH();
+
+    // flush global TLBs
+#if defined (NVCPU_X86)
+    asm volatile("movl %%cr4, %0;  \n"
+                 "andl $~0x80, %0; \n"
+                 "movl %0, %%cr4;  \n"
+                 "movl %%cr3, %1;  \n"
+                 "movl %1, %%cr3;  \n"
+                 "orl $0x80, %0;   \n"
+                 "movl %0, %%cr4;  \n"
+                 : "=&r" (reg0), "=&r" (reg1)
+                 : : "memory");
+#else
+    asm volatile("movq %%cr4, %0;  \n"
+                 "andq $~0x80, %0; \n"
+                 "movq %0, %%cr4;  \n"
+                 "movq %%cr3, %1;  \n"
+                 "movq %1, %%cr3;  \n"
+                 "orq $0x80, %0;   \n"
+                 "movq %0, %%cr4;  \n"
+                 : "=&r" (reg0), "=&r" (reg1)
+                 : : "memory");
+#endif
+}
+#endif
+
 static void nv_flush_caches(void)
 {
-#if defined(KERNEL_2_4)
+#if defined(NV_CPA_NEEDS_FLUSHING)
 #ifdef CONFIG_SMP
     smp_call_function(cache_flush, NULL, 1, 1);
 #endif
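
For reference, the magic constant in the new cache_flush() is bit 7 of CR4, the PGE (Page Global Enable) flag. Below is an illustrative, commented sketch of the same global-TLB-flush technique on x86-64; it is not the driver's code, and the helper name nv_flush_global_tlb_sketch is hypothetical. Writing CR4 with PGE cleared invalidates global TLB entries, the CR3 reload flushes the non-global ones, and restoring PGE re-enables global pages.

/* Illustrative sketch (hypothetical helper, not part of the patch). */
static void nv_flush_global_tlb_sketch(void)
{
    unsigned long cr4, cr3;

    asm volatile("movq %%cr4, %0;  \n"   /* read CR4                            */
                 "andq $~0x80, %0; \n"   /* clear PGE (bit 7, 0x80)             */
                 "movq %0, %%cr4;  \n"   /* write back: flushes global TLBs     */
                 "movq %%cr3, %1;  \n"   /* read CR3                            */
                 "movq %1, %%cr3;  \n"   /* reload CR3: flushes non-global TLBs */
                 "orq $0x80, %0;   \n"   /* set PGE again                       */
                 "movq %0, %%cr4;  \n"   /* restore CR4                         */
                 : "=&r" (cr4), "=&r" (cr3)
                 : : "memory");
}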
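
A note on the invocation pattern in nv_flush_caches(): smp_call_function() runs the given callback on every CPU except the calling one, so the local CPU still needs its own flush. The hunk ends at the smp_call_function() call, and the local flush presumably follows in the unchanged code below it; CACHE_FLUSH() itself is defined elsewhere in the driver and is not shown in this hunk. A minimal sketch of that pattern, with the trailing local call marked as an assumption:

/* Sketch of the cross-CPU flush pattern; the trailing local call is an
 * assumption, since the hunk ends before the rest of nv_flush_caches(). */
static void nv_flush_caches_sketch(void)
{
#if defined(NV_CPA_NEEDS_FLUSHING)
#ifdef CONFIG_SMP
    /* (func, info, retry, wait): run cache_flush on all other CPUs and
     * wait for completion (2.4 / early 2.6 kernel signature).          */
    smp_call_function(cache_flush, NULL, 1, 1);
#endif
    cache_flush(NULL);  /* flush the local CPU as well (assumed) */
#endif
}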