Gentoo's Bugzilla – Attachment 42026 Details for Bug 66605: Amule crypto++ things
Description: Removes P4-specific optimizations.
Filename:    cryptopp.diff
MIME Type:   text/plain
Creator:     Mikkel Schubert
Created:     2004-10-17 05:09:44 UTC
Size:        14.60 KB
Flags:       patch, obsolete
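In summary, the patch deletes crypto++'s runtime x86 feature detection (CpuId, HasSSE2, IsP4), the whole P4Optimized class (hand-written Pentium 4 add/subtract assembly plus the SSE2 Multiply4/Multiply8/Multiply8Bottom routines), the DisableSSE2() hook, and two NDEBUG-only self-checks, so that SetPentiumFunctionPointers() always installs the generic PentiumOptimized implementations. The surrounding dispatch machinery is untouched: a table of function pointers is filled in once, before main(), by a static initializer. A minimal sketch of that idiom follows (all names here are hypothetical, not crypto++ or aMule API):

// Sketch of the startup function-pointer dispatch used in CryptoPP.cpp.
// The comma-operator static initializer runs the setup function during
// static initialization, the same trick as
// s_RunAtStartupSetPentiumFunctionPointers in the patched file.
#include <cstdio>

typedef unsigned int word;
typedef word (*PAddSub)(word *C, const word *A, const word *B, unsigned int N);

// Portable multiword addition: returns the final carry.
static word PortableAdd(word *C, const word *A, const word *B, unsigned int N)
{
    word carry = 0;
    for (unsigned int i = 0; i < N; i++)
    {
        word s = A[i] + carry;
        carry = (s < carry);        // overflow from adding the carry
        C[i] = s + B[i];
        carry += (C[i] < s);        // overflow from adding B[i]
    }
    return carry;
}

static PAddSub s_pAdd;

static void SetFunctionPointers()
{
    // After the patch there is no CPU probe: the generic routine is
    // installed unconditionally, just like PentiumOptimized::Add above.
    s_pAdd = &PortableAdd;
}

// Runs SetFunctionPointers() before main().
static const char s_runAtStartup = (SetFunctionPointers(), 0);

int main()
{
    word a[2] = {0xffffffffu, 1}, b[2] = {1, 2}, c[2];
    word carry = s_pAdd(c, a, b, 2);
    std::printf("%08x %08x carry=%u\n", c[1], c[0], carry);   // 00000004 00000000 carry=0
    return 0;
}

With the runtime probe gone, choosing an implementation becomes a fixed decision at startup, which is exactly what the simplified SetPentiumFunctionPointers() in the diff below does.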
Index: src/CryptoPP.cpp
===================================================================
RCS file: /opt/cvs/amule-dev-cvs/src/CryptoPP.cpp,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- src/CryptoPP.cpp    25 Sep 2004 13:49:52 -0000    1.3
+++ src/CryptoPP.cpp    17 Oct 2004 11:15:45 -0000    1.4
@@ -2534,98 +2534,6 @@
 
 #ifdef CRYPTOPP_X86ASM_AVAILABLE
 
-// ************** x86 feature detection ***************
-
-static bool s_sse2Enabled = true;
-
-static void CpuId(word32 input, word32 *output)
-{
-#ifdef __GNUC__
-    __asm__
-    (
-        // save ebx in case -fPIC is being used
-        "push %%ebx; cpuid; mov %%ebx, %%edi; pop %%ebx"
-        : "=a" (output[0]), "=D" (output[1]), "=c" (output[2]), "=d" (output[3])
-        : "a" (input)
-    );
-#else
-    __asm
-    {
-        mov eax, input
-        cpuid
-        mov edi, output
-        mov [edi], eax
-        mov [edi+4], ebx
-        mov [edi+8], ecx
-        mov [edi+12], edx
-    }
-#endif
-}
-
-#ifdef SSE2_INTRINSICS_AVAILABLE
-#ifndef _MSC_VER
-static jmp_buf s_env;
-static void SigIllHandler(int)
-{
-    longjmp(s_env, 1);
-}
-#endif
-
-static bool HasSSE2()
-{
-    if (!s_sse2Enabled)
-        return false;
-
-    word32 cpuid[4];
-    CpuId(1, cpuid);
-    if ((cpuid[3] & (1 << 26)) == 0)
-        return false;
-
-#ifdef _MSC_VER
-    __try
-    {
-        __asm xorpd xmm0, xmm0    // executing SSE2 instruction
-    }
-    __except (1)
-    {
-        return false;
-    }
-    return true;
-#else
-    typedef void (*SigHandler)(int);
-
-    SigHandler oldHandler = signal(SIGILL, SigIllHandler);
-    if (oldHandler == SIG_ERR)
-        return false;
-
-    bool result = true;
-    if (setjmp(s_env))
-        result = false;
-    else
-        __asm __volatile ("xorps %xmm0, %xmm0");
-
-    signal(SIGILL, oldHandler);
-    return result;
-#endif
-
-}
-#endif
-
-static bool IsP4()
-{
-    word32 cpuid[4];
-
-    CpuId(0, cpuid);
-    std::swap(cpuid[2], cpuid[3]);
-    if (memcmp(cpuid+1, "GenuineIntel", 12) != 0)
-        return false;
-
-    CpuId(1, cpuid);
-    return ((cpuid[0] >> 8) & 0xf) == 0xf;
-
-}
-
-
 // ************** Pentium/P4 optimizations ***************
 
 class PentiumOptimized : public Portable
@@ -2638,18 +2546,6 @@
     static void CRYPTOPP_CDECL Multiply8Bottom(word *C, const word *A, const word *B);
 };
 
-class P4Optimized
-{
-public:
-    static word CRYPTOPP_CDECL Add(word *C, const word *A, const word *B, unsigned int N);
-    static word CRYPTOPP_CDECL Subtract(word *C, const word *A, const word *B, unsigned int N);
-#ifdef SSE2_INTRINSICS_AVAILABLE
-    static void CRYPTOPP_CDECL Multiply4(word *C, const word *A, const word *B);
-    static void CRYPTOPP_CDECL Multiply8(word *C, const word *A, const word *B);
-    static void CRYPTOPP_CDECL Multiply8Bottom(word *C, const word *A, const word *B);
-#endif
-};
-
 typedef word (CRYPTOPP_CDECL * PAddSub)(word *C, const word *A, const word *B, unsigned int N);
 typedef void (CRYPTOPP_CDECL * PMul)(word *C, const word *A, const word *B);
 
@@ -2660,41 +2556,18 @@
 
 static void SetPentiumFunctionPointers()
 {
-    if (IsP4())
-    {
-        s_pAdd = &P4Optimized::Add;
-        s_pSub = &P4Optimized::Subtract;
-    }
-    else
-    {
-        s_pAdd = &PentiumOptimized::Add;
-        s_pSub = &PentiumOptimized::Subtract;
-    }
+    s_pAdd = &PentiumOptimized::Add;
+    s_pSub = &PentiumOptimized::Subtract;
 
 #ifdef SSE2_INTRINSICS_AVAILABLE
-    if (HasSSE2())
-    {
-        s_pMul4 = &P4Optimized::Multiply4;
-        s_pMul8 = &P4Optimized::Multiply8;
-        s_pMul8B = &P4Optimized::Multiply8Bottom;
-    }
-    else
-    {
-        s_pMul4 = &PentiumOptimized::Multiply4;
-        s_pMul8 = &PentiumOptimized::Multiply8;
-        s_pMul8B = &PentiumOptimized::Multiply8Bottom;
-    }
+    s_pMul4 = &PentiumOptimized::Multiply4;
+    s_pMul8 = &PentiumOptimized::Multiply8;
+    s_pMul8B = &PentiumOptimized::Multiply8Bottom;
 #endif
 }
 
 static const char s_RunAtStartupSetPentiumFunctionPointers = (SetPentiumFunctionPointers(), 0);
 
-void DisableSSE2()
-{
-    s_sse2Enabled = false;
-    SetPentiumFunctionPointers();
-}
-
 class LowLevel : public PentiumOptimized
 {
 public:
@@ -2862,102 +2735,6 @@
     AddEpilogue
 }
 
-// On Pentium 4, the adc and sbb instructions are very expensive, so avoid them.
-
-CRYPTOPP_NAKED word P4Optimized::Add(word *C, const word *A, const word *B, unsigned int N)
-{
-    AddPrologue
-
-    // now: ebx = B, ecx = C, edx = A, esi = N
-    AS2( xor eax, eax)
-    AS1( neg esi)
-    AS1( jz loopendAddP4)    // if no dwords then nothing to do
-
-    AS2( mov edi, [edx])
-    AS2( mov ebp, [ebx])
-    AS1( jmp carry1AddP4)
-
-    AS1(loopstartAddP4:)
-    AS2( mov edi, [edx+8])
-    AS2( add ecx, 8)
-    AS2( add edx, 8)
-    AS2( mov ebp, [ebx])
-    AS2( add edi, eax)
-    AS1( jc carry1AddP4)
-    AS2( xor eax, eax)
-
-    AS1(carry1AddP4:)
-    AS2( add edi, ebp)
-    AS2( mov ebp, 1)
-    AS2( mov [ecx], edi)
-    AS2( mov edi, [edx+4])
-    AS2( cmovc eax, ebp)
-    AS2( mov ebp, [ebx+4])
-    AS2( add ebx, 8)
-    AS2( add edi, eax)
-    AS1( jc carry2AddP4)
-    AS2( xor eax, eax)
-
-    AS1(carry2AddP4:)
-    AS2( add edi, ebp)
-    AS2( mov ebp, 1)
-    AS2( cmovc eax, ebp)
-    AS2( mov [ecx+4], edi)
-    AS2( add esi, 2)
-    AS1( jnz loopstartAddP4)
-
-    AS1(loopendAddP4:)
-
-    AddEpilogue
-}
-
-CRYPTOPP_NAKED word P4Optimized::Subtract(word *C, const word *A, const word *B, unsigned int N)
-{
-    AddPrologue
-
-    // now: ebx = B, ecx = C, edx = A, esi = N
-    AS2( xor eax, eax)
-    AS1( neg esi)
-    AS1( jz loopendSubP4)    // if no dwords then nothing to do
-
-    AS2( mov edi, [edx])
-    AS2( mov ebp, [ebx])
-    AS1( jmp carry1SubP4)
-
-    AS1(loopstartSubP4:)
-    AS2( mov edi, [edx+8])
-    AS2( add edx, 8)
-    AS2( add ecx, 8)
-    AS2( mov ebp, [ebx])
-    AS2( sub edi, eax)
-    AS1( jc carry1SubP4)
-    AS2( xor eax, eax)
-
-    AS1(carry1SubP4:)
-    AS2( sub edi, ebp)
-    AS2( mov ebp, 1)
-    AS2( mov [ecx], edi)
-    AS2( mov edi, [edx+4])
-    AS2( cmovc eax, ebp)
-    AS2( mov ebp, [ebx+4])
-    AS2( add ebx, 8)
-    AS2( sub edi, eax)
-    AS1( jc carry2SubP4)
-    AS2( xor eax, eax)
-
-    AS1(carry2SubP4:)
-    AS2( sub edi, ebp)
-    AS2( mov ebp, 1)
-    AS2( cmovc eax, ebp)
-    AS2( mov [ecx+4], edi)
-    AS2( add esi, 2)
-    AS1( jnz loopstartSubP4)
-
-    AS1(loopendSubP4:)
-
-    AddEpilogue
-}
-
 // multiply assembly code originally contributed by Leonard Janke
 
 #define MulStartup \
@@ -3293,333 +3070,6 @@
     C[5] = _mm_add_epi64(a3b2, a2b3);
 }
 
-void P4Optimized::Multiply4(word *C, const word *A, const word *B)
-{
-    __m128i temp[7];
-    const word *w = (word *)temp;
-    const __m64 *mw = (__m64 *)w;
-
-    P4_Mul(temp, (__m128i *)A, (__m128i *)B);
-
-    C[0] = w[0];
-
-    __m64 s1, s2;
-
-    __m64 w1 = _mm_cvtsi32_si64(w[1]);
-    __m64 w4 = mw[2];
-    __m64 w6 = mw[3];
-    __m64 w8 = mw[4];
-    __m64 w10 = mw[5];
-    __m64 w12 = mw[6];
-    __m64 w14 = mw[7];
-    __m64 w16 = mw[8];
-    __m64 w18 = mw[9];
-    __m64 w20 = mw[10];
-    __m64 w22 = mw[11];
-    __m64 w26 = _mm_cvtsi32_si64(w[26]);
-
-    s1 = _mm_add_si64(w1, w4);
-    C[1] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s2 = _mm_add_si64(w6, w8);
-    s1 = _mm_add_si64(s1, s2);
-    C[2] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s2 = _mm_add_si64(w10, w12);
-    s1 = _mm_add_si64(s1, s2);
-    C[3] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s2 = _mm_add_si64(w14, w16);
-    s1 = _mm_add_si64(s1, s2);
-    C[4] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s2 = _mm_add_si64(w18, w20);
-    s1 = _mm_add_si64(s1, s2);
-    C[5] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s2 = _mm_add_si64(w22, w26);
-    s1 = _mm_add_si64(s1, s2);
-    C[6] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    C[7] = _mm_cvtsi64_si32(s1) + w[27];
-    _mm_empty();
-}
-
-void P4Optimized::Multiply8(word *C, const word *A, const word *B)
-{
-    __m128i temp[28];
-    const word *w = (word *)temp;
-    const __m64 *mw = (__m64 *)w;
-    const word *x = (word *)temp+7*4;
-    const __m64 *mx = (__m64 *)x;
-    const word *y = (word *)temp+7*4*2;
-    const __m64 *my = (__m64 *)y;
-    const word *z = (word *)temp+7*4*3;
-    const __m64 *mz = (__m64 *)z;
-
-    P4_Mul(temp, (__m128i *)A, (__m128i *)B);
-
-    P4_Mul(temp+7, (__m128i *)A+1, (__m128i *)B);
-
-    P4_Mul(temp+14, (__m128i *)A, (__m128i *)B+1);
-
-    P4_Mul(temp+21, (__m128i *)A+1, (__m128i *)B+1);
-
-    C[0] = w[0];
-
-    __m64 s1, s2, s3, s4;
-
-    __m64 w1 = _mm_cvtsi32_si64(w[1]);
-    __m64 w4 = mw[2];
-    __m64 w6 = mw[3];
-    __m64 w8 = mw[4];
-    __m64 w10 = mw[5];
-    __m64 w12 = mw[6];
-    __m64 w14 = mw[7];
-    __m64 w16 = mw[8];
-    __m64 w18 = mw[9];
-    __m64 w20 = mw[10];
-    __m64 w22 = mw[11];
-    __m64 w26 = _mm_cvtsi32_si64(w[26]);
-    __m64 w27 = _mm_cvtsi32_si64(w[27]);
-
-    __m64 x0 = _mm_cvtsi32_si64(x[0]);
-    __m64 x1 = _mm_cvtsi32_si64(x[1]);
-    __m64 x4 = mx[2];
-    __m64 x6 = mx[3];
-    __m64 x8 = mx[4];
-    __m64 x10 = mx[5];
-    __m64 x12 = mx[6];
-    __m64 x14 = mx[7];
-    __m64 x16 = mx[8];
-    __m64 x18 = mx[9];
-    __m64 x20 = mx[10];
-    __m64 x22 = mx[11];
-    __m64 x26 = _mm_cvtsi32_si64(x[26]);
-    __m64 x27 = _mm_cvtsi32_si64(x[27]);
-
-    __m64 y0 = _mm_cvtsi32_si64(y[0]);
-    __m64 y1 = _mm_cvtsi32_si64(y[1]);
-    __m64 y4 = my[2];
-    __m64 y6 = my[3];
-    __m64 y8 = my[4];
-    __m64 y10 = my[5];
-    __m64 y12 = my[6];
-    __m64 y14 = my[7];
-    __m64 y16 = my[8];
-    __m64 y18 = my[9];
-    __m64 y20 = my[10];
-    __m64 y22 = my[11];
-    __m64 y26 = _mm_cvtsi32_si64(y[26]);
-    __m64 y27 = _mm_cvtsi32_si64(y[27]);
-
-    __m64 z0 = _mm_cvtsi32_si64(z[0]);
-    __m64 z1 = _mm_cvtsi32_si64(z[1]);
-    __m64 z4 = mz[2];
-    __m64 z6 = mz[3];
-    __m64 z8 = mz[4];
-    __m64 z10 = mz[5];
-    __m64 z12 = mz[6];
-    __m64 z14 = mz[7];
-    __m64 z16 = mz[8];
-    __m64 z18 = mz[9];
-    __m64 z20 = mz[10];
-    __m64 z22 = mz[11];
-    __m64 z26 = _mm_cvtsi32_si64(z[26]);
-
-    s1 = _mm_add_si64(w1, w4);
-    C[1] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s2 = _mm_add_si64(w6, w8);
-    s1 = _mm_add_si64(s1, s2);
-    C[2] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s2 = _mm_add_si64(w10, w12);
-    s1 = _mm_add_si64(s1, s2);
-    C[3] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(x0, y0);
-    s2 = _mm_add_si64(w14, w16);
-    s1 = _mm_add_si64(s1, s3);
-    s1 = _mm_add_si64(s1, s2);
-    C[4] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(x1, y1);
-    s4 = _mm_add_si64(x4, y4);
-    s1 = _mm_add_si64(s1, w18);
-    s3 = _mm_add_si64(s3, s4);
-    s1 = _mm_add_si64(s1, w20);
-    s1 = _mm_add_si64(s1, s3);
-    C[5] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(x6, y6);
-    s4 = _mm_add_si64(x8, y8);
-    s1 = _mm_add_si64(s1, w22);
-    s3 = _mm_add_si64(s3, s4);
-    s1 = _mm_add_si64(s1, w26);
-    s1 = _mm_add_si64(s1, s3);
-    C[6] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(x10, y10);
-    s4 = _mm_add_si64(x12, y12);
-    s1 = _mm_add_si64(s1, w27);
-    s3 = _mm_add_si64(s3, s4);
-    s1 = _mm_add_si64(s1, s3);
-    C[7] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(x14, y14);
-    s4 = _mm_add_si64(x16, y16);
-    s1 = _mm_add_si64(s1, z0);
-    s3 = _mm_add_si64(s3, s4);
-    s1 = _mm_add_si64(s1, s3);
-    C[8] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(x18, y18);
-    s4 = _mm_add_si64(x20, y20);
-    s1 = _mm_add_si64(s1, z1);
-    s3 = _mm_add_si64(s3, s4);
-    s1 = _mm_add_si64(s1, z4);
-    s1 = _mm_add_si64(s1, s3);
-    C[9] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(x22, y22);
-    s4 = _mm_add_si64(x26, y26);
-    s1 = _mm_add_si64(s1, z6);
-    s3 = _mm_add_si64(s3, s4);
-    s1 = _mm_add_si64(s1, z8);
-    s1 = _mm_add_si64(s1, s3);
-    C[10] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(x27, y27);
-    s1 = _mm_add_si64(s1, z10);
-    s1 = _mm_add_si64(s1, z12);
-    s1 = _mm_add_si64(s1, s3);
-    C[11] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(z14, z16);
-    s1 = _mm_add_si64(s1, s3);
-    C[12] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(z18, z20);
-    s1 = _mm_add_si64(s1, s3);
-    C[13] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(z22, z26);
-    s1 = _mm_add_si64(s1, s3);
-    C[14] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    C[15] = z[27] + _mm_cvtsi64_si32(s1);
-    _mm_empty();
-}
-
-void P4Optimized::Multiply8Bottom(word *C, const word *A, const word *B)
-{
-    __m128i temp[21];
-    const word *w = (word *)temp;
-    const __m64 *mw = (__m64 *)w;
-    const word *x = (word *)temp+7*4;
-    const __m64 *mx = (__m64 *)x;
-    const word *y = (word *)temp+7*4*2;
-    const __m64 *my = (__m64 *)y;
-
-    P4_Mul(temp, (__m128i *)A, (__m128i *)B);
-
-    P4_Mul(temp+7, (__m128i *)A+1, (__m128i *)B);
-
-    P4_Mul(temp+14, (__m128i *)A, (__m128i *)B+1);
-
-    C[0] = w[0];
-
-    __m64 s1, s2, s3, s4;
-
-    __m64 w1 = _mm_cvtsi32_si64(w[1]);
-    __m64 w4 = mw[2];
-    __m64 w6 = mw[3];
-    __m64 w8 = mw[4];
-    __m64 w10 = mw[5];
-    __m64 w12 = mw[6];
-    __m64 w14 = mw[7];
-    __m64 w16 = mw[8];
-    __m64 w18 = mw[9];
-    __m64 w20 = mw[10];
-    __m64 w22 = mw[11];
-    __m64 w26 = _mm_cvtsi32_si64(w[26]);
-
-    __m64 x0 = _mm_cvtsi32_si64(x[0]);
-    __m64 x1 = _mm_cvtsi32_si64(x[1]);
-    __m64 x4 = mx[2];
-    __m64 x6 = mx[3];
-    __m64 x8 = mx[4];
-
-    __m64 y0 = _mm_cvtsi32_si64(y[0]);
-    __m64 y1 = _mm_cvtsi32_si64(y[1]);
-    __m64 y4 = my[2];
-    __m64 y6 = my[3];
-    __m64 y8 = my[4];
-
-    s1 = _mm_add_si64(w1, w4);
-    C[1] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s2 = _mm_add_si64(w6, w8);
-    s1 = _mm_add_si64(s1, s2);
-    C[2] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s2 = _mm_add_si64(w10, w12);
-    s1 = _mm_add_si64(s1, s2);
-    C[3] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(x0, y0);
-    s2 = _mm_add_si64(w14, w16);
-    s1 = _mm_add_si64(s1, s3);
-    s1 = _mm_add_si64(s1, s2);
-    C[4] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(x1, y1);
-    s4 = _mm_add_si64(x4, y4);
-    s1 = _mm_add_si64(s1, w18);
-    s3 = _mm_add_si64(s3, s4);
-    s1 = _mm_add_si64(s1, w20);
-    s1 = _mm_add_si64(s1, s3);
-    C[5] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    s3 = _mm_add_si64(x6, y6);
-    s4 = _mm_add_si64(x8, y8);
-    s1 = _mm_add_si64(s1, w22);
-    s3 = _mm_add_si64(s3, s4);
-    s1 = _mm_add_si64(s1, w26);
-    s1 = _mm_add_si64(s1, s3);
-    C[6] = _mm_cvtsi64_si32(s1);
-    s1 = _mm_srli_si64(s1, 32);
-
-    C[7] = _mm_cvtsi64_si32(s1) + w[27] + x[10] + y[10] + x[12] + y[12];
-    _mm_empty();
-}
-
 #endif    // #ifdef SSE2_INTRINSICS_AVAILABLE
 
 // ********************************************************
@@ -4064,18 +3514,6 @@
     DWord q = DivideFourWordsByTwo<word, DWord>(T, DWord(A[0], A[1]), DWord(A[2], A[3]), DWord(B[0], B[1]));
     Q[0] = q.GetLowHalf();
     Q[1] = q.GetHighHalf();
-
-#ifndef NDEBUG
-    if (B[0] || B[1])
-    {
-        // multiply quotient and divisor and add remainder, make sure it equals dividend
-        assert(!T[2] && !T[3] && (T[1] < B[1] || (T[1]==B[1] && T[0]<B[0])));
-        word P[4];
-        Portable::Multiply2(P, Q, B);
-        Add(P, P, T, 4);
-        assert(memcmp(P, A, 4*WORD_SIZE)==0);
-    }
-#endif
 }
 
 // for use by Divide(), corrects the underestimated quotient {Q1,Q0}
@@ -9421,10 +8859,6 @@
 
 //- #include "modes.h"
 
-#ifndef NDEBUG
-//- #include "des.h"
-#endif
-
 NAMESPACE_BEGIN(CryptoPP)
 
 void CipherModeBase::SetKey(const byte *key, unsigned int length, const NameValuePairs &params)
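For reference, the removed HasSSE2() combined a CPUID feature-bit test (bit 26 of EDX from leaf 1) with an execution probe: run one SSE instruction under a temporary SIGILL handler and treat an illegal-instruction fault as "unsupported". Below is a self-contained sketch of that probe, assuming GCC on x86; it mirrors the deleted code and is not part of the patch itself.

// Sketch of the SIGILL execution probe from the removed HasSSE2().
// ProbeSSE2 is a hypothetical name; the real code also consulted CPUID
// first, so the probe only caught OS-level lack of SSE state support.
#include <csetjmp>
#include <csignal>
#include <cstdio>

static jmp_buf s_env;

extern "C" void SigIllHandler(int)
{
    longjmp(s_env, 1);      // jump back out of the faulting instruction
}

static bool ProbeSSE2()
{
    void (*oldHandler)(int) = std::signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    bool result = true;
    if (setjmp(s_env))
        result = false;     // SIGILL fired: instruction not supported
    else
        __asm__ __volatile__ ("xorps %xmm0, %xmm0");  // as in the removed code (an SSE instruction)

    std::signal(SIGILL, oldHandler);
    return result;
}

int main()
{
    std::printf("SSE usable: %s\n", ProbeSSE2() ? "yes" : "no");
    return 0;
}

On any x86-64 machine the probe trivially succeeds; its value was on early-2000s CPUs and kernels, which is why dropping it along with the P4-only code paths simplifies the file without changing results.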
Attachments on bug 66605: 42026 | 42035