--- a/ipc/chromium/src/base/atomicops.h	Wed May 04 15:39:12 2011 +0800
+++ a/ipc/chromium/src/base/atomicops.h	Wed May 04 11:20:40 2011 +0200
@@ -132,6 +132,8 @@
 #include "base/atomicops_internals_x86_gcc.h"
 #elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM_FAMILY)
 #include "base/atomicops_internals_arm_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_PPC)
+#include "base/atomicops_internals_ppc_gcc.h"
 #else
 #include "base/atomicops_internals_mutex.h"
 #endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ 3ff945bdace7	Wed May 04 11:20:40 2011 +0200
@@ -0,0 +1,148 @@
+// Copyright (c) 2010 JJDaNiMoTh. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+#define PPC_ACQUIRE_BARRIER "\nisync\n"
+#define PPC_RELEASE_BARRIER "\nlwsync\n"
+
+namespace base {
+namespace subtle {
+
+// 32-bit low-level operations on any platform.
+
+/*
+ * Compare and exchange - if *ptr == old_value, set it to new_value,
+ * and return the old value of *ptr.
+ */
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+
+  __asm__ __volatile__(
+      "1:     lwarx   %0,0,%2\n"
+      "       cmpw    0,%0,%3\n"
+      "       bne-    2f\n"
+      "       stwcx.  %4,0,%2\n"
+      "       bne-    1b\n"
+      "2:\n"
+      : "=&r" (prev), "+m" (*ptr)
+      : "r" (ptr), "r" (old_value), "r" (new_value)
+      : "cc", "memory");
+
+  return prev;
+}
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be new_value and returns
+ * the previous value stored there.
+ */
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+
+  __asm__ __volatile__(
+      "1:     lwarx   %0,0,%2\n"
+      "       stwcx.  %3,0,%2\n"
+      "       bne-    1b\n"
+      : "=&r" (prev), "+m" (*ptr)
+      : "r" (ptr), "r" (new_value)
+      : "cc", "memory");
+
+  return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp;
+
+  __asm__ __volatile__(
+      "1:     lwarx   %0,0,%2\n"
+      "       add     %0,%1,%0\n"
+      "       stwcx.  %0,0,%2\n"
+      "       bne-    1b\n"
+      : "=&r" (temp)
+      : "r" (increment), "r" (ptr)
+      : "cc", "memory");
+
+  return temp;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  Atomic32 temp;
+
+  __asm__ __volatile__(
+      PPC_RELEASE_BARRIER
+      "1:     lwarx   %0,0,%2\n"
+      "       add     %0,%1,%0\n"
+      "       stwcx.  %0,0,%2\n"
+      "       bne-    1b\n"
+      PPC_ACQUIRE_BARRIER
+      : "=&r" (temp)
+      : "r" (increment), "r" (ptr)
+      : "cc", "memory");
+
+  return temp;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  __asm__ __volatile__("sync" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+}  // namespace base::subtle
+}  // namespace base
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif  // BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
--- a/ipc/chromium/src/base/data_pack.cc	Wed May 04 15:39:12 2011 +0800
+++ a/ipc/chromium/src/base/data_pack.cc	Wed May 04 11:20:40 2011 +0200
@@ -91,15 +91,7 @@
 bool DataPack::Get(uint32_t resource_id, StringPiece* data) {
   // It won't be hard to make this endian-agnostic, but it's not worth
   // bothering to do right now.
-#if defined(__BYTE_ORDER)
-  // Linux check
-  COMPILE_ASSERT(__BYTE_ORDER == __LITTLE_ENDIAN,
-                 datapack_assumes_little_endian);
-#elif defined(__BIG_ENDIAN__)
-  // Mac check
-  #error DataPack assumes little endian
-#endif
-
+#warning DoTheRightThingMakingThisEndianAgnostic!
   DataPackEntry* target = reinterpret_cast<DataPackEntry*>(
       bsearch(&resource_id, mmap_->data() + kHeaderLength, resource_count_,
               sizeof(DataPackEntry), DataPackEntry::CompareById));
--- a/ipc/chromium/src/base/debug_util_posix.cc	Wed May 04 15:39:12 2011 +0800
+++ a/ipc/chromium/src/base/debug_util_posix.cc	Wed May 04 11:20:40 2011 +0200
@@ -110,8 +110,10 @@
 // static
 void DebugUtil::BreakDebugger() {
-#if defined(ARCH_CPU_X86_FAMILY)
+#if defined(ARCH_CPU_X86_FAMILY) && !defined(ARCH_CPU_PPC)
   asm ("int3");
+#elif defined(ARCH_CPU_PPC)
+  asm ("trap");
 #endif
 }
--- a/ipc/chromium/src/build/build_config.h	2011-04-14 07:28:30.000000000 +0200
+++ a/ipc/chromium/src/build/build_config.h	2011-05-04 21:01:32.000000000 +0200
@@ -57,7 +57,7 @@
 #define ARCH_CPU_ARMEL 1
 #define ARCH_CPU_32_BITS 1
 #define WCHAR_T_IS_UNSIGNED 1
-#elif defined(__ppc__)
+#elif defined(__ppc__) || defined(__powerpc__)
 #define ARCH_CPU_PPC 1
 #define ARCH_CPU_32_BITS 1
 #else
--- a/ipc/chromium/Makefile.in	2011-04-14 07:28:29.000000000 +0200
+++ a/ipc/chromium/Makefile.in	2011-05-05 13:18:54.000000000 +0200
@@ -318,10 +318,12 @@
 ifneq (86,$(findstring 86,$(OS_TEST))) # {
 ifneq (arm,$(findstring arm,$(OS_TEST))) # {
+ifneq (powerpc,$(findstring powerpc,$(OS_TEST))) # {
 # Use mutex-backed atomics
 CPPSRCS += atomicops_internals_mutex.cc
 endif # }
 endif # }
+endif # }
 
 OS_CXXFLAGS += $(TK_CFLAGS)
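For reference, a minimal sketch of how the new backend is exercised through the public atomics API. This is illustrative only and not part of the patch; it assumes the stock Chromium base/atomicops.h interface (Atomic32, NoBarrier_CompareAndSwap, Barrier_AtomicIncrement, Release_Store, Acquire_Load), which on ARCH_CPU_PPC now resolves to the lwarx/stwcx. loops and sync/lwsync/isync barriers added above.

// Illustrative sanity check of the atomics API -- not part of the patch.
// Assumes the standard Chromium base/atomicops.h interface; on PPC these
// calls are served by atomicops_internals_ppc_gcc.h.
#include <assert.h>
#include "base/atomicops.h"

using base::subtle::Atomic32;

int main() {
  volatile Atomic32 value = 0;

  // CAS succeeds when the expected value matches; on PPC this is a
  // lwarx/cmpw/stwcx. retry loop.
  Atomic32 prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
  assert(prev == 0 && value == 5);

  // CAS fails and returns the current value when the expectation is stale.
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 9);
  assert(prev == 5 && value == 5);

  // Full-barrier increment: lwsync before and isync after the ll/sc loop.
  Atomic32 now = base::subtle::Barrier_AtomicIncrement(&value, 2);
  assert(now == 7);

  // Release store / acquire load pair built on the "sync" MemoryBarrier().
  base::subtle::Release_Store(&value, 42);
  assert(base::subtle::Acquire_Load(&value) == 42);
  return 0;
}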