From 2bf614caedd8d2c204adbeba19e1ce6e15abc030 Mon Sep 17 00:00:00 2001 From: James Cowgill Date: Fri, 6 Feb 2015 08:37:00 -0500 Subject: [PATCH] Bug 1130438 - ipc/chromium: copy mips64 atomic patches from upstream chromium. r=froydnj Upstream commits: https://chromium.googlesource.com/chromium/src.git/+/fc47526241e2367035b107f853c89e27573304ff https://chromium.googlesource.com/chromium/src.git/+/5614f28582b0665d79218fb00f014d4afad4576d https://chromium.googlesource.com/chromium/src.git/+/8bd6f985f24f3ab29fbfd325e678121f33dd3518 --- .../src/base/atomicops_internals_mips_gcc.h | 157 ++++++++++++++++-- ipc/chromium/src/build/build_config.h | 3 + 2 files changed, 142 insertions(+), 18 deletions(-) diff --git a/ipc/chromium/src/base/atomicops_internals_mips_gcc.h b/ipc/chromium/src/base/atomicops_internals_mips_gcc.h index e8a1c76a0b3d..80f5feb84702 100644 --- a/ipc/chromium/src/base/atomicops_internals_mips_gcc.h +++ b/ipc/chromium/src/base/atomicops_internals_mips_gcc.h @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -25,13 +25,13 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// This file is an internal atomic implementation, use atomicops.h instead. +// This file is an internal atomic implementation, use base/atomicops.h instead. +// +// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. #ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_ #define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_ -#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") - namespace base { namespace subtle { @@ -61,7 +61,7 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, "2:\n" ".set pop\n" : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) - : "Ir" (old_value), "r" (new_value), "m" (*ptr) + : "r" (old_value), "r" (new_value), "m" (*ptr) : "memory"); return prev; } @@ -74,7 +74,7 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, __asm__ __volatile__(".set push\n" ".set noreorder\n" "1:\n" - "ll %1, %2\n" // old = *ptr + "ll %1, %4\n" // old = *ptr "move %0, %3\n" // temp = new_value "sc %0, %2\n" // *ptr = temp (with atomic check) "beqz %0, 1b\n" // start again on atomic error @@ -96,7 +96,7 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, __asm__ __volatile__(".set push\n" ".set noreorder\n" "1:\n" - "ll %0, %2\n" // temp = *ptr + "ll %0, %4\n" // temp = *ptr "addu %1, %0, %3\n" // temp2 = temp + increment "sc %1, %2\n" // *ptr = temp2 (with atomic check) "beqz %1, 1b\n" // start again on atomic error @@ -111,9 +111,9 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) { - ATOMICOPS_COMPILER_BARRIER(); + MemoryBarrier(); Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); - ATOMICOPS_COMPILER_BARRIER(); + MemoryBarrier(); return res; } @@ -126,19 +126,16 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { - ATOMICOPS_COMPILER_BARRIER(); Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); - ATOMICOPS_COMPILER_BARRIER(); + 
MemoryBarrier(); return res; } inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { - ATOMICOPS_COMPILER_BARRIER(); - Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); - ATOMICOPS_COMPILER_BARRIER(); - return res; + MemoryBarrier(); + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { @@ -174,9 +171,133 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) { return *ptr; } -} // namespace subtle -} // namespace base +#if defined(__LP64__) +// 64-bit versions of the atomic ops. -#undef ATOMICOPS_COMPILER_BARRIER +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 prev, tmp; + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "1:\n" + "lld %0, %5\n" // prev = *ptr + "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 + "move %2, %4\n" // tmp = new_value + "scd %2, %1\n" // *ptr = tmp (with atomic check) + "beqz %2, 1b\n" // start again on atomic error + "nop\n" // delay slot nop + "2:\n" + ".set pop\n" + : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) + : "r" (old_value), "r" (new_value), "m" (*ptr) + : "memory"); + return prev; +} + +// Atomically store new_value into *ptr, returning the previous value held in +// *ptr. This routine implies no memory barriers. +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + Atomic64 temp, old; + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "1:\n" + "lld %1, %4\n" // old = *ptr + "move %0, %3\n" // temp = new_value + "scd %0, %2\n" // *ptr = temp (with atomic check) + "beqz %0, 1b\n" // start again on atomic error + "nop\n" // delay slot nop + ".set pop\n" + : "=&r" (temp), "=&r" (old), "=m" (*ptr) + : "r" (new_value), "m" (*ptr) + : "memory"); + + return old; +} + +// Atomically increment *ptr by "increment". Returns the new value of +// *ptr with the increment applied. This routine implies no memory barriers. +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + Atomic64 temp, temp2; + + __asm__ __volatile__(".set push\n" + ".set noreorder\n" + "1:\n" + "lld %0, %4\n" // temp = *ptr + "daddu %1, %0, %3\n" // temp2 = temp + increment + "scd %1, %2\n" // *ptr = temp2 (with atomic check) + "beqz %1, 1b\n" // start again on atomic error + "daddu %1, %0, %3\n" // temp2 = temp + increment + ".set pop\n" + : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) + : "Ir" (increment), "m" (*ptr) + : "memory"); + // temp2 now holds the final value. + return temp2; +} + +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + MemoryBarrier(); + Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment); + MemoryBarrier(); + return res; +} + +// "Acquire" operations +// ensure that no later memory access can be reordered ahead of the operation. +// "Release" operations ensure that no previous memory access can be reordered +// after the operation. "Barrier" operations have both "Acquire" and "Release" +// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory +// access. 
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); + MemoryBarrier(); + return res; +} + +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + MemoryBarrier(); + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); +} + +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; +} + +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { + *ptr = value; + MemoryBarrier(); +} + +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { + MemoryBarrier(); + *ptr = value; +} + +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { + return *ptr; +} + +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { + Atomic64 value = *ptr; + MemoryBarrier(); + return value; +} + +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { + MemoryBarrier(); + return *ptr; +} +#endif + +} // namespace base::subtle +} // namespace base #endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_ diff --git a/ipc/chromium/src/build/build_config.h b/ipc/chromium/src/build/build_config.h index e3f4189650df..7a4938e06c04 100644 --- a/ipc/chromium/src/build/build_config.h +++ b/ipc/chromium/src/build/build_config.h @@ -87,6 +87,9 @@ #elif defined(__sparc__) #define ARCH_CPU_SPARC 1 #define ARCH_CPU_32_BITS 1 +#elif defined(__mips64) && defined(__LP64__) +#define ARCH_CPU_MIPS 1 +#define ARCH_CPU_64_BITS 1 #elif defined(__mips__) #define ARCH_CPU_MIPS 1 #define ARCH_CPU_32_BITS 1
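
Note (illustration only, not part of the patch): the 64-bit routines added above follow the same barrier discipline as the existing 32-bit ones. An Acquire CAS performs the LL/SC exchange and then issues a full MemoryBarrier() ("sync" on MIPS), while a Release CAS issues the barrier first and then exchanges. Below is a minimal C++11 sketch of that discipline using std::atomic in place of the hand-written lld/scd loops; the names FullBarrier, AcquireCAS and ReleaseCAS are hypothetical and exist only in this sketch, not in the tree.

#include <atomic>
#include <cstdint>

using Atomic64 = int64_t;  // sketch-local stand-in for base::subtle::Atomic64

// Full barrier, the std::atomic counterpart of MemoryBarrier()/"sync".
inline void FullBarrier() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

// Acquire CAS: swap first, then fence, so no later memory access can be
// reordered ahead of the swap. Returns the previous value, matching what
// NoBarrier_CompareAndSwap returns.
inline Atomic64 AcquireCAS(std::atomic<Atomic64>* ptr,
                           Atomic64 old_value, Atomic64 new_value) {
  Atomic64 prev = old_value;
  // On failure, compare_exchange_strong writes the observed value into prev,
  // so prev always ends up holding the previous contents of *ptr.
  ptr->compare_exchange_strong(prev, new_value, std::memory_order_relaxed);
  FullBarrier();
  return prev;
}

// Release CAS: fence first, then swap, so no earlier memory access can be
// reordered after the swap.
inline Atomic64 ReleaseCAS(std::atomic<Atomic64>* ptr,
                           Atomic64 old_value, Atomic64 new_value) {
  FullBarrier();
  Atomic64 prev = old_value;
  ptr->compare_exchange_strong(prev, new_value, std::memory_order_relaxed);
  return prev;
}

Placing the barrier after the exchange (acquire) or before it (release) mirrors where the patch places MemoryBarrier() around NoBarrier_CompareAndSwap, which is also why Release_CompareAndSwap can return the NoBarrier result directly instead of holding it in a temporary.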