X-Git-Url: https://git.mxchange.org/?a=blobdiff_plain;f=simgear%2Fstructure%2FSGAtomic.cxx;h=71f24ed96eec87d472df866fb18ba5caafc2a447;hb=66c9187c95fb6861d9737f135fdf68d1bfa265f3;hp=3505fabf601b2c9ca57eccf0f8e6f48ea3285b51;hpb=11b16b8a86d49743a2f29570871bd8adccb51890;p=simgear.git

diff --git a/simgear/structure/SGAtomic.cxx b/simgear/structure/SGAtomic.cxx
index 3505fabf..71f24ed9 100644
--- a/simgear/structure/SGAtomic.cxx
+++ b/simgear/structure/SGAtomic.cxx
@@ -1,6 +1,6 @@
 /* -*-c++-*-
  *
- * Copyright (C) 2005-2006 Mathias Froehlich
+ * Copyright (C) 2005-2009,2011 Mathias Froehlich
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -18,46 +18,105 @@
  *
  */
 
-#include "SGAtomic.hxx"
+#ifdef HAVE_CONFIG_H
+# include <simgear_config.h>
+#endif
 
-#if defined(SGATOMIC_USE_GCC4_BUILTINS) && defined(__i386__)
+#include "SGAtomic.hxx"
 
-// Usually the apropriate functions are inlined by gcc.
-// But if gcc is called with something aequivalent to -march=i386,
-// it will not assume that there is a lock instruction and instead
-// calls this pair of functions. We will provide them here in this case.
-// Note that this assembler code will not work on a i386 chip anymore.
-// But I hardly believe that we can assume to run at least on a i486 ...
+#if defined(SGATOMIC_USE_LIBRARY_FUNCTIONS)
 
-extern "C" {
+#if defined(_WIN32)
+# include <windows.h>
+#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
+#elif defined(__GNUC__) && defined(__i386__)
+#elif defined(SGATOMIC_USE_MUTEX)
+# include <simgear/threads/SGGuard.hxx>
+#else
+# error
+#endif
 
-unsigned __sync_sub_and_fetch_4(volatile void *ptr, unsigned value)
+unsigned
+SGAtomic::operator++()
 {
-  register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(ptr);
-  register unsigned result;
-  __asm__ __volatile__("lock; xadd{l} {%0,%1|%1,%0}"
-                       : "=r" (result), "=m" (*mem)
-                       : "0" (-value), "m" (*mem)
-                       : "memory");
-  return result;
+#if defined(_WIN32)
+  return InterlockedIncrement(reinterpret_cast<long volatile*>(&mValue));
+#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
+  return __sync_add_and_fetch(&mValue, 1);
+#elif defined(__GNUC__) && defined(__i386__)
+  register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(&mValue);
+  register unsigned result;
+  __asm__ __volatile__("lock; xadd{l} {%0,%1|%1,%0}"
+                       : "=r" (result), "=m" (*mem)
+                       : "0" (1), "m" (*mem)
+                       : "memory");
+  return result + 1;
+#else
+  SGGuard<SGMutex> lock(mMutex);
+  return ++mValue;
+#endif
 }
 
-unsigned __sync_add_and_fetch_4(volatile void *ptr, unsigned value)
+unsigned
+SGAtomic::operator--()
 {
-  register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(ptr);
-  register unsigned result;
-  __asm__ __volatile__("lock; xadd{l} {%0,%1|%1,%0}"
-                       : "=r" (result), "=m" (*mem)
-                       : "0" (value), "m" (*mem)
-                       : "memory");
-  return result;
+#if defined(_WIN32)
+  return InterlockedDecrement(reinterpret_cast<long volatile*>(&mValue));
+#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
+  return __sync_sub_and_fetch(&mValue, 1);
+#elif defined(__GNUC__) && defined(__i386__)
+  register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(&mValue);
+  register unsigned result;
+  __asm__ __volatile__("lock; xadd{l} {%0,%1|%1,%0}"
+                       : "=r" (result), "=m" (*mem)
+                       : "0" (-1), "m" (*mem)
+                       : "memory");
+  return result - 1;
+#else
+  SGGuard<SGMutex> lock(mMutex);
+  return --mValue;
+#endif
 }
 
-void __sync_synchronize()
+SGAtomic::operator unsigned() const
 {
-  __asm__ __volatile__("": : : "memory");
+#if defined(_WIN32)
+  return static_cast<unsigned const volatile &>(mValue);
+#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
+  __sync_synchronize();
+  return mValue;
+#elif defined(__GNUC__) && defined(__i386__)
+  __asm__ __volatile__("": : : "memory");
+  return mValue;
+#else
+  SGGuard<SGMutex> lock(mMutex);
+  return mValue;
+#endif
 }
 
-} // extern "C"
+bool
+SGAtomic::compareAndExchange(unsigned oldValue, unsigned newValue)
+{
+#if defined(_WIN32)
+  long volatile* lvPtr = reinterpret_cast<long volatile*>(&mValue);
+  return oldValue == InterlockedCompareExchange(lvPtr, newValue, oldValue);
+#elif defined(GCC_ATOMIC_BUILTINS_FOUND)
+  return __sync_bool_compare_and_swap(&mValue, oldValue, newValue);
+#elif defined(__GNUC__) && defined(__i386__)
+  register volatile unsigned* mem = reinterpret_cast<volatile unsigned*>(&mValue);
+  unsigned before;
+  __asm__ __volatile__("lock; cmpxchg{l} {%1,%2|%1,%2}"
+                       : "=a"(before)
+                       : "q"(newValue), "m"(*mem), "0"(oldValue)
+                       : "memory");
+  return before == oldValue;
+#else
+  SGGuard<SGMutex> lock(mMutex);
+  if (mValue != oldValue)
+    return false;
+  mValue = newValue;
+  return true;
+#endif
+}
 
 #endif