[PATCH] includes: Fix register constraint for interlocked operations
Maarten Lankhorst
m.b.lankhorst at gmail.com
Fri Sep 23 10:37:43 CDT 2011
"m"(*dest) allows for better assembly than "r"(dest), by permitting
offsets to be folded into the operand instead of requiring an extra load instruction.
Signed-off-by: Maarten Lankhorst <m.b.lankhorst at gmail.com>
---
diff --git a/include/winbase.h b/include/winbase.h
index a37d073..5458331 100644
--- a/include/winbase.h
+++ b/include/winbase.h
@@ -2355,24 +2355,24 @@ extern WCHAR * CDECL wine_get_dos_file_name( LPCSTR str );
static FORCEINLINE LONG WINAPI InterlockedCompareExchange( LONG volatile *dest, LONG xchg, LONG compare )
{
LONG ret;
- __asm__ __volatile__( "lock; cmpxchgl %2,(%1)"
- : "=a" (ret) : "r" (dest), "r" (xchg), "0" (compare) : "memory" );
+ __asm__ __volatile__( "lock; cmpxchgl %2,%1"
+ : "=a" (ret) : "m" (*dest), "r" (xchg), "0" (compare) : "memory" );
return ret;
}
static FORCEINLINE LONG WINAPI InterlockedExchange( LONG volatile *dest, LONG val )
{
LONG ret;
- __asm__ __volatile__( "lock; xchgl %0,(%1)"
- : "=r" (ret) :"r" (dest), "0" (val) : "memory" );
+ __asm__ __volatile__( "lock; xchgl %0,%1"
+ : "=r" (ret) : "m" (*dest), "0" (val) : "memory" );
return ret;
}
static FORCEINLINE LONG WINAPI InterlockedExchangeAdd( LONG volatile *dest, LONG incr )
{
LONG ret;
- __asm__ __volatile__( "lock; xaddl %0,(%1)"
- : "=r" (ret) : "r" (dest), "0" (incr) : "memory" );
+ __asm__ __volatile__( "lock; xaddl %0,%1"
+ : "=r" (ret) : "m" (*dest), "0" (incr) : "memory" );
return ret;
}
@@ -2414,8 +2414,8 @@ static FORCEINLINE LONG WINAPI InterlockedCompareExchange( LONG volatile *dest,
{
#if defined(__x86_64__) && defined(__GNUC__)
LONG ret;
- __asm__ __volatile__( "lock; cmpxchgl %2,(%1)"
- : "=a" (ret) : "r" (dest), "r" (xchg), "0" (compare) : "memory" );
+ __asm__ __volatile__( "lock; cmpxchgl %2,%1"
+ : "=a" (ret) : "m" (*dest), "r" (xchg), "0" (compare) : "memory" );
return ret;
#else
extern int interlocked_cmpxchg( int *dest, int xchg, int compare );
@@ -2427,8 +2427,8 @@ static FORCEINLINE PVOID WINAPI InterlockedCompareExchangePointer( PVOID volatil
{
#if defined(__x86_64__) && defined(__GNUC__)
PVOID ret;
- __asm__ __volatile__( "lock; cmpxchgq %2,(%1)"
- : "=a" (ret) : "r" (dest), "r" (xchg), "0" (compare) : "memory" );
+ __asm__ __volatile__( "lock; cmpxchgq %2,%1"
+ : "=a" (ret) : "m" (*dest), "r" (xchg), "0" (compare) : "memory" );
return ret;
#else
extern void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *compare );
@@ -2440,8 +2440,8 @@ static FORCEINLINE LONGLONG WINAPI InterlockedCompareExchange64( LONGLONG volati
{
#if defined(__x86_64__) && defined(__GNUC__)
LONGLONG ret;
- __asm__ __volatile__( "lock; cmpxchgq %2,(%1)"
- : "=a" (ret) : "r" (dest), "r" (xchg), "0" (compare) : "memory" );
+ __asm__ __volatile__( "lock; cmpxchgq %2,%1"
+ : "=a" (ret) : "m" (*dest), "r" (xchg), "0" (compare) : "memory" );
return ret;
#else
extern __int64 interlocked_cmpxchg64( __int64 *dest, __int64 xchg, __int64 compare );
@@ -2453,8 +2453,8 @@ static FORCEINLINE LONG WINAPI InterlockedExchange( LONG volatile *dest, LONG va
{
#if defined(__x86_64__) && defined(__GNUC__)
LONG ret;
- __asm__ __volatile__( "lock; xchgl %0,(%1)"
- : "=r" (ret) :"r" (dest), "0" (val) : "memory" );
+ __asm__ __volatile__( "lock; xchgl %0,%1"
+ : "=r" (ret) : "m" (*dest), "0" (val) : "memory" );
return ret;
#else
extern int interlocked_xchg( int *dest, int val );
@@ -2466,8 +2466,8 @@ static FORCEINLINE PVOID WINAPI InterlockedExchangePointer( PVOID volatile *dest
{
#if defined(__x86_64__) && defined(__GNUC__)
PVOID ret;
- __asm__ __volatile__( "lock; xchgq %0,(%1)"
- : "=r" (ret) :"r" (dest), "0" (val) : "memory" );
+ __asm__ __volatile__( "lock; xchgq %0,%1"
+ : "=r" (ret) : "m" (*dest), "0" (val) : "memory" );
return ret;
#else
extern void *interlocked_xchg_ptr( void **dest, void *val );
@@ -2479,8 +2479,8 @@ static FORCEINLINE LONG WINAPI InterlockedExchangeAdd( LONG volatile *dest, LONG
{
#if defined(__x86_64__) && defined(__GNUC__)
LONG ret;
- __asm__ __volatile__( "lock; xaddl %0,(%1)"
- : "=r" (ret) : "r" (dest), "0" (incr) : "memory" );
+ __asm__ __volatile__( "lock; xaddl %0,%1"
+ : "=r" (ret) : "m" (*dest), "0" (incr) : "memory" );
return ret;
#else
extern int interlocked_xchg_add( int *dest, int incr );
diff --git a/include/wine/port.h b/include/wine/port.h
index 8281653..215b56f 100644
--- a/include/wine/port.h
+++ b/include/wine/port.h
@@ -356,8 +356,8 @@ extern __int64 interlocked_cmpxchg64( __int64 *dest, __int64 xchg, __int64 compa
static inline int interlocked_cmpxchg( int *dest, int xchg, int compare )
{
int ret;
- __asm__ __volatile__( "lock; cmpxchgl %2,(%1)"
- : "=a" (ret) : "r" (dest), "r" (xchg), "0" (compare) : "memory" );
+ __asm__ __volatile__( "lock; cmpxchgl %2,%1"
+ : "=a" (ret) : "m" (*dest), "r" (xchg), "0" (compare) : "memory" );
return ret;
}
@@ -365,11 +365,11 @@ static inline void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *comp
{
void *ret;
#ifdef __x86_64__
- __asm__ __volatile__( "lock; cmpxchgq %2,(%1)"
- : "=a" (ret) : "r" (dest), "r" (xchg), "0" (compare) : "memory" );
+ __asm__ __volatile__( "lock; cmpxchgq %2,%1"
+ : "=a" (ret) : "m" (*dest), "r" (xchg), "0" (compare) : "memory" );
#else
- __asm__ __volatile__( "lock; cmpxchgl %2,(%1)"
- : "=a" (ret) : "r" (dest), "r" (xchg), "0" (compare) : "memory" );
+ __asm__ __volatile__( "lock; cmpxchgl %2,%1"
+ : "=a" (ret) : "m" (*dest), "r" (xchg), "0" (compare) : "memory" );
#endif
return ret;
}
@@ -377,8 +377,8 @@ static inline void *interlocked_cmpxchg_ptr( void **dest, void *xchg, void *comp
static inline int interlocked_xchg( int *dest, int val )
{
int ret;
- __asm__ __volatile__( "lock; xchgl %0,(%1)"
- : "=r" (ret) : "r" (dest), "0" (val) : "memory" );
+ __asm__ __volatile__( "lock; xchgl %0,%1"
+ : "=r" (ret) : "m" (*dest), "0" (val) : "memory" );
return ret;
}
@@ -386,11 +386,11 @@ static inline void *interlocked_xchg_ptr( void **dest, void *val )
{
void *ret;
#ifdef __x86_64__
- __asm__ __volatile__( "lock; xchgq %0,(%1)"
- : "=r" (ret) :"r" (dest), "0" (val) : "memory" );
+ __asm__ __volatile__( "lock; xchgq %0,%1"
+ : "=r" (ret) : "m" (*dest), "0" (val) : "memory" );
#else
- __asm__ __volatile__( "lock; xchgl %0,(%1)"
- : "=r" (ret) : "r" (dest), "0" (val) : "memory" );
+ __asm__ __volatile__( "lock; xchgl %0,%1"
+ : "=r" (ret) : "m" (*dest), "0" (val) : "memory" );
#endif
return ret;
}
@@ -398,8 +398,8 @@ static inline void *interlocked_xchg_ptr( void **dest, void *val )
static inline int interlocked_xchg_add( int *dest, int incr )
{
int ret;
- __asm__ __volatile__( "lock; xaddl %0,(%1)"
- : "=r" (ret) : "r" (dest), "0" (incr) : "memory" );
+ __asm__ __volatile__( "lock; xaddl %0,%1"
+ : "=r" (ret) : "m" (*dest), "0" (incr) : "memory" );
return ret;
}
More information about the wine-patches
mailing list