/* GLIB - Library of useful routines for C programming
 * Copyright (C) 1995-1997  Peter Mattis, Spencer Kimball and Josh MacDonald
 *
 * g_atomic_*: atomic operations.
 * Copyright (C) 2003 Sebastian Wilhelmi
 * Copyright (C) 2007 Nokia Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#if defined (G_ATOMIC_ARM)
#include <sched.h>
#endif

#include "config.h"

#include "glib.h"
#include "gthreadprivate.h"
#include "galias.h"
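/* One implementation of the g_atomic_* primitives is selected below per
 * architecture/compiler combination: GCC inline assembly for the
 * architectures configure knows about, the Win32 Interlocked* API for
 * other compilers on Windows, and a global-mutex fallback everywhere
 * else. */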

#if defined (__GNUC__)
# if defined (G_ATOMIC_I486)
/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h 
 */
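/* A rough sketch of the semantics of the lock-prefixed xadd below, as
 * if the sequence were a single indivisible step:
 *
 *   gint tmp = *atomic;
 *   *atomic = tmp + val;
 *   return tmp;          // the value *before* the addition
 *
 * The lock prefix is what makes the read-modify-write atomic across
 * processors. */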
gint
g_atomic_int_exchange_and_add (volatile gint *atomic, 
			       gint           val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic) 
			: "0" (val), "m" (*atomic));
  return result;
}
 
void
g_atomic_int_add (volatile gint *atomic, 
		  gint           val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
			: "=m" (*atomic) 
			: "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic, 
				   gint           oldval, 
				   gint           newval)
{
  gint result;
 
  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
			: "=a" (result), "=m" (*atomic)
			: "r" (newval), "m" (*atomic), "0" (oldval)); 

  return result == oldval;
}

/* The same code as above, as on i386 gpointer is 32 bit as well.
 * Duplicating the code here seems more natural than casting the
 * arguments and calling the former function. */

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
				       gpointer           oldval, 
				       gpointer           newval)
{
  gpointer result;
 
  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
			: "=a" (result), "=m" (*atomic)
			: "r" (newval), "m" (*atomic), "0" (oldval)); 

  return result == oldval;
}

# elif defined (G_ATOMIC_SPARCV9)
/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)			\
  ({ 									\
     gint __result;							\
     __asm__ __volatile__ ("cas [%4], %2, %0"				\
                           : "=r" (__result), "=m" (*(atomic))		\
                           : "r" (oldval), "m" (*(atomic)), "r" (atomic),\
                           "0" (newval));				\
     __result == oldval;						\
  })
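/* Sketch of the cas semantics used above: the previous value of *atomic
 * always ends up in __result, and newval is stored only if that
 * previous value equalled oldval, so the macro evaluates to TRUE
 * exactly when the swap happened. */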

#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
				       gpointer           oldval, 
				       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("cas [%4], %2, %0"
			: "=r" (result), "=m" (*atomic)
			: "r" (oldval), "m" (*atomic), "r" (atomic),
			"0" (newval));
  return result == oldval;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
				       gpointer           oldval, 
				       gpointer           newval)
{
  gpointer result;
  gpointer *a = atomic;
  __asm__ __volatile__ ("casx [%4], %2, %0"
			: "=r" (result), "=m" (*a)
			: "r" (oldval), "m" (*a), "r" (a),
			"0" (newval));
  return result == oldval;
}
#  else /* What's that */
#    error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER					\
  __asm__ __volatile__ ("membar #LoadLoad | #LoadStore"			\
                        " | #StoreLoad | #StoreStore" : : : "memory")

# elif defined (G_ATOMIC_ALPHA)
/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)			\
  ({ 									\
     gint __result;							\
     gint __prev;							\
     __asm__ __volatile__ (						\
        "       mb\n"							\
        "1:     ldl_l   %0,%2\n"					\
        "       cmpeq   %0,%3,%1\n"					\
        "       beq     %1,2f\n"					\
        "       mov     %4,%1\n"					\
        "       stl_c   %1,%2\n"					\
        "       beq     %1,1b\n"					\
        "       mb\n"							\
        "2:"								\
        : "=&r" (__prev), 						\
          "=&r" (__result)						\
        : "m" (*(atomic)),						\
          "Ir" (oldval),						\
          "Ir" (newval)							\
        : "memory");							\
     __result != 0;							\
  })
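/* This is a classic load-locked/store-conditional loop: ldl_l loads
 * *atomic and sets a reservation, stl_c succeeds only if nothing else
 * touched the location in between, and the beq back to 1b retries on a
 * lost reservation.  The mb instructions are full memory barriers on
 * either side of the update. */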
#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
				       gpointer           oldval, 
				       gpointer           newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldl_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stl_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev), 
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
				       gpointer           oldval, 
				       gpointer           newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
        "       mb\n"
        "1:     ldq_l   %0,%2\n"
        "       cmpeq   %0,%3,%1\n"
        "       beq     %1,2f\n"
        "       mov     %4,%1\n"
        "       stq_c   %1,%2\n"
        "       beq     %1,1b\n"
        "       mb\n"
        "2:"
        : "=&r" (prev), 
          "=&r" (result)
        : "m" (*atomic),
          "Ir" (oldval),
          "Ir" (newval)
        : "memory");
  return result != 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
#  define G_ATOMIC_MEMORY_BARRIER  __asm__ ("mb" : : : "memory")
# elif defined (G_ATOMIC_X86_64)
/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h 
 */
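/* These are the same lock-prefixed sequences as in the i486 code above;
 * only the pointer compare-and-exchange differs, using cmpxchgq (the
 * %q2 modifier forces the 64-bit register name) because gpointer is
 * 8 bytes wide here. */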
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
			       gint           val)
{
  gint result;

  __asm__ __volatile__ ("lock; xaddl %0,%1"
                        : "=r" (result), "=m" (*atomic) 
			: "0" (val), "m" (*atomic));
  return result;
}
 
void
g_atomic_int_add (volatile gint *atomic, 
		  gint           val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
			: "=m" (*atomic) 
			: "ir" (val), "m" (*atomic));
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic, 
				   gint           oldval, 
				   gint           newval)
{
  gint result;
 
  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
			: "=a" (result), "=m" (*atomic)
			: "r" (newval), "m" (*atomic), "0" (oldval)); 

  return result == oldval;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
				       gpointer           oldval, 
				       gpointer           newval)
{
  gpointer result;
 
  __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
			: "=a" (result), "=m" (*atomic)
			: "r" (newval), "m" (*atomic), "0" (oldval)); 

  return result == oldval;
}

# elif defined (G_ATOMIC_POWERPC)
/* Adapted from CVS version 1.16 of glibc's sysdeps/powerpc/bits/atomic.h 
 * and CVS version 1.4 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h 
 * and CVS version 1.7 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h 
 */
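/* All PowerPC variants below are lwarx/stwcx. (or ldarx/stdcx. for
 * 64-bit quantities) reservation loops.  ASM_NUMERIC_LABELS selects
 * between plain numeric local labels and named labels suffixed with %=,
 * which GCC expands to a number unique to each asm instance. */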
#   ifdef __OPTIMIZE__
/* Non-optimizing compile bails on the following two asm statements
 * for reasons unknown to the author */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic, 
			       gint           val)
{
  gint result, temp;
#if ASM_NUMERIC_LABELS
  __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
			"         add     %1,%0,%4\n"
			"         stwcx.  %1,0,%3\n"
			"         bne-    1b"
			: "=&b" (result), "=&r" (temp), "=m" (*atomic)
			: "b" (atomic), "r" (val), "m" (*atomic)
			: "cr0", "memory");
#else
  __asm__ __volatile__ (".Lieaa%=:       lwarx   %0,0,%3\n"
			"         add     %1,%0,%4\n"
			"         stwcx.  %1,0,%3\n"
			"         bne-    .Lieaa%="
			: "=&b" (result), "=&r" (temp), "=m" (*atomic)
			: "b" (atomic), "r" (val), "m" (*atomic)
			: "cr0", "memory");
#endif
  return result;
}
 
/* The same asm as above, repeated here to save a function call */
void
g_atomic_int_add (volatile gint *atomic, 
		  gint           val)
{
  gint result, temp;  
#if ASM_NUMERIC_LABELS
  __asm__ __volatile__ ("1:       lwarx   %0,0,%3\n"
			"         add     %1,%0,%4\n"
			"         stwcx.  %1,0,%3\n"
			"         bne-    1b"
			: "=&b" (result), "=&r" (temp), "=m" (*atomic)
			: "b" (atomic), "r" (val), "m" (*atomic)
			: "cr0", "memory");
#else
  __asm__ __volatile__ (".Lia%=:       lwarx   %0,0,%3\n"
			"         add     %1,%0,%4\n"
			"         stwcx.  %1,0,%3\n"
			"         bne-    .Lia%="
			: "=&b" (result), "=&r" (temp), "=m" (*atomic)
			: "b" (atomic), "r" (val), "m" (*atomic)
			: "cr0", "memory");
#endif
}
#   else /* !__OPTIMIZE__ */
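/* Unoptimized builds fall back to a plain compare-and-exchange retry
 * loop, which needs no inline assembly of its own. */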
gint
g_atomic_int_exchange_and_add (volatile gint *atomic, 
			       gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));

  return result;
}
 
void
g_atomic_int_add (volatile gint *atomic,
		  gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
}
#   endif /* !__OPTIMIZE__ */

#   if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic, 
				   gint           oldval, 
				   gint           newval)
{
  gint result;
#if ASM_NUMERIC_LABELS
  __asm__ __volatile__ ("sync\n"
			"1: lwarx   %0,0,%1\n"
			"   subf.   %0,%2,%0\n"
			"   bne     2f\n"
			"   stwcx.  %3,0,%1\n"
			"   bne-    1b\n"
			"2: isync"
			: "=&r" (result)
			: "b" (atomic), "r" (oldval), "r" (newval)
			: "cr0", "memory"); 
#else
  __asm__ __volatile__ ("sync\n"
			".L1icae%=: lwarx   %0,0,%1\n"
			"   subf.   %0,%2,%0\n"
			"   bne     .L2icae%=\n"
			"   stwcx.  %3,0,%1\n"
			"   bne-    .L1icae%=\n"
			".L2icae%=: isync"
			: "=&r" (result)
			: "b" (atomic), "r" (oldval), "r" (newval)
			: "cr0", "memory"); 
#endif
  return result == 0;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
				       gpointer           oldval, 
				       gpointer           newval)
{
  gpointer result;
#if ASM_NUMERIC_LABELS
  __asm__ __volatile__ ("sync\n"
			"1: lwarx   %0,0,%1\n"
			"   subf.   %0,%2,%0\n"
			"   bne     2f\n"
			"   stwcx.  %3,0,%1\n"
			"   bne-    1b\n"
			"2: isync"
			: "=&r" (result)
			: "b" (atomic), "r" (oldval), "r" (newval)
			: "cr0", "memory"); 
#else
  __asm__ __volatile__ ("sync\n"
			".L1pcae%=: lwarx   %0,0,%1\n"
			"   subf.   %0,%2,%0\n"
			"   bne     .L2pcae%=\n"
			"   stwcx.  %3,0,%1\n"
			"   bne-    .L1pcae%=\n"
			".L2pcae%=: isync"
			: "=&r" (result)
			: "b" (atomic), "r" (oldval), "r" (newval)
			: "cr0", "memory"); 
#endif
  return result == 0;
}
#   elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
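/* On 64-bit PowerPC, lwarx zero-extends the loaded word into a 64-bit
 * register, so the extsw below re-sign-extends it before the subf.
 * comparison against the sign-extended gint oldval. */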
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
				   gint           oldval, 
				   gint           newval)
{
  gpointer result;
#if ASM_NUMERIC_LABELS
  __asm__ __volatile__ ("sync\n"
			"1: lwarx   %0,0,%1\n"
			"   extsw   %0,%0\n"
			"   subf.   %0,%2,%0\n"
			"   bne     2f\n"
			"   stwcx.  %3,0,%1\n"
			"   bne-    1b\n"
			"2: isync"
			: "=&r" (result)
			: "b" (atomic), "r" (oldval), "r" (newval)
			: "cr0", "memory"); 
#else
  __asm__ __volatile__ ("sync\n"
			".L1icae%=: lwarx   %0,0,%1\n"
			"   extsw   %0,%0\n"
			"   subf.   %0,%2,%0\n"
			"   bne     .L2icae%=\n"
			"   stwcx.  %3,0,%1\n"
			"   bne-    .L1icae%=\n"
			".L2icae%=: isync"
			: "=&r" (result)
			: "b" (atomic), "r" (oldval), "r" (newval)
			: "cr0", "memory"); 
#endif
  return result == 0;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
				       gpointer           oldval, 
				       gpointer           newval)
{
  gpointer result;
#if ASM_NUMERIC_LABELS
  __asm__ __volatile__ ("sync\n"
			"1: ldarx   %0,0,%1\n"
			"   subf.   %0,%2,%0\n"
			"   bne     2f\n"
			"   stdcx.  %3,0,%1\n"
			"   bne-    1b\n"
			"2: isync"
			: "=&r" (result)
			: "b" (atomic), "r" (oldval), "r" (newval)
			: "cr0", "memory"); 
#else
  __asm__ __volatile__ ("sync\n"
			".L1pcae%=: ldarx   %0,0,%1\n"
			"   subf.   %0,%2,%0\n"
			"   bne     .L2pcae%=\n"
			"   stdcx.  %3,0,%1\n"
			"   bne-    .L1pcae%=\n"
			".L2pcae%=: isync"
			: "=&r" (result)
			: "b" (atomic), "r" (oldval), "r" (newval)
			: "cr0", "memory"); 
#endif
  return result == 0;
}
#  else /* What's that */
#   error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */

#  define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory")

# elif defined (G_ATOMIC_IA64)
/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
 */
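/* On ia64, GCC provides the __sync_* builtins, so these functions are
 * thin wrappers; __sync_fetch_and_add returns the value the atomic held
 * before the addition. */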
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
			       gint           val)
{
  return __sync_fetch_and_add (atomic, val);
}
 
void
g_atomic_int_add (volatile gint *atomic, 
		  gint           val)
{
  __sync_fetch_and_add (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
				   gint           oldval, 
				   gint           newval)
{
  return __sync_bool_compare_and_swap (atomic, oldval, newval);
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval, 
				       gpointer           newval)
{
  return __sync_bool_compare_and_swap ((long *)atomic, 
				       (long)oldval, (long)newval);
}

#  define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
# elif defined (G_ATOMIC_S390)
/* Adapted from glibc's sysdeps/s390/bits/atomic.h
 */
#  define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)			\
  ({ 									\
     gint __result = oldval;					\
     __asm__ __volatile__ ("cs %0, %2, %1"				\
                           : "+d" (__result), "=Q" (*(atomic))		\
                           : "d" (newval), "m" (*(atomic)) : "cc" );	\
     __result == oldval;						\
  })
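/* s390's cs (compare and swap) instruction takes the expected value in
 * a register that it also writes the found value back to; the
 * "+d" (__result) operand expresses that, so comparing __result against
 * oldval afterwards reveals whether the swap took place. */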

#  if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  gpointer result = oldval;
  __asm__ __volatile__ ("cs %0, %2, %1"
			: "+d" (result), "=Q" (*(atomic))
			: "d" (newval), "m" (*(atomic)) : "cc" );
  return result == oldval;
}
#  elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  gpointer result = oldval;
  gpointer *a = atomic;
  __asm__ __volatile__ ("csg %0, %2, %1"
			: "+d" (result), "=Q" (*a)
			: "d" ((long)(newval)), "m" (*a) : "cc" );
  return result == oldval;
}
#  else /* What's that */
#    error "Your system has an unsupported pointer size"
#  endif /* GLIB_SIZEOF_VOID_P */
# elif defined (G_ATOMIC_ARM)
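/* Pre-ARMv6 cores provide no compare-and-swap instruction, only the
 * atomic swap swp.  A single global spinlock built on swp therefore
 * guards every atomic operation below, and contended lockers back off
 * with sched_yield() (hence the <sched.h> include at the top of this
 * file). */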
static volatile int atomic_spin = 0;

static int atomic_spin_trylock (void)
{
  int result;

  asm volatile (
    "swp %0, %1, [%2]\n"
    : "=&r,&r" (result)
    : "r,0" (1), "r,r" (&atomic_spin)
    : "memory");
  if (result == 0)
    return 0;
  else
    return -1;
}

static void atomic_spin_lock (void)
{
  while (atomic_spin_trylock())
    sched_yield();
}

static void atomic_spin_unlock (void)
{
  atomic_spin = 0;
}

gint
g_atomic_int_exchange_and_add (volatile gint *atomic, 
			       gint           val)
{
  gint result;
 
  atomic_spin_lock();  
  result = *atomic;
  *atomic += val;
  atomic_spin_unlock();

  return result;
}

void
g_atomic_int_add (volatile gint *atomic,
		  gint           val)
{
  atomic_spin_lock();
  *atomic += val;
  atomic_spin_unlock();
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic, 
				   gint           oldval, 
				   gint           newval)
{
  gboolean result;

  atomic_spin_lock();
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  atomic_spin_unlock();

  return result;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
				       gpointer           oldval, 
				       gpointer           newval)
{
  gboolean result;
 
  atomic_spin_lock();
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  atomic_spin_unlock();

  return result;
}
# else /* !G_ATOMIC_ARM */
#  define DEFINE_WITH_MUTEXES
# endif /* G_ATOMIC_* */
#else /* !__GNUC__ */
# ifdef G_PLATFORM_WIN32
#  define DEFINE_WITH_WIN32_INTERLOCKED
# else
#  define DEFINE_WITH_MUTEXES
# endif
#endif /* __GNUC__ */

#ifdef DEFINE_WITH_WIN32_INTERLOCKED
# include <windows.h>
/* The following #define indicates that InterlockedCompareExchangePointer
 * is declared in winbase.h (included by windows.h); comment it out if
 * that is not the case. It is defined iff WINVER > 0x0400, which is
 * usually correct but can be wrong if WINVER is set before windows.h is
 * included.
 */
# if WINVER > 0x0400
#  define HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
# endif
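/* The Win32 implementation wraps the Interlocked* family;
 * InterlockedExchangeAdd and InterlockedCompareExchange return the
 * value the target held before the operation. */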

gint32
g_atomic_int_exchange_and_add (volatile gint32 *atomic,
			       gint32           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

void     
g_atomic_int_add (volatile gint32 *atomic, 
		  gint32           val)
{
  InterlockedExchangeAdd (atomic, val);
}

gboolean 
g_atomic_int_compare_and_exchange (volatile gint32 *atomic,
				   gint32           oldval,
				   gint32           newval)
{
#ifndef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
  return (guint32) InterlockedCompareExchange ((PVOID*)atomic, 
                                               (PVOID)newval, 
                                               (PVOID)oldval) == oldval;
#else
  return InterlockedCompareExchange (atomic, 
                                     newval, 
                                     oldval) == oldval;
#endif
}

gboolean 
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
# ifdef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
# else
#  if GLIB_SIZEOF_VOID_P != 4 /* not a 32-bit system */
#   error "InterlockedCompareExchangePointer needed"
#  else
   return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
#  endif
# endif
}
#endif /* DEFINE_WITH_WIN32_INTERLOCKED */

#ifdef DEFINE_WITH_MUTEXES
/* We have to use the slow, but safe locking method */
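/* A single process-global mutex serializes every operation here, which
 * is correct but slow: operations on unrelated variables all contend
 * for the same lock.  Each function below amounts to
 *
 *   g_mutex_lock (g_atomic_mutex);
 *   ...read and/or modify *atomic...
 *   g_mutex_unlock (g_atomic_mutex);
 */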
static GMutex *g_atomic_mutex; 

gint
g_atomic_int_exchange_and_add (volatile gint *atomic, 
			       gint           val)
{
  gint result;
    
  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  *atomic += val;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}


void
g_atomic_int_add (volatile gint *atomic,
		  gint           val)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic += val;
  g_mutex_unlock (g_atomic_mutex);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic, 
				   gint           oldval, 
				   gint           newval)
{
  gboolean result;
    
  g_mutex_lock (g_atomic_mutex);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic, 
				       gpointer           oldval, 
				       gpointer           newval)
{
  gboolean result;
    
  g_mutex_lock (g_atomic_mutex);
  if (*atomic == oldval)
    {
      result = TRUE;
      *atomic = newval;
    }
  else
    result = FALSE;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

#ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
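/* On architectures that need explicit barriers, even plain get/set must
 * synchronize with the other atomic operations, so the mutex-based
 * implementation routes them through the same global lock. */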
gint
g_atomic_int_get (volatile gint *atomic)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

void
g_atomic_int_set (volatile gint *atomic,
                  gint           newval)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic = newval;
  g_mutex_unlock (g_atomic_mutex);
}

gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  gpointer result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

void
g_atomic_pointer_set (volatile gpointer *atomic,
                      gpointer           newval)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic = newval;
  g_mutex_unlock (g_atomic_mutex);
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */   
#elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
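/* With native atomic ops on a reordering architecture, the get
 * functions issue the barrier before the read and the set functions
 * issue it after the write. */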
gint
g_atomic_int_get (volatile gint *atomic)
{
  G_ATOMIC_MEMORY_BARRIER;
  return *atomic;
}

void
g_atomic_int_set (volatile gint *atomic,
                  gint           newval)
{
  *atomic = newval;
  G_ATOMIC_MEMORY_BARRIER; 
}

gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  G_ATOMIC_MEMORY_BARRIER;
  return *atomic;
}   

void
g_atomic_pointer_set (volatile gpointer *atomic,
                      gpointer           newval)
{
  *atomic = newval;
  G_ATOMIC_MEMORY_BARRIER; 
}
#endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */

#ifdef ATOMIC_INT_CMP_XCHG
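/* Architectures that only define ATOMIC_INT_CMP_XCHG (sparcv9, alpha,
 * s390) derive their add operations from a compare-and-swap retry loop:
 * read the current value, try to swap in current + val, and start over
 * if another thread modified the value in the meantime. */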
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
				   gint           oldval,
				   gint           newval)
{
  return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
}

gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
			       gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));

  return result;
}
 
void
g_atomic_int_add (volatile gint *atomic,
		  gint           val)
{
  gint result;
  do
    result = *atomic;
  while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
}
#endif /* ATOMIC_INT_CMP_XCHG */

void 
_g_atomic_thread_init (void)
{
#ifdef DEFINE_WITH_MUTEXES
  g_atomic_mutex = g_mutex_new ();
#endif /* DEFINE_WITH_MUTEXES */
}

#ifndef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
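/* Without barriers, gatomic.h implements the get/set calls as plain
 * macros; parenthesizing the function names below suppresses macro
 * expansion, so real linkable symbols still exist for these entry
 * points. */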
gint
(g_atomic_int_get) (volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

void
(g_atomic_int_set) (volatile gint *atomic,
		    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

gpointer
(g_atomic_pointer_get) (volatile gpointer *atomic)
{
  return g_atomic_pointer_get (atomic);
}
926 927 928 929 930 931 932

void
(g_atomic_pointer_set) (volatile gpointer *atomic,
			gpointer           newval)
{
  g_atomic_pointer_set (atomic, newval);
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */

#define __G_ATOMIC_C__
#include "galiasdef.c"