/*
 * Copyright © 2011 Ryan Lortie
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */

#include "config.h"

#include "gatomic.h"

/**
 * SECTION:atomic_operations
 * @title: Atomic Operations
 * @short_description: basic atomic integer and pointer operations
 * @see_also: #GMutex
 *
 * The following is a collection of compiler macros to provide atomic
 * access to integer and pointer-sized values.
 *
 * The macros that have 'int' in the name will operate on pointers to
 * #gint and #guint.  The macros with 'pointer' in the name will operate
 * on pointers to any pointer-sized value, including #gsize.  There is
 * no support for 64-bit operations on platforms with 32-bit pointers
 * because it is not generally possible to perform these operations
 * atomically.
 *
 * The get, set and exchange operations for integers and pointers
 * nominally operate on #gint and #gpointer, respectively.  Of the
 * arithmetic operations, the 'add' operation operates on (and returns)
 * signed integer values (#gint and #gssize) and the 'and', 'or', and
 * 'xor' operations operate on (and return) unsigned integer values
 * (#guint and #gsize).
 *
 * All of the operations act as a full compiler and (where appropriate)
 * hardware memory barrier.  Acquire and release or producer and
 * consumer barrier semantics are not available through this API.
 *
 * It is very important that all accesses to a particular integer or
 * pointer be performed using only this API and that different sizes of
 * operation are not mixed or used on overlapping memory regions.  Never
 * read or assign directly from or to a value -- always use this API.
 *
 * For simple reference counting purposes you should use
 * g_atomic_int_inc() and g_atomic_int_dec_and_test().  Other uses that
 * fall outside of simple reference counting patterns are prone to
 * subtle bugs and occasionally undefined behaviour.  It is also worth
 * noting that since all of these operations require global
 * synchronisation of the entire machine, they can be quite slow.  In
 * the case of performing multiple atomic operations it can often be
 * faster to simply acquire a mutex lock around the critical area,
 * perform the operations normally and then release the lock.
 **/
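
/* A minimal sketch of the reference-counting pattern recommended above.
 * MyObject and its ref_count field are hypothetical, for illustration
 * only:
 *
 *   MyObject *
 *   my_object_ref (MyObject *object)
 *   {
 *     g_atomic_int_inc (&object->ref_count);
 *     return object;
 *   }
 *
 *   void
 *   my_object_unref (MyObject *object)
 *   {
 *     if (g_atomic_int_dec_and_test (&object->ref_count))
 *       g_free (object);
 *   }
 */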

/**
 * G_ATOMIC_LOCK_FREE:
 *
 * This macro is defined if the atomic operations of GLib are
 * implemented using real hardware atomic operations.  This means that
 * the GLib atomic API can be used between processes and safely mixed
 * with other (hardware) atomic APIs.
 *
 * If this macro is not defined, the atomic operations may be
 * emulated using a mutex.  In that case, the GLib atomic operations are
 * only atomic relative to themselves and within a single process.
 **/
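
/* An illustrative compile-time guard (not part of GLib): code that
 * shares atomic values with another process, e.g. in shared memory,
 * can refuse to build against the emulated implementation:
 *
 *   #ifndef G_ATOMIC_LOCK_FREE
 *   #error "hardware atomics required; the mutex fallback is process-local"
 *   #endif
 */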

/* NOTE CAREFULLY:
 *
 * This file is the lowest-level part of GLib.
 *
 * Other lowlevel parts of GLib (threads, slice allocator, g_malloc,
 * messages, etc) call into these functions and macros to get work done.
 *
 * As such, these functions can not call back into any part of GLib
 * without risking recursion.
 */

#ifdef G_ATOMIC_LOCK_FREE

/* if G_ATOMIC_LOCK_FREE was defined by `meson configure` then we MUST
 * implement the atomic operations in a lock-free manner.
 */

#if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)

/**
 * g_atomic_int_get:
 * @atomic: a pointer to a #gint or #guint
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the integer
 *
 * Since: 2.4
 **/
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

/**
 * g_atomic_int_set:
 * @atomic: a pointer to a #gint or #guint
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 *
 * Since: 2.4
 */
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

/**
 * g_atomic_int_inc:
 * @atomic: a pointer to a #gint or #guint
 *
 * Increments the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of `{ *atomic += 1; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Since: 2.4
 **/
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  g_atomic_int_inc (atomic);
}

/**
 * g_atomic_int_dec_and_test:
 * @atomic: a pointer to a #gint or #guint
 *
 * Decrements the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * `{ *atomic -= 1; return (*atomic == 0); }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the resultant value is zero
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return g_atomic_int_dec_and_test (atomic);
}

/**
 * g_atomic_int_compare_and_exchange:
 * @atomic: a pointer to a #gint or #guint
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}
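
/* An illustrative compare-and-exchange retry loop (not part of the GLib
 * API): atomically raise a shared maximum, re-reading whenever another
 * thread got there first:
 *
 *   void
 *   store_max (volatile gint *shared_max,
 *              gint           candidate)
 *   {
 *     gint current;
 *
 *     do
 *       current = g_atomic_int_get (shared_max);
 *     while (current < candidate &&
 *            !g_atomic_int_compare_and_exchange (shared_max, current, candidate));
 *   }
 */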

/**
 * g_atomic_int_add:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic += val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Before version 2.30, this function did not return a value
 * (but g_atomic_int_exchange_and_add() did, and had the same meaning).
 *
 * Returns: the value of @atomic before the add, signed
 *
 * Since: 2.4
 **/
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return g_atomic_int_add (atomic, val);
}
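
/* Because the returned value is the value before the add,
 * g_atomic_int_add() can hand out distinct slots to concurrent writers
 * without a lock.  A sketch (next_slot is hypothetical):
 *
 *   static volatile gint next_slot = 0;
 *
 *   gint
 *   claim_slot (void)
 *   {
 *     return g_atomic_int_add (&next_slot, 1);
 *   }
 *
 * Every caller receives a different index, even under contention.
 */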

/**
 * g_atomic_int_and:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic &= val; return tmp; }`.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_and (atomic, val);
}

/**
 * g_atomic_int_or:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic |= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return g_atomic_int_or (atomic, val);
}

/**
 * g_atomic_int_xor:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic ^= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_xor (atomic, val);
}
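
/* Together, g_atomic_int_and(), g_atomic_int_or() and g_atomic_int_xor()
 * let a #guint serve as an atomic flag word.  A sketch, with a
 * hypothetical FLAG_READY bit:
 *
 *   static volatile guint flags = 0;
 *
 *   g_atomic_int_or  (&flags, FLAG_READY);     set the bit
 *   g_atomic_int_and (&flags, ~FLAG_READY);    clear the bit
 *   g_atomic_int_xor (&flags, FLAG_READY);     toggle the bit
 *
 * Each call returns the previous flag word, so the caller can also tell
 * whether the bit was already set before the call.
 */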


/**
 * g_atomic_pointer_get:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the pointer
 *
 * Since: 2.4
 **/
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  return g_atomic_pointer_get ((const volatile gpointer *) atomic);
}

/**
 * g_atomic_pointer_set:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 *
 * Since: 2.4
 **/
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  g_atomic_pointer_set ((volatile gpointer *) atomic, newval);
}

/**
 * g_atomic_pointer_compare_and_exchange:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return g_atomic_pointer_compare_and_exchange ((volatile gpointer *) atomic,
                                                oldval, newval);
}
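
/* A common use of the pointer compare-and-exchange is one-time lazy
 * initialisation of a shared pointer.  A sketch (create_instance() and
 * destroy_instance() are hypothetical):
 *
 *   static volatile gpointer singleton = NULL;
 *
 *   gpointer
 *   get_singleton (void)
 *   {
 *     if (g_atomic_pointer_get (&singleton) == NULL)
 *       {
 *         gpointer instance = create_instance ();
 *
 *         if (!g_atomic_pointer_compare_and_exchange (&singleton,
 *                                                     NULL, instance))
 *           destroy_instance (instance);  (another thread won the race)
 *       }
 *
 *     return g_atomic_pointer_get (&singleton);
 *   }
 */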

/**
 * g_atomic_pointer_add:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic += val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the add, signed
 *
 * Since: 2.30
 **/
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  return g_atomic_pointer_add ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_and:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic &= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_and ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_or:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic |= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  return g_atomic_pointer_or ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_xor:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic ^= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_xor ((volatile gpointer *) atomic, val);
}

#elif defined (G_PLATFORM_WIN32)

#include <windows.h>
#if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64) && !(defined _MSC_VER && _MSC_VER <= 1200)
#define InterlockedAnd _InterlockedAnd
#define InterlockedOr _InterlockedOr
#define InterlockedXor _InterlockedXor
#endif

#if !defined (_MSC_VER) || _MSC_VER <= 1200
#include "gmessages.h"
/* Inlined versions for older compiler */
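/* Each helper below emulates a missing bitwise Interlocked* primitive
 * with a compare-and-exchange retry loop: re-read the current value and
 * retry until InterlockedCompareExchange() confirms it was unchanged.
 */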
static LONG
_gInterlockedAnd (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i & val, i);
  } while (i != j);

  return j;
}
#define InterlockedAnd(a,b) _gInterlockedAnd(a,b)
static LONG
_gInterlockedOr (volatile guint *atomic,
                 guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i | val, i);
  } while (i != j);

  return j;
}
#define InterlockedOr(a,b) _gInterlockedOr(a,b)
static LONG
_gInterlockedXor (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i ^ val, i);
  } while (i != j);

  return j;
}
#define InterlockedXor(a,b) _gInterlockedXor(a,b)
#endif

/*
 * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
 */
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  MemoryBarrier ();
  return *atomic;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  *atomic = newval;
  MemoryBarrier ();
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  InterlockedIncrement (atomic);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return InterlockedDecrement (atomic) == 0;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedAnd (atomic, val);
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return InterlockedOr (atomic, val);
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedXor (atomic, val);
}


gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;

  MemoryBarrier ();
  return *ptr;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  *ptr = newval;
  MemoryBarrier ();
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedAnd64 (atomic, val);
#else
  return InterlockedAnd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedXor64 (atomic, val);
#else
  return InterlockedXor (atomic, val);
#endif
}
#else

/* This error occurs when `meson configure` decided that we should be capable
 * of lock-free atomics but we find at compile-time that we are not.
 */
#error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics.

#endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */

#else /* G_ATOMIC_LOCK_FREE */

/* We are not permitted to call into any GLib functions from here, so we
 * cannot use GMutex.
 *
 * Fortunately, we already take care of the Windows case above, and all
 * non-Windows platforms on which GLib runs have pthreads.  Use those.
 */
#include <pthread.h>

static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER;

gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  gint value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *atomic;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           value)
{
  pthread_mutex_lock (&g_atomic_lock);
  *atomic = value;
  pthread_mutex_unlock (&g_atomic_lock);
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  pthread_mutex_lock (&g_atomic_lock);
  (*atomic)++;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  gboolean is_zero;

  pthread_mutex_lock (&g_atomic_lock);
  is_zero = --(*atomic) == 0;
  pthread_mutex_unlock (&g_atomic_lock);

  return is_zero;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*atomic == oldval)))
    *atomic = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  gint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}


gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;
  gpointer value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *ptr;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  pthread_mutex_lock (&g_atomic_lock);
  *ptr = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  volatile gpointer *ptr = atomic;
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*ptr == oldval)))
    *ptr = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  volatile gssize *ptr = atomic;
  gssize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

#endif

/**
 * g_atomic_int_exchange_and_add:
 * @atomic: a pointer to a #gint
 * @val: the value to add
 *
 * This function existed before g_atomic_int_add() returned the prior
 * value of the integer (which it now does).  It is retained only for
 * compatibility reasons.  Don't use this function in new code.
 *
 * Returns: the value of @atomic before the add, signed
 * Since: 2.4
 * Deprecated: 2.30: Use g_atomic_int_add() instead.
 **/
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return (g_atomic_int_add) (atomic, val);
}