/* GLIB sliced memory - fast concurrent memory chunk allocator
 * Copyright (C) 2005 Tim Janik
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* MT safe */

#include "config.h"
#include "glibconfig.h"

#if defined(HAVE_POSIX_MEMALIGN) && !defined(_XOPEN_SOURCE)
#define _XOPEN_SOURCE 600       /* posix_memalign() */
#endif
#include <stdlib.h>             /* posix_memalign() */
#include <string.h>
#include <errno.h>

#ifdef G_OS_UNIX
#include <unistd.h>             /* sysconf() */
#endif
#ifdef G_OS_WIN32
#include <windows.h>
#include <process.h>
#endif

#include <stdio.h>              /* fputs */

#include "gslice.h"

#include "gmain.h"
#include "gmem.h"               /* gslice.h */
#include "gstrfuncs.h"
#include "gstrfuncsprivate.h"
#include "gutils.h"
#include "gtrashstack.h"
#include "gtestutils.h"
#include "gthread.h"
#include "gthreadprivate.h"
#include "glib_trace.h"
#include "gprintf.h"

#include "gvalgrind.h"

/**
 * SECTION:memory_slices
 * @title: Memory Slices
 * @short_description: efficient way to allocate groups of equal-sized
 *     chunks of memory
 *
 * Memory slices provide a space-efficient and multi-processing scalable
 * way to allocate equal-sized pieces of memory, just like the original
 * #GMemChunks (from GLib 2.8), while avoiding their excessive
 * memory-waste, scalability and performance problems.
 *
 * To achieve these goals, the slice allocator uses a sophisticated,
 * layered design that has been inspired by Bonwick's slab allocator
 * ([Bonwick94](http://citeseer.ist.psu.edu/bonwick94slab.html)
 * Jeff Bonwick, The slab allocator: An object-caching kernel
 * memory allocator. USENIX 1994, and
 * [Bonwick01](http://citeseer.ist.psu.edu/bonwick01magazines.html)
 * Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 * slab allocator to many CPUs and arbitrary resources. USENIX 2001)
 *
 * It uses posix_memalign() to optimize allocations of many equally-sized
 * chunks, and has per-thread free lists (the so-called magazine layer)
 * to quickly satisfy allocation requests of already known structure sizes.
 * This is accompanied by extra caching logic to keep freed memory around
 * for some time before returning it to the system. Memory that is unused
 * due to alignment constraints is used for cache colorization (random
 * distribution of chunk addresses) to improve CPU cache utilization. The
 * caching layer of the slice allocator adapts itself to high lock contention
 * to improve scalability.
 *
 * The slice allocator can allocate blocks as small as two pointers, and
 * unlike malloc(), it does not reserve extra space per block. For large block
 * sizes, g_slice_new() and g_slice_alloc() will automatically delegate to the
 * system malloc() implementation. For newly written code it is recommended
 * to use the new `g_slice` API instead of g_malloc() and
 * friends, as long as objects are not resized during their lifetime and the
 * object size used at allocation time is still available when freeing.
 *
 * Here is an example for using the slice allocator:
 * |[<!-- language="C" --> 
 * gchar *mem[10000];
 * gint i, j;
 *
 * // Allocate 10000 blocks.
 * for (i = 0; i < 10000; i++)
 *   {
 *     mem[i] = g_slice_alloc (50);
 *
 *     // Fill in the memory with some junk.
 *     for (j = 0; j < 50; j++)
 *       mem[i][j] = i * j;
 *   }
 *
 * // Now free all of the blocks.
 * for (i = 0; i < 10000; i++)
 *   g_slice_free1 (50, mem[i]);
 * ]|
 *
 * And here is an example of using the slice allocator
 * with data structures:
 * |[<!-- language="C" --> 
 * GRealArray *array;
 *
 * // Allocate one block, using the g_slice_new() macro.
 * array = g_slice_new (GRealArray);
 *
 * // We can now use array just like a normal pointer to a structure.
 * array->data            = NULL;
 * array->len             = 0;
 * array->alloc           = 0;
 * array->zero_terminated = (zero_terminated ? 1 : 0);
 * array->clear           = (clear ? 1 : 0);
 * array->elt_size        = elt_size;
 *
 * // We can free the block, so it can be reused.
 * g_slice_free (GRealArray, array);
 * ]|
 */

/* the GSlice allocator is split up into 4 layers, roughly modelled after the slab
 * allocator and magazine extensions as outlined in:
 * + [Bonwick94] Jeff Bonwick, The slab allocator: An object-caching kernel
 *   memory allocator. USENIX 1994, http://citeseer.ist.psu.edu/bonwick94slab.html
 * + [Bonwick01] Bonwick and Jonathan Adams, Magazines and vmem: Extending the
 *   slab allocator to many CPUs and arbitrary resources.
 *   USENIX 2001, http://citeseer.ist.psu.edu/bonwick01magazines.html
 * the layers are:
 * - the thread magazines. for each (aligned) chunk size, a magazine (a list)
 *   of recently freed and soon to be allocated chunks is maintained per thread.
 *   this way, most alloc/free requests can be quickly satisfied from per-thread
 *   free lists which only require one g_private_get() call to retrieve the
 *   thread handle.
 * - the magazine cache. allocating and freeing chunks to/from threads only
 *   occurs at magazine sizes from a global depot of magazines. the depot
 *   maintains a 15 second working set of allocated magazines, so full
 *   magazines are not allocated and released too often.
 *   the chunk size dependent magazine sizes automatically adapt (within limits,
 *   see [3]) to lock contention to properly scale performance across a variety
 *   of SMP systems.
 * - the slab allocator. this allocator allocates slabs (blocks of memory) close
 *   to the system page size or multiples thereof which have to be page aligned.
 *   the blocks are divided into smaller chunks which are used to satisfy
 *   allocations from the upper layers. the space provided by the remainder of
 *   the chunk size division is used for cache colorization (random distribution
 *   of chunk addresses) to improve processor cache utilization. multiple slabs
 *   with the same chunk size are kept in a partially sorted ring to allow O(1)
 *   freeing and allocation of chunks (as long as the allocation of an entirely
 *   new slab can be avoided).
 * - the page allocator. on most modern systems, posix_memalign(3) or
 *   memalign(3) should be available, so this is used to allocate blocks with
 *   system page size based alignments and sizes or multiples thereof.
 *   if no memalign variant is provided, valloc() is used instead and
 *   block sizes are limited to the system page size (no multiples thereof).
 *   as a fallback, on systems without even valloc(), a malloc(3)-based page
 *   allocator with alloc-only behaviour is used.
 *
 * NOTES:
 * [1] some systems' memalign(3) implementations may rely on boundary tagging for
 *     the handed out memory chunks. to avoid excessive page-wise fragmentation,
 *     we reserve 2 * sizeof (void*) per block size for the system's memalign(3),
 *     specified in NATIVE_MALLOC_PADDING.
 * [2] using the slab allocator alone already provides for a fast and efficient
 *     allocator, but it doesn't properly scale beyond single-threaded uses.
 *     also, the slab allocator implements eager free(3)-ing, i.e. does not
 *     provide any form of caching or working set maintenance. so if used alone,
 *     it's vulnerable to thrashing for sequences of balanced (alloc, free) pairs
 *     at certain thresholds.
 * [3] magazine sizes are bound by an implementation specific minimum size and
 *     a chunk size specific maximum to limit magazine storage sizes to roughly
 *     16KB.
 * [4] allocating ca. 8 chunks per block/page keeps a good balance between
 *     external and internal fragmentation (<= 12.5%). [Bonwick94]
 */

/* --- macros and constants --- */
#define LARGEALIGNMENT          (256)
#define P2ALIGNMENT             (2 * sizeof (gsize))                            /* fits 2 pointers (assumed to be 2 * GLIB_SIZEOF_SIZE_T below) */
#define ALIGN(size, base)       ((base) * (gsize) (((size) + (base) - 1) / (base)))
#define NATIVE_MALLOC_PADDING   P2ALIGNMENT                                     /* per-page padding left for native malloc(3) see [1] */
#define SLAB_INFO_SIZE          P2ALIGN (sizeof (SlabInfo) + NATIVE_MALLOC_PADDING)
#define MAX_MAGAZINE_SIZE       (256)                                           /* see [3] and allocator_get_magazine_threshold() for this */
#define MIN_MAGAZINE_SIZE       (4)
#define MAX_STAMP_COUNTER       (7)                                             /* distributes the load of gettimeofday() */
#define MAX_SLAB_CHUNK_SIZE(al) (((al)->max_page_size - SLAB_INFO_SIZE) / 8)    /* we want at least 8 chunks per page, see [4] */
#define MAX_SLAB_INDEX(al)      (SLAB_INDEX (al, MAX_SLAB_CHUNK_SIZE (al)) + 1)
#define SLAB_INDEX(al, asize)   ((asize) / P2ALIGNMENT - 1)                     /* asize must be P2ALIGNMENT aligned */
#define SLAB_CHUNK_SIZE(al, ix) (((ix) + 1) * P2ALIGNMENT)
#define SLAB_BPAGE_SIZE(al,csz) (8 * (csz) + SLAB_INFO_SIZE)

/* optimized version of ALIGN (size, P2ALIGNMENT) */
#if     GLIB_SIZEOF_SIZE_T * 2 == 8  /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0x7) & ~(gsize) 0x7)
#elif   GLIB_SIZEOF_SIZE_T * 2 == 16 /* P2ALIGNMENT */
#define P2ALIGN(size)   (((size) + 0xf) & ~(gsize) 0xf)
#else
#define P2ALIGN(size)   ALIGN (size, P2ALIGNMENT)
#endif
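
/* worked example (illustrative): on a typical 64-bit platform P2ALIGNMENT is
 * 16, so a 50 byte request is padded to P2ALIGN (50) == 64, which maps to
 * SLAB_INDEX 3 and back to SLAB_CHUNK_SIZE 64.
 */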

/* special helpers to avoid gmessage.c dependency */
static void mem_error (const char *format, ...) G_GNUC_PRINTF (1,2);
#define mem_assert(cond)    do { if (G_LIKELY (cond)) ; else mem_error ("assertion failed: %s", #cond); } while (0)

/* --- structures --- */
typedef struct _ChunkLink      ChunkLink;
typedef struct _SlabInfo       SlabInfo;
typedef struct _CachedMagazine CachedMagazine;
struct _ChunkLink {
  ChunkLink *next;
  ChunkLink *data;
};
struct _SlabInfo {
  ChunkLink *chunks;
  guint n_allocated;
  SlabInfo *next, *prev;
};
typedef struct {
  ChunkLink *chunks;
  gsize      count;                     /* approximate chunks list length */
} Magazine;
typedef struct {
  Magazine   *magazine1;                /* array of MAX_SLAB_INDEX (allocator) */
  Magazine   *magazine2;                /* array of MAX_SLAB_INDEX (allocator) */
} ThreadMemory;
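/* per thread and chunk size, magazine1 serves allocations while magazine2
 * collects frees; the two get swapped by thread_memory_swap_magazines() when
 * magazine1 runs empty or magazine2 fills up (see g_slice_alloc()/g_slice_free1()).
 */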
typedef struct {
  gboolean always_malloc;
  gboolean bypass_magazines;
  gboolean debug_blocks;
  gsize    working_set_msecs;
  guint    color_increment;
} SliceConfig;
typedef struct {
  /* const after initialization */
  gsize         min_page_size, max_page_size;
  SliceConfig   config;
  gsize         max_slab_chunk_size_for_magazine_cache;
  /* magazine cache */
  GMutex        magazine_mutex;
  ChunkLink   **magazines;                /* array of MAX_SLAB_INDEX (allocator) */
  guint        *contention_counters;      /* array of MAX_SLAB_INDEX (allocator) */
  gint          mutex_counter;
  guint         stamp_counter;
  guint         last_stamp;
  /* slab allocator */
  GMutex        slab_mutex;
  SlabInfo    **slab_stack;                /* array of MAX_SLAB_INDEX (allocator) */
  guint        color_accu;
} Allocator;

/* --- g-slice prototypes --- */
static gpointer     slab_allocator_alloc_chunk       (gsize      chunk_size);
static void         slab_allocator_free_chunk        (gsize      chunk_size,
                                                      gpointer   mem);
static void         private_thread_memory_cleanup    (gpointer   data);
static gpointer     allocator_memalign               (gsize      alignment,
                                                      gsize      memsize);
static void         allocator_memfree                (gsize      memsize,
                                                      gpointer   mem);
static inline void  magazine_cache_update_stamp      (void);
static inline gsize allocator_get_magazine_threshold (Allocator *allocator,
                                                      guint      ix);

/* --- g-slice memory checker --- */
static void     smc_notify_alloc  (void   *pointer,
                                   size_t  size);
static int      smc_notify_free   (void   *pointer,
                                   size_t  size);

/* --- variables --- */
static GPrivate    private_thread_memory = G_PRIVATE_INIT (private_thread_memory_cleanup);
static gsize       sys_page_size = 0;
static Allocator   allocator[1] = { { 0, }, };
static SliceConfig slice_config = {
  FALSE,        /* always_malloc */
  FALSE,        /* bypass_magazines */
  FALSE,        /* debug_blocks */
  15 * 1000,    /* working_set_msecs */
  1,            /* color increment, alt: 0x7fffffff */
};
static GMutex      smc_tree_mutex; /* mutex for G_SLICE=debug-blocks */

/* --- auxiliary functions --- */
void
g_slice_set_config (GSliceConfig ckey,
                    gint64       value)
{
  g_return_if_fail (sys_page_size == 0);
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      slice_config.always_malloc = value != 0;
      break;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      slice_config.bypass_magazines = value != 0;
      break;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      slice_config.working_set_msecs = value;
      break;
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      slice_config.color_increment = value;
      break;
    default: ;
    }
}

gint64
g_slice_get_config (GSliceConfig ckey)
{
  switch (ckey)
    {
    case G_SLICE_CONFIG_ALWAYS_MALLOC:
      return slice_config.always_malloc;
    case G_SLICE_CONFIG_BYPASS_MAGAZINES:
      return slice_config.bypass_magazines;
    case G_SLICE_CONFIG_WORKING_SET_MSECS:
      return slice_config.working_set_msecs;
    case G_SLICE_CONFIG_CHUNK_SIZES:
      return MAX_SLAB_INDEX (allocator);
    case G_SLICE_CONFIG_COLOR_INCREMENT:
      return slice_config.color_increment;
    default:
      return 0;
    }
}

gint64*
g_slice_get_config_state (GSliceConfig ckey,
                          gint64       address,
                          guint       *n_values)
{
  guint i = 0;
  g_return_val_if_fail (n_values != NULL, NULL);
  *n_values = 0;
  switch (ckey)
    {
      gint64 array[64];
    case G_SLICE_CONFIG_CONTENTION_COUNTER:
      array[i++] = SLAB_CHUNK_SIZE (allocator, address);
      array[i++] = allocator->contention_counters[address];
      array[i++] = allocator_get_magazine_threshold (allocator, address);
      *n_values = i;
      return g_memdup2 (array, sizeof (array[0]) * *n_values);
    default:
      return NULL;
    }
}

static void
slice_config_init (SliceConfig *config)
{
  const gchar *val;
  gchar *val_allocated = NULL;

  *config = slice_config;

  /* Note that the empty string (`G_SLICE=""`) is treated differently from the
   * envvar being unset. In the latter case, we also check whether we are running under
   * valgrind. */
#ifndef G_OS_WIN32
  val = g_getenv ("G_SLICE");
#else
  /* The win32 implementation of g_getenv() has to do UTF-8 ↔ UTF-16 conversions
   * which use the slice allocator, leading to deadlock. Use a simple in-place
   * implementation here instead.
   *
   * Ignore references to other environment variables: only support values which
   * are a combination of always-malloc and debug-blocks. */
  {

  wchar_t wvalue[128];  /* at least big enough for `always-malloc,debug-blocks` */
  int len;

  len = GetEnvironmentVariableW (L"G_SLICE", wvalue, G_N_ELEMENTS (wvalue));

  if (len == 0)
    {
      if (GetLastError () == ERROR_ENVVAR_NOT_FOUND)
        val = NULL;
      else
        val = "";
    }
  else if (len >= G_N_ELEMENTS (wvalue))
    {
      /* @wvalue isn’t big enough. Give up. */
      g_warning ("Unsupported G_SLICE value");
      val = NULL;
    }
  else
    {
      /* it’s safe to use g_utf16_to_utf8() here as it only allocates using
       * malloc() rather than GSlice */
      val = val_allocated = g_utf16_to_utf8 (wvalue, -1, NULL, NULL, NULL);
    }

  }
#endif  /* G_OS_WIN32 */

  if (val != NULL)
    {
      gint flags;
      const GDebugKey keys[] = {
        { "always-malloc", 1 << 0 },
        { "debug-blocks",  1 << 1 },
      };
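      /* illustrative: a value like `G_SLICE=always-malloc,debug-blocks` selects
       * both keys above and thus sets both config flags below. */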

      flags = g_parse_debug_string (val, keys, G_N_ELEMENTS (keys));
      if (flags & (1 << 0))
        config->always_malloc = TRUE;
      if (flags & (1 << 1))
        config->debug_blocks = TRUE;
    }
  else
    {
      /* G_SLICE was not specified, so check if valgrind is running and
       * disable ourselves if it is.
       *
       * This way it's possible to force gslice to be enabled under
       * valgrind just by setting G_SLICE to the empty string.
       */
#ifdef ENABLE_VALGRIND
      if (RUNNING_ON_VALGRIND)
        config->always_malloc = TRUE;
#endif
    }

  g_free (val_allocated);
}

static void
g_slice_init_nomessage (void)
{
  /* we may not use g_error() or friends here */
  mem_assert (sys_page_size == 0);
  mem_assert (MIN_MAGAZINE_SIZE >= 4);

#ifdef G_OS_WIN32
  {
    SYSTEM_INFO system_info;
    GetSystemInfo (&system_info);
    sys_page_size = system_info.dwPageSize;
  }
#else
  sys_page_size = sysconf (_SC_PAGESIZE); /* = sysconf (_SC_PAGE_SIZE); = getpagesize(); */
#endif
  mem_assert (sys_page_size >= 2 * LARGEALIGNMENT);
  mem_assert ((sys_page_size & (sys_page_size - 1)) == 0);
  slice_config_init (&allocator->config);
  allocator->min_page_size = sys_page_size;
#if HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN
  /* allow allocation of pages up to 8KB (with 8KB alignment).
   * this is useful because many medium to large sized structures
   * fit less than 8 times (see [4]) into 4KB pages.
   * we allow very small page sizes here, to reduce wastage in
   * threads if only small allocations are required (this does
   * bear the risk of increasing allocation times and fragmentation
   * though).
   */
  allocator->min_page_size = MAX (allocator->min_page_size, 4096);
  allocator->max_page_size = MAX (allocator->min_page_size, 8192);
  allocator->min_page_size = MIN (allocator->min_page_size, 128);
#else
  /* we can only align to system page size */
  allocator->max_page_size = sys_page_size;
#endif
  if (allocator->config.always_malloc)
    {
      allocator->contention_counters = NULL;
      allocator->magazines = NULL;
      allocator->slab_stack = NULL;
    }
  else
    {
      allocator->contention_counters = g_new0 (guint, MAX_SLAB_INDEX (allocator));
      allocator->magazines = g_new0 (ChunkLink*, MAX_SLAB_INDEX (allocator));
      allocator->slab_stack = g_new0 (SlabInfo*, MAX_SLAB_INDEX (allocator));
    }

  allocator->mutex_counter = 0;
  allocator->stamp_counter = MAX_STAMP_COUNTER; /* force initial update */
  allocator->last_stamp = 0;
  allocator->color_accu = 0;
  magazine_cache_update_stamp();
  /* values cached for performance reasons */
  allocator->max_slab_chunk_size_for_magazine_cache = MAX_SLAB_CHUNK_SIZE (allocator);
  if (allocator->config.always_malloc || allocator->config.bypass_magazines)
    allocator->max_slab_chunk_size_for_magazine_cache = 0;      /* non-optimized cases */
}

static inline guint
allocator_categorize (gsize aligned_chunk_size)
{
  /* speed up the likely path */
  if (G_LIKELY (aligned_chunk_size && aligned_chunk_size <= allocator->max_slab_chunk_size_for_magazine_cache))
    return 1;           /* use magazine cache */

  if (!allocator->config.always_malloc &&
      aligned_chunk_size &&
      aligned_chunk_size <= MAX_SLAB_CHUNK_SIZE (allocator))
    {
      if (allocator->config.bypass_magazines)
        return 2;       /* use slab allocator, see [2] */
      return 1;         /* use magazine cache */
    }
  return 0;             /* use malloc() */
}

static inline void
g_mutex_lock_a (GMutex *mutex,
                guint  *contention_counter)
{
  gboolean contention = FALSE;
  if (!g_mutex_trylock (mutex))
    {
      g_mutex_lock (mutex);
      contention = TRUE;
    }
  if (contention)
    {
      allocator->mutex_counter++;
      if (allocator->mutex_counter >= 1)        /* quickly adapt to contention */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MIN (*contention_counter + 1, MAX_MAGAZINE_SIZE);
        }
    }
  else /* !contention */
    {
      allocator->mutex_counter--;
      if (allocator->mutex_counter < -11)       /* moderately recover magazine sizes */
        {
          allocator->mutex_counter = 0;
          *contention_counter = MAX (*contention_counter, 1) - 1;
        }
    }
}

static inline ThreadMemory*
thread_memory_from_self (void)
{
  ThreadMemory *tmem = g_private_get (&private_thread_memory);
  if (G_UNLIKELY (!tmem))
    {
      static GMutex init_mutex;
      guint n_magazines;

      g_mutex_lock (&init_mutex);
      if G_UNLIKELY (sys_page_size == 0)
        g_slice_init_nomessage ();
      g_mutex_unlock (&init_mutex);

      n_magazines = MAX_SLAB_INDEX (allocator);
      tmem = g_private_set_alloc0 (&private_thread_memory, sizeof (ThreadMemory) + sizeof (Magazine) * 2 * n_magazines);
      tmem->magazine1 = (Magazine*) (tmem + 1);
      tmem->magazine2 = &tmem->magazine1[n_magazines];
    }
  return tmem;
}

static inline ChunkLink*
magazine_chain_pop_head (ChunkLink **magazine_chunks)
{
  /* magazine chains are linked via ChunkLink->next.
   * each ChunkLink->data of the toplevel chain may point to a subchain,
   * linked via ChunkLink->next. ChunkLink->data of the subchains just
   * contains uninitialized junk.
   */
  ChunkLink *chunk = (*magazine_chunks)->data;
  if (G_UNLIKELY (chunk))
    {
      /* allocating from freed list */
      (*magazine_chunks)->data = chunk->next;
    }
  else
    {
      chunk = *magazine_chunks;
      *magazine_chunks = chunk->next;
    }
  return chunk;
}

#if 0 /* useful for debugging */
static guint
magazine_count (ChunkLink *head)
{
  guint count = 0;
  if (!head)
    return 0;
  while (head)
    {
      ChunkLink *child = head->data;
      count += 1;
      for (child = head->data; child; child = child->next)
        count += 1;
      head = head->next;
    }
  return count;
}
#endif

static inline gsize
allocator_get_magazine_threshold (Allocator *allocator,
                                  guint      ix)
{
  /* the magazine size calculated here has a lower bound of MIN_MAGAZINE_SIZE,
   * which is required by the implementation. also, for moderately sized chunks
   * (say >= 64 bytes), magazine sizes shouldn't be much smaller than the number
   * of chunks available per page/2 to avoid excessive traffic in the magazine
   * cache for small to medium sized structures.
   * the upper bound of the magazine size is effectively provided by
   * MAX_MAGAZINE_SIZE. for larger chunks, this number is scaled down so that
   * the content of a single magazine doesn't exceed ca. 16KB.
   */
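  /* worked example (illustrative): with an 8KB max page size and 64 byte
   * chunks, the uncontended threshold below is 8192 / (5 * 64) = 25 chunks
   * per magazine.
   */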
  gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
  guint threshold = MAX (MIN_MAGAZINE_SIZE, allocator->max_page_size / MAX (5 * chunk_size, 5 * 32));
  guint contention_counter = allocator->contention_counters[ix];
  if (G_UNLIKELY (contention_counter))  /* single CPU bias */
    {
      /* adapt contention counter thresholds to chunk sizes */
      contention_counter = contention_counter * 64 / chunk_size;
      threshold = MAX (threshold, contention_counter);
    }
  return threshold;
}

/* --- magazine cache --- */
static inline void
magazine_cache_update_stamp (void)
{
  if (allocator->stamp_counter >= MAX_STAMP_COUNTER)
    {
      gint64 now_us = g_get_real_time ();
      allocator->last_stamp = now_us / 1000; /* milliseconds */
      allocator->stamp_counter = 0;
    }
  else
    allocator->stamp_counter++;
}

static inline ChunkLink*
magazine_chain_prepare_fields (ChunkLink *magazine_chunks)
{
  ChunkLink *chunk1;
  ChunkLink *chunk2;
  ChunkLink *chunk3;
  ChunkLink *chunk4;
  /* checked upon initialization: mem_assert (MIN_MAGAZINE_SIZE >= 4); */
  /* ensure a magazine with at least 4 unused data pointers */
  chunk1 = magazine_chain_pop_head (&magazine_chunks);
  chunk2 = magazine_chain_pop_head (&magazine_chunks);
  chunk3 = magazine_chain_pop_head (&magazine_chunks);
  chunk4 = magazine_chain_pop_head (&magazine_chunks);
  chunk4->next = magazine_chunks;
  chunk3->next = chunk4;
  chunk2->next = chunk3;
  chunk1->next = chunk2;
  return chunk1;
}

/* access the first 3 fields of a specially prepared magazine chain */
#define magazine_chain_prev(mc)         ((mc)->data)
#define magazine_chain_stamp(mc)        ((mc)->next->data)
#define magazine_chain_uint_stamp(mc)   GPOINTER_TO_UINT ((mc)->next->data)
#define magazine_chain_next(mc)         ((mc)->next->next->data)
#define magazine_chain_count(mc)        ((mc)->next->next->next->data)
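/* in a cached magazine, the data fields of the first four chain links thus
 * hold the cache bookkeeping (prev, stamp, next, count), as set up by
 * magazine_chain_prepare_fields() above.
 */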

static void
magazine_cache_trim (Allocator *allocator,
                     guint      ix,
                     guint      stamp)
{
  /* g_mutex_lock (allocator->mutex); done by caller */
  /* trim magazine cache from tail */
  ChunkLink *current = magazine_chain_prev (allocator->magazines[ix]);
  ChunkLink *trash = NULL;
  while (!G_APPROX_VALUE(stamp, magazine_chain_uint_stamp (current),
                         allocator->config.working_set_msecs))
    {
      /* unlink */
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      /* clear special fields, put on trash stack */
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      magazine_chain_prev (current) = trash;
      trash = current;
      /* fixup list head if required */
      if (current == allocator->magazines[ix])
        {
          allocator->magazines[ix] = NULL;
          break;
        }
      current = prev;
    }
  g_mutex_unlock (&allocator->magazine_mutex);
  /* free trash */
  if (trash)
    {
      const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      g_mutex_lock (&allocator->slab_mutex);
      while (trash)
        {
          current = trash;
          trash = magazine_chain_prev (current);
          magazine_chain_prev (current) = NULL; /* clear special field */
          while (current)
            {
              ChunkLink *chunk = magazine_chain_pop_head (&current);
              slab_allocator_free_chunk (chunk_size, chunk);
            }
        }
      g_mutex_unlock (&allocator->slab_mutex);
    }
}

static void
magazine_cache_push_magazine (guint      ix,
                              ChunkLink *magazine_chunks,
                              gsize      count) /* must be >= MIN_MAGAZINE_SIZE */
{
  ChunkLink *current = magazine_chain_prepare_fields (magazine_chunks);
  ChunkLink *next, *prev;
  g_mutex_lock (&allocator->magazine_mutex);
  /* add magazine at head */
  next = allocator->magazines[ix];
  if (next)
    prev = magazine_chain_prev (next);
  else
    next = prev = current;
  magazine_chain_next (prev) = current;
  magazine_chain_prev (next) = current;
  magazine_chain_prev (current) = prev;
  magazine_chain_next (current) = next;
  magazine_chain_count (current) = (gpointer) count;
  /* stamp magazine */
  magazine_cache_update_stamp();
  magazine_chain_stamp (current) = GUINT_TO_POINTER (allocator->last_stamp);
  allocator->magazines[ix] = current;
  /* free old magazines beyond a certain threshold */
  magazine_cache_trim (allocator, ix, allocator->last_stamp);
  /* g_mutex_unlock (allocator->mutex); was done by magazine_cache_trim() */
}

static ChunkLink*
magazine_cache_pop_magazine (guint  ix,
                             gsize *countp)
{
  g_mutex_lock_a (&allocator->magazine_mutex, &allocator->contention_counters[ix]);
  if (!allocator->magazines[ix])
    {
      guint magazine_threshold = allocator_get_magazine_threshold (allocator, ix);
      gsize i, chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
      ChunkLink *chunk, *head;
      g_mutex_unlock (&allocator->magazine_mutex);
      g_mutex_lock (&allocator->slab_mutex);
      head = slab_allocator_alloc_chunk (chunk_size);
      head->data = NULL;
      chunk = head;
      for (i = 1; i < magazine_threshold; i++)
        {
          chunk->next = slab_allocator_alloc_chunk (chunk_size);
          chunk = chunk->next;
          chunk->data = NULL;
        }
      chunk->next = NULL;
      g_mutex_unlock (&allocator->slab_mutex);
      *countp = i;
      return head;
    }
  else
    {
      ChunkLink *current = allocator->magazines[ix];
      ChunkLink *prev = magazine_chain_prev (current);
      ChunkLink *next = magazine_chain_next (current);
      /* unlink */
      magazine_chain_next (prev) = next;
      magazine_chain_prev (next) = prev;
      allocator->magazines[ix] = next == current ? NULL : next;
      g_mutex_unlock (&allocator->magazine_mutex);
      /* clear special fields and hand out */
      *countp = (gsize) magazine_chain_count (current);
      magazine_chain_prev (current) = NULL;
      magazine_chain_next (current) = NULL;
      magazine_chain_count (current) = NULL;
      magazine_chain_stamp (current) = NULL;
      return current;
    }
}

/* --- thread magazines --- */
static void
private_thread_memory_cleanup (gpointer data)
{
  ThreadMemory *tmem = data;
  const guint n_magazines = MAX_SLAB_INDEX (allocator);
  guint ix;
  for (ix = 0; ix < n_magazines; ix++)
    {
      Magazine *mags[2];
      guint j;
      mags[0] = &tmem->magazine1[ix];
      mags[1] = &tmem->magazine2[ix];
      for (j = 0; j < 2; j++)
        {
          Magazine *mag = mags[j];
          if (mag->count >= MIN_MAGAZINE_SIZE)
            magazine_cache_push_magazine (ix, mag->chunks, mag->count);
          else
            {
              const gsize chunk_size = SLAB_CHUNK_SIZE (allocator, ix);
              g_mutex_lock (&allocator->slab_mutex);
              while (mag->chunks)
                {
                  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
                  slab_allocator_free_chunk (chunk_size, chunk);
                }
              g_mutex_unlock (&allocator->slab_mutex);
            }
        }
    }
  g_free (tmem);
}

static void
thread_memory_magazine1_reload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  mem_assert (mag->chunks == NULL); /* ensure that we may reset mag->count */
  mag->count = 0;
  mag->chunks = magazine_cache_pop_magazine (ix, &mag->count);
}

static void
thread_memory_magazine2_unload (ThreadMemory *tmem,
                                guint         ix)
{
  Magazine *mag = &tmem->magazine2[ix];
  magazine_cache_push_magazine (ix, mag->chunks, mag->count);
  mag->chunks = NULL;
  mag->count = 0;
}

static inline void
thread_memory_swap_magazines (ThreadMemory *tmem,
                              guint         ix)
{
  Magazine xmag = tmem->magazine1[ix];
  tmem->magazine1[ix] = tmem->magazine2[ix];
  tmem->magazine2[ix] = xmag;
}

static inline gboolean
thread_memory_magazine1_is_empty (ThreadMemory *tmem,
                                  guint         ix)
{
  return tmem->magazine1[ix].chunks == NULL;
}

static inline gboolean
thread_memory_magazine2_is_full (ThreadMemory *tmem,
                                 guint         ix)
{
  return tmem->magazine2[ix].count >= allocator_get_magazine_threshold (allocator, ix);
}

static inline gpointer
thread_memory_magazine1_alloc (ThreadMemory *tmem,
                               guint         ix)
{
  Magazine *mag = &tmem->magazine1[ix];
  ChunkLink *chunk = magazine_chain_pop_head (&mag->chunks);
  if (G_LIKELY (mag->count > 0))
    mag->count--;
  return chunk;
}

static inline void
thread_memory_magazine2_free (ThreadMemory *tmem,
                              guint         ix,
                              gpointer      mem)
{
  Magazine *mag = &tmem->magazine2[ix];
  ChunkLink *chunk = mem;
  chunk->data = NULL;
  chunk->next = mag->chunks;
  mag->chunks = chunk;
  mag->count++;
}

/* --- API functions --- */

/**
 * g_slice_new:
 * @type: the type to allocate, typically a structure name
 *
 * A convenience macro to allocate a block of memory from the
 * slice allocator.
 *
 * It calls g_slice_alloc() with `sizeof (@type)` and casts the
 * returned pointer to a pointer of the given type, avoiding a type
 * cast in the source code. Note that the underlying slice allocation
 * mechanism can be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * This can never return %NULL as the minimum allocation size from
 * `sizeof (@type)` is 1 byte.
 *
 * Returns: (not nullable): a pointer to the allocated block, cast to a pointer
 *    to @type
 *
 * Since: 2.10
 */

/**
 * g_slice_new0:
 * @type: the type to allocate, typically a structure name
 *
 * A convenience macro to allocate a block of memory from the
 * slice allocator and set the memory to 0.
 *
 * It calls g_slice_alloc0() with `sizeof (@type)`
 * and casts the returned pointer to a pointer of the given type,
 * avoiding a type cast in the source code.
 * Note that the underlying slice allocation mechanism can
 * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * This can never return %NULL as the minimum allocation size from
 * `sizeof (@type)` is 1 byte.
 *
 * Returns: (not nullable): a pointer to the allocated block, cast to a pointer
 *    to @type
 *
 * Since: 2.10
 */

/**
 * g_slice_dup:
 * @type: the type to duplicate, typically a structure name
 * @mem: (not nullable): the memory to copy into the allocated block
 *
 * A convenience macro to duplicate a block of memory using
 * the slice allocator.
 *
 * It calls g_slice_copy() with `sizeof (@type)`
 * and casts the returned pointer to a pointer of the given type,
 * avoiding a type cast in the source code.
 * Note that the underlying slice allocation mechanism can
 * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
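 * For example (illustrative, reusing the GRealArray type from the examples
 * above):
 * |[<!-- language="C" -->
 * GRealArray *copy = g_slice_dup (GRealArray, array);
 * ]|
 *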
 * This can never return %NULL.
 *
 * Returns: (not nullable): a pointer to the allocated block, cast to a pointer
 *    to @type
 *
 * Since: 2.14
 */

/**
 * g_slice_free:
 * @type: the type of the block to free, typically a structure name
 * @mem: a pointer to the block to free
 *
 * A convenience macro to free a block of memory that has
 * been allocated from the slice allocator.
 *
 * It calls g_slice_free1() using `sizeof (type)`
 * as the block size.
 * Note that the exact release behaviour can be changed with the
 * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
 * [`G_SLICE`][G_SLICE] for related debugging options.
 *
 * If @mem is %NULL, this macro does nothing.
 *
 * Since: 2.10
 */

/**
 * g_slice_free_chain:
 * @type: the type of the @mem_chain blocks
 * @mem_chain: a pointer to the first block of the chain
 * @next: the field name of the next pointer in @type
 *
 * Frees a linked list of memory blocks of structure type @type.
 * The memory blocks must be equal-sized, allocated via
 * g_slice_alloc() or g_slice_alloc0() and linked together by
 * a @next pointer (similar to #GSList). The name of the
 * @next field in @type is passed as third argument.
 * Note that the exact release behaviour can be changed with the
 * [`G_DEBUG=gc-friendly`][G_DEBUG] environment variable, also see
 * [`G_SLICE`][G_SLICE] for related debugging options.
 *
 * If @mem_chain is %NULL, this function does nothing.
 *
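 * For example, with a hypothetical singly linked node type:
 * |[<!-- language="C" -->
 * typedef struct _MyNode MyNode;
 * struct _MyNode {
 *   MyNode *next;
 *   int     value;
 * };
 *
 * // frees every node in the list starting at first_node
 * g_slice_free_chain (MyNode, first_node, next);
 * ]|
 *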
 * Since: 2.10
 */

/**
 * g_slice_alloc:
 * @block_size: the number of bytes to allocate
 *
 * Allocates a block of memory from the slice allocator.
 * The block address handed out can be expected to be aligned
 * to at least 1 * sizeof (void*), though in general slices are
 * 2 * sizeof (void*) bytes aligned; if a malloc() fallback implementation
 * is used instead, the alignment may be reduced in a libc-dependent fashion.
 * Note that the underlying slice allocation mechanism can
 * be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * Returns: a pointer to the allocated memory block, which will be %NULL if and
 *    only if @block_size is 0
 *
 * Since: 2.10
 */
gpointer
g_slice_alloc (gsize mem_size)
{
  ThreadMemory *tmem;
  gsize chunk_size;
  gpointer mem;
  guint acat;

  /* This gets the private structure for this thread.  If the private
   * structure does not yet exist, it is created.
   *
   * This has a side effect of causing GSlice to be initialised, so it
   * must come first.
   */
  tmem = thread_memory_from_self ();

  chunk_size = P2ALIGN (mem_size);
  acat = allocator_categorize (chunk_size);
  if (G_LIKELY (acat == 1))     /* allocate through magazine layer */
    {
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine1_is_empty (tmem, ix)))
            thread_memory_magazine1_reload (tmem, ix);
        }
      mem = thread_memory_magazine1_alloc (tmem, ix);
    }
  else if (acat == 2)           /* allocate through slab allocator */
    {
      g_mutex_lock (&allocator->slab_mutex);
      mem = slab_allocator_alloc_chunk (chunk_size);
      g_mutex_unlock (&allocator->slab_mutex);
    }
  else                          /* delegate to system malloc */
    mem = g_malloc (mem_size);
  if (G_UNLIKELY (allocator->config.debug_blocks))
    smc_notify_alloc (mem, mem_size);

  TRACE (GLIB_SLICE_ALLOC((void*)mem, mem_size));

  return mem;
}

/**
 * g_slice_alloc0:
 * @block_size: the number of bytes to allocate
 *
 * Allocates a block of memory via g_slice_alloc() and initializes
 * the returned memory to 0. Note that the underlying slice allocation
 * mechanism can be changed with the [`G_SLICE=always-malloc`][G_SLICE]
 * environment variable.
 *
 * Returns: a pointer to the allocated block, which will be %NULL if and only
 *    if @block_size is 0
 *
 * Since: 2.10
 */
gpointer
g_slice_alloc0 (gsize mem_size)
{
  gpointer mem = g_slice_alloc (mem_size);
  if (mem)
    memset (mem, 0, mem_size);
  return mem;
}

/**
 * g_slice_copy:
 * @block_size: the number of bytes to allocate
 * @mem_block: the memory to copy
 *
 * Allocates a block of memory from the slice allocator
 * and copies @block_size bytes into it from @mem_block.
 *
 * @mem_block must be non-%NULL if @block_size is non-zero.
 *
 * Returns: a pointer to the allocated memory block, which will be %NULL if and
 *    only if @block_size is 0
 *
 * Since: 2.14
 */
gpointer
g_slice_copy (gsize         mem_size,
              gconstpointer mem_block)
{
  gpointer mem = g_slice_alloc (mem_size);
  if (mem)
    memcpy (mem, mem_block, mem_size);
  return mem;
}

/**
 * g_slice_free1:
 * @block_size: the size of the block
 * @mem_block: a pointer to the block to free
 *
 * Frees a block of memory.
 *
 * The memory must have been allocated via g_slice_alloc() or
 * g_slice_alloc0() and the @block_size has to match the size
 * specified upon allocation. Note that the exact release behaviour
 * can be changed with the [`G_DEBUG=gc-friendly`][G_DEBUG] environment
 * variable, also see [`G_SLICE`][G_SLICE] for related debugging options.
 *
 * If @mem_block is %NULL, this function does nothing.
 *
 * Since: 2.10
 */
void
g_slice_free1 (gsize    mem_size,
               gpointer mem_block)
{
  gsize chunk_size = P2ALIGN (mem_size);
  guint acat = allocator_categorize (chunk_size);
  if (G_UNLIKELY (!mem_block))
    return;
  if (G_UNLIKELY (allocator->config.debug_blocks) &&
      !smc_notify_free (mem_block, mem_size))
    abort();
  if (G_LIKELY (acat == 1))             /* allocate through magazine layer */
    {
      ThreadMemory *tmem = thread_memory_from_self();
      guint ix = SLAB_INDEX (allocator, chunk_size);
      if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
        {
          thread_memory_swap_magazines (tmem, ix);
          if (G_UNLIKELY (thread_memory_magazine2_is_full (tmem, ix)))
            thread_memory_magazine2_unload (tmem, ix);
        }
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      thread_memory_magazine2_free (tmem, ix, mem_block);
    }
  else if (acat == 2)                   /* allocate through slab allocator */
    {
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, chunk_size);
      g_mutex_lock (&allocator->slab_mutex);
      slab_allocator_free_chunk (chunk_size, mem_block);
      g_mutex_unlock (&allocator->slab_mutex);
    }
  else                                  /* delegate to system malloc */
    {
      if (G_UNLIKELY (g_mem_gc_friendly))
        memset (mem_block, 0, mem_size);
      g_free (mem_block);
    }
  TRACE (GLIB_SLICE_FREE((void*)mem_block, mem_size));
}