Commit 7c881a60 authored by Ell

transform: use gegl_parallel_distribute_area() for parallelization

In GeglTransformCore, use gegl_parallel_distribute_area(), added in
the commit before last, to parallelize processing, instead of a
local thread pool.
parent f14b731d
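
For context, the sketch below shows the calling pattern this commit switches to, assuming only what is visible in the diff: gegl_parallel_distribute_area() takes the rectangle to split, a granularity/cost argument, a split strategy, a GeglParallelDistributeAreaFunc callback, and a user-data pointer, and invokes the callback once per disjoint sub-rectangle, possibly from several worker threads. The standalone program, the CountData/count_area names, and the numeric granularity value are made up for illustration; the exact meaning of the second argument is an assumption (the commit itself passes gegl_operation_get_min_threaded_sub_area (operation)), and the declaration may live in GEGL's own headers rather than in the installed API. This sketch is not part of the commit.

/* Hypothetical sketch of the gegl_parallel_distribute_area() pattern
 * used by this commit; see the hedges in the paragraph above. */
#include <gegl.h>

typedef struct
{
  gint n_areas;   /* number of sub-rectangles handed to the callback */
} CountData;

/* Called once per sub-rectangle of the distributed area, potentially
 * from several worker threads, so the counter is updated atomically. */
static void
count_area (const GeglRectangle *area,
            CountData           *data)
{
  g_atomic_int_add (&data->n_areas, 1);
}

gint
main (gint    argc,
      gchar **argv)
{
  GeglRectangle roi  = { 0, 0, 4096, 4096 };
  CountData     data = { 0 };

  gegl_init (&argc, &argv);

  /* Split 'roi' among the configured worker threads.  The second
   * argument is a granularity/cost hint; a plain value is assumed here,
   * whereas the commit passes
   * gegl_operation_get_min_threaded_sub_area (operation). */
  gegl_parallel_distribute_area (&roi,
                                 64 * 64,
                                 GEGL_SPLIT_STRATEGY_AUTO,
                                 (GeglParallelDistributeAreaFunc) count_area,
                                 &data);

  g_print ("processed %d sub-areas\n", data.n_areas);

  gegl_exit ();

  return 0;
}

That the call only returns once every sub-rectangle has been processed is implied by the diff itself: the old busy-wait on the atomic "pending" counter is removed without a replacement.
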
@@ -1062,47 +1062,39 @@ typedef struct ThreadData
                 gint                 level);
 
-  GeglOperation        *operation;
-  GeglOperationContext *context;
-  GeglBuffer           *output;
-  gint                 *pending;
-  GeglMatrix3          *matrix;
-  gint                  level;
-  gboolean              success;
-  GeglRectangle         roi;
+  GeglOperation        *operation;
+  GeglOperationContext *context;
+  GeglBuffer           *input;
+  GeglBuffer           *output;
+  GeglMatrix3          *matrix;
+  const GeglRectangle  *roi;
+  gint                  level;
 } ThreadData;
 
-static void thread_process (gpointer thread_data, gpointer input)
+static void
+thread_process (const GeglRectangle *area,
+                ThreadData          *data)
 {
-  ThreadData *data = thread_data;
+  GeglBuffer *input;
 
-  if (! input)
+  if (area->x == data->roi->x && area->y == data->roi->y)
     {
+      input = g_object_ref (data->input);
+    }
+  else
+    {
       input = gegl_operation_context_dup_input_maybe_copy (data->context,
-                                                           "input", &data->roi);
+                                                           "input", area);
     }
 
   data->func (data->operation,
               data->output,
               input,
               data->matrix,
-              &data->roi,
+              area,
               data->level);
 
   g_object_unref (input);
-
-  g_atomic_int_add (data->pending, -1);
 }
-
-static GThreadPool *thread_pool (void)
-{
-  static GThreadPool *pool = NULL;
-
-  if (!pool)
-    {
-      pool = g_thread_pool_new (thread_process, NULL, gegl_config_threads(),
-                                FALSE, NULL);
-    }
-  return pool;
-}
@@ -1678,53 +1670,23 @@ gegl_transform_process (GeglOperation *operation,
   if (gegl_operation_use_threading (operation, result))
     {
-      gint         threads = gegl_config_threads ();
-      GThreadPool *pool    = thread_pool ();
-      ThreadData   thread_data[GEGL_MAX_THREADS];
-      gint         pending = threads;
-
-      if (result->width > result->height)
-        {
-          gint bit = result->width / threads;
-          for (gint j = 0; j < threads; j++)
-            {
-              thread_data[j].roi.y      = result->y;
-              thread_data[j].roi.height = result->height;
-              thread_data[j].roi.x      = result->x + bit * j;
-              thread_data[j].roi.width  = bit;
-            }
-          thread_data[threads-1].roi.width = result->width - (bit * (threads-1));
-        }
-      else
-        {
-          gint bit = result->height / threads;
-          for (gint j = 0; j < threads; j++)
-            {
-              thread_data[j].roi.x      = result->x;
-              thread_data[j].roi.width  = result->width;
-              thread_data[j].roi.y      = result->y + bit * j;
-              thread_data[j].roi.height = bit;
-            }
-          thread_data[threads-1].roi.height = result->height - (bit * (threads-1));
-        }
-
-      for (gint i = 0; i < threads; i++)
-        {
-          thread_data[i].func      = func;
-          thread_data[i].matrix    = &matrix;
-          thread_data[i].operation = operation;
-          thread_data[i].context   = context;
-          thread_data[i].output    = output;
-          thread_data[i].pending   = &pending;
-          thread_data[i].level     = level;
-          thread_data[i].success   = TRUE;
-        }
-
-      for (gint i = 1; i < threads; i++)
-        g_thread_pool_push (pool, &thread_data[i], NULL);
-      thread_process (&thread_data[0], g_object_ref (input));
-
-      while (g_atomic_int_get (&pending)) {};
+      ThreadData data;
+
+      data.func      = func;
+      data.matrix    = &matrix;
+      data.operation = operation;
+      data.context   = context;
+      data.input     = input;
+      data.output    = output;
+      data.roi       = result;
+      data.level     = level;
+
+      gegl_parallel_distribute_area (
+        result,
+        gegl_operation_get_min_threaded_sub_area (operation),
+        GEGL_SPLIT_STRATEGY_AUTO,
+        (GeglParallelDistributeAreaFunc) thread_process,
+        &data);
     }
   else
     {