Commit ed055fa6 authored by Helvetix Victorinox's avatar Helvetix Victorinox

app/composite/gimp-composite-x86.h app/composite/gimp-composite-sse.c

* app/composite/gimp-composite-x86.h
* app/composite/gimp-composite-sse.c
* app/composite/gimp-composite-sse2.c: Fixed a bunch of
  warnings due to bad type casting.
parent cbdcd0ee
2004-08-29  Helvetix Victorinox  <helvetix@gimp.org>

	* app/composite/gimp-composite-x86.h
	* app/composite/gimp-composite-sse.c
	* app/composite/gimp-composite-sse2.c: Fixed a bunch of
	warnings due to bad type casting.
* app/composite/gimp-composite-mmx.c
* app/composite/gimp-composite-sse.c
* app/composite/gimp-composite-x86.h
......
@@ -1310,7 +1310,6 @@ gimp_composite_subtract_rgba8_rgba8_rgba8_sse (GimpCompositeContext *_op)
 void
 gimp_composite_swap_rgba8_rgba8_rgba8_sse (GimpCompositeContext *_op)
 {
-  uint64 *d = (uint64 *) _op->D;
   uint64 *a = (uint64 *) _op->A;
   uint64 *b = (uint64 *) _op->B;
   gulong n_pixels = _op->n_pixels;
......
@@ -91,9 +91,9 @@ gimp_composite_addition_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op)
   uint64 *d;
   uint64 *a;
   uint64 *b;
-  uint128 *D = (uint64 *) _op->D;
-  uint128 *A = (uint64 *) _op->A;
-  uint128 *B = (uint64 *) _op->B;
+  uint128 *D = (uint128 *) _op->D;
+  uint128 *A = (uint128 *) _op->A;
+  uint128 *B = (uint128 *) _op->B;
   gulong n_pixels = _op->n_pixels;

   asm volatile ("  movdqu %0,%%xmm0\n"
@@ -123,9 +123,9 @@ gimp_composite_addition_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op)
       D++;
     }

-  a = A;
-  b = B;
-  d = D;
+  a = (uint64 *) A;
+  b = (uint64 *) B;
+  d = (uint64 *) D;

   for (; n_pixels >= 2; n_pixels -= 2)
     {
@@ -182,9 +182,9 @@ gimp_composite_darken_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op)
   uint64 *d;
   uint64 *a;
   uint64 *b;
-  uint128 *D = (uint64 *) _op->D;
-  uint128 *A = (uint64 *) _op->A;
-  uint128 *B = (uint64 *) _op->B;
+  uint128 *D = (uint128 *) _op->D;
+  uint128 *A = (uint128 *) _op->A;
+  uint128 *B = (uint128 *) _op->B;
   gulong n_pixels = _op->n_pixels;

   for (; n_pixels >= 4; n_pixels -= 4)
@@ -201,9 +201,9 @@ gimp_composite_darken_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op)
       D++;
     }

-  a = A;
-  b = B;
-  d = D;
+  a = (uint64 *) A;
+  b = (uint64 *) B;
+  d = (uint64 *) D;

   for (; n_pixels >= 2; n_pixels -= 2)
     {
@@ -238,9 +238,9 @@ gimp_composite_difference_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op)
   uint64 *d;
   uint64 *a;
   uint64 *b;
-  uint128 *D = (uint64 *) _op->D;
-  uint128 *A = (uint64 *) _op->A;
-  uint128 *B = (uint64 *) _op->B;
+  uint128 *D = (uint128 *) _op->D;
+  uint128 *A = (uint128 *) _op->A;
+  uint128 *B = (uint128 *) _op->B;
   gulong n_pixels = _op->n_pixels;

   asm volatile ("  movq %0,%%mm0\n"
@@ -272,9 +272,9 @@ gimp_composite_difference_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op)
       D++;
     }

-  a = A;
-  b = B;
-  d = D;
+  a = (uint64 *) A;
+  b = (uint64 *) B;
+  d = (uint64 *) D;

   for (; n_pixels >= 2; n_pixels -= 2)
     {
@@ -465,9 +465,9 @@ gimp_composite_grain_extract_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op)
   uint64 *d;
   uint64 *a;
   uint64 *b;
-  uint128 *D = (uint64 *) _op->D;
-  uint128 *A = (uint64 *) _op->A;
-  uint128 *B = (uint64 *) _op->B;
+  uint128 *D = (uint128 *) _op->D;
+  uint128 *A = (uint128 *) _op->A;
+  uint128 *B = (uint128 *) _op->B;
   gulong n_pixels = _op->n_pixels;

   asm volatile ("  movq %0,%%mm0\n"
@@ -515,9 +515,9 @@ gimp_composite_grain_extract_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op)
       D++;
     }

-  a = A;
-  b = B;
-  d = D;
+  a = (uint64 *) A;
+  b = (uint64 *) B;
+  d = (uint64 *) D;

   for (; n_pixels >= 2; n_pixels -= 2)
     {
@@ -585,9 +585,9 @@ gimp_composite_lighten_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op)
   uint64 *d;
   uint64 *a;
   uint64 *b;
-  uint128 *D = (uint64 *) _op->D;
-  uint128 *A = (uint64 *) _op->A;
-  uint128 *B = (uint64 *) _op->B;
+  uint128 *D = (uint128 *) _op->D;
+  uint128 *A = (uint128 *) _op->A;
+  uint128 *B = (uint128 *) _op->B;
   gulong n_pixels = _op->n_pixels;

   asm volatile ("movq %0,%%mm0" : : "m" (*rgba8_alpha_mask_64) : "%mm0");
@@ -612,9 +612,9 @@ gimp_composite_lighten_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op)
       D++;
     }

-  a = A;
-  b = B;
-  d = D;
+  a = (uint64 *) A;
+  b = (uint64 *) B;
+  d = (uint64 *) D;

   for (; n_pixels >= 2; n_pixels -= 2)
     {
...@@ -662,9 +662,9 @@ gimp_composite_subtract_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op) ...@@ -662,9 +662,9 @@ gimp_composite_subtract_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op)
uint64 *d; uint64 *d;
uint64 *a; uint64 *a;
uint64 *b; uint64 *b;
uint128 *D = (uint64 *) _op->D; uint128 *D = (uint128 *) _op->D;
uint128 *A = (uint64 *) _op->A; uint128 *A = (uint128 *) _op->A;
uint128 *B = (uint64 *) _op->B; uint128 *B = (uint128 *) _op->B;
gulong n_pixels = _op->n_pixels; gulong n_pixels = _op->n_pixels;
asm volatile (" movq %0,%%mm0\n" asm volatile (" movq %0,%%mm0\n"
...@@ -694,9 +694,9 @@ gimp_composite_subtract_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op) ...@@ -694,9 +694,9 @@ gimp_composite_subtract_rgba8_rgba8_rgba8_sse2 (GimpCompositeContext *_op)
D++; D++;
} }
a = A; a = (uint64 *) A;
b = B; b = (uint64 *) B;
d = D; d = (uint64 *) D;
for (; n_pixels >= 2; n_pixels -= 2) for (; n_pixels >= 2; n_pixels -= 2)
{ {
......
@@ -236,7 +236,7 @@
             "\tpsrlw $8, %%"#opr2"\n"

 typedef unsigned long long uint64;
-typedef struct { uint64 __uint64[2] } uint128;
+typedef struct { uint64 __uint64[2]; } uint128;

 extern const guint32 rgba8_alpha_mask_64[2];
 extern const guint32 rgba8_b1_64[2];
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment