void get_pixels_mvi(int16_t *restrict block,
                    const uint8_t *restrict pixels, int line_size)
        uint64_t p = ldq(pixels);        /* one aligned row of 8 pixels  */
        stq(unpkbw(p),       block);     /* low 4 bytes -> words 0..3    */
        stq(unpkbw(p >> 32), block + 4); /* high 4 bytes -> words 4..7   */
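/* A portable model of the MVI unpkbw instruction used above -- my
 * sketch, not part of the original file, assuming the documented
 * semantics: the low four bytes of the operand are zero-extended into
 * the four 16-bit lanes of the result. */
static uint64_t unpkbw_ref(uint64_t x)
{
    uint64_t r = 0;
    for (int i = 0; i < 4; i++)
        r |= ((x >> (8 * i)) & 0xff) << (16 * i);
    return r;
}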
void diff_pixels_mvi(int16_t *block, const uint8_t *s1, const uint8_t *s2,
                     int stride)
{
    uint64_t mask = 0x4040;  /* replicated into all 8 bytes before use */
        uint64_t x, y, c, d, a;
        uint64_t signs;
        /* ... */
        /* interleave difference bytes (d) with sign bytes (0x00/0xff)
           to form sign-extended 16-bit coefficients */
        stq(unpkbw(d)       | (unpkbw(signs)       << 8), block);
        stq(unpkbw(d >> 32) | (unpkbw(signs >> 32) << 8), block + 4);
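/* Scalar reference for one row of diff_pixels_mvi (my sketch, for
 * clarity): each 16-bit output is simply s1[i] - s2[i].  The MVI
 * version must work harder because a single 64-bit subtraction lets
 * borrows cross byte lanes; the elided fix-up code above compensates
 * for those borrows. */
static void diff_row_ref(int16_t *block, const uint8_t *s1,
                         const uint8_t *s2)
{
    for (int i = 0; i < 8; i++)
        block[i] = (int16_t) (s1[i] - s2[i]);
}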
static inline uint64_t avg2(uint64_t a, uint64_t b)
{
    /* per-byte rounded average: (a + b + 1) >> 1 in each lane */
    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}
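/* BYTE_VEC's body is not among the fragments above, only its prototype
 * in the list at the end of the file; a minimal sketch consistent with
 * that prototype (replicate a byte into all eight lanes), plus a
 * hypothetical self-test of the avg2 identity.  In the original file
 * BYTE_VEC is defined before avg2. */
#include <assert.h>

static uint64_t BYTE_VEC(uint64_t x)
{
    x |= x << 8;
    x |= x << 16;
    x |= x << 32;
    return x;
}

static void avg2_selftest(void)
{
    /* avg2(a, b) == (a + b + 1) >> 1 in every lane; masking with 0xfe
       before the shift keeps one lane's low bit out of its neighbour */
    for (unsigned a = 0; a < 256; a++)
        for (unsigned b = 0; b < 256; b++)
            assert((avg2(BYTE_VEC(a), BYTE_VEC(b)) & 0xff)
                   == ((a + b + 1) >> 1));
}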
static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    /* per-byte rounded average of four rows: (l1+l2+l3+l4+2) >> 2 */
    uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2)
                + ((l2 & ~BYTE_VEC(0x03)) >> 2)
                + ((l3 & ~BYTE_VEC(0x03)) >> 2)
                + ((l4 & ~BYTE_VEC(0x03)) >> 2);
    uint64_t r2 = (((l1 & BYTE_VEC(0x03)) + (l2 & BYTE_VEC(0x03))
                  + (l3 & BYTE_VEC(0x03)) + (l4 & BYTE_VEC(0x03))
                  + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);
    return r1 + r2;
}
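/* Splitting each lane into its high six and low two bits lets the four
 * operands be summed without carries crossing lane boundaries: four
 * 6-bit values (and four 2-bit values plus the rounding term) both fit
 * in a byte.  A hypothetical self-test against the scalar definition: */
static void avg4_selftest(void)
{
    for (unsigned a = 0; a < 256; a += 3)
        for (unsigned b = 0; b < 256; b += 5)
            for (unsigned c = 0; c < 256; c += 7)
                for (unsigned d = 0; d < 256; d += 11)
                    assert((avg4(BYTE_VEC(a), BYTE_VEC(b),
                                 BYTE_VEC(c), BYTE_VEC(d)) & 0xff)
                           == ((a + b + c + d + 2) >> 2));
}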
    /* pix_abs8x8_mvi: */
    if ((size_t) pix2 & 0x7) {
        /* pix2 unaligned: load with uldq */
        /* ... */
            result += perr(p1, p2);     /* perr: SAD of 8 byte pairs */
        /* ... */
    } else {
        /* ... */
            result += perr(p1, p2);
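/* A portable model of the MVI perr ("pixel error") instruction that
 * both branches above rely on: the sum of absolute differences of the
 * eight byte lanes.  My sketch for reference, not part of the file. */
static uint64_t perr_ref(uint64_t a, uint64_t b)
{
    uint64_t sum = 0;
    for (int i = 0; i < 8; i++) {
        int x = (int) ((a >> (8 * i)) & 0xff);
        int y = (int) ((b >> (8 * i)) & 0xff);
        sum += (uint64_t) (x > y ? x - y : y - x);
    }
    return sum;
}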
    /* plain 16x16 SAD, unaligned and aligned variants: */
    if ((size_t) pix2 & 0x7) {
        /* pix2 unaligned: merge adjacent quadwords with extql/extqh */
            uint64_t p1_l, p1_r, p2_l, p2_r, t;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            t    = ldq_u(pix2 + 8);
            p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
            p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);
            /* ... */
            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
    } else {
            uint64_t p1_l, p1_r, p2_l, p2_r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            p2_l = ldq(pix2);
            p2_r = ldq(pix2 + 8);
            /* ... */
            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
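/* The extql/extqh pair is Alpha's standard unaligned-load idiom: extql
 * shifts the lower quadword down by the pointer's misalignment, extqh
 * shifts the upper quadword up, and OR-ing the two yields the eight
 * bytes starting at the unaligned address.  A sketch of uldq, used
 * later in this file, built from the same primitives (assuming the
 * asm.h macros accept the pointer as their second operand, as the code
 * above does): */
static uint64_t uldq_sketch(const uint8_t *p)
{
    return extql(ldq_u(p), p) | extqh(ldq_u(p + 8), p);
}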
    /* pix_abs16x16_x2_mvi: half-pel in x; dispatch on pix2 alignment */
    uint64_t disalign = (size_t) pix2 & 0x7;
        /* disalign == 0: pix2 aligned */
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq(pix2);
            r    = ldq(pix2 + 8);
            /* average each byte with its right-hand neighbour */
            p2_l = avg2(l, (l >> 8) | ((uint64_t) r << 56));
            p2_r = avg2(r, (r >> 8) | ((uint64_t) pix2[16] << 56));
            /* ... */
            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        /* disalign == 7 is special: disalign + 1 would be 8, which
           extqh treats as 0, so the shifted row is just m (resp. r) */
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, m, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq_u(pix2);
            m    = ldq_u(pix2 + 8);
            r    = ldq_u(pix2 + 16);
            p2_l = avg2(extql(l, disalign) | extqh(m, disalign), m);
            p2_r = avg2(extql(m, disalign) | extqh(r, disalign), r);
            /* ... */
            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
        /* general case: build both the row and the row shifted by one
           pixel with two extract chains */
            uint64_t disalign1 = disalign + 1;
            uint64_t p1_l, p1_r, p2_l, p2_r;
            uint64_t l, m, r;

            p1_l = ldq(pix1);
            p1_r = ldq(pix1 + 8);
            l    = ldq_u(pix2);
            m    = ldq_u(pix2 + 8);
            r    = ldq_u(pix2 + 16);
            p2_l = avg2(extql(l, disalign)  | extqh(m, disalign),
                        extql(l, disalign1) | extqh(m, disalign1));
            p2_r = avg2(extql(m, disalign)  | extqh(r, disalign),
                        extql(m, disalign1) | extqh(r, disalign1));
            /* ... */
            result += perr(p1_l, p2_l)
                    + perr(p1_r, p2_r);
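/* What the three cases above compute, in plain scalar form (my
 * reference sketch, not the original code): the SAD against pix2
 * interpolated half a pixel to the right. */
#include <stdlib.h>

static int sad16_x2_ref(const uint8_t *pix1, const uint8_t *pix2,
                        int line_size, int h)
{
    int result = 0;
    for (; h > 0; h--) {
        for (int i = 0; i < 16; i++)
            result += abs(pix1[i] - ((pix2[i] + pix2[i + 1] + 1) >> 1));
        pix1 += line_size;
        pix2 += line_size;
    }
    return result;
}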
    /* pix_abs16x16_y2_mvi: */
    if ((size_t) pix2 & 0x7) {
        uint64_t t, p2_l, p2_r;

        /* prime p2_l/p2_r with the first, unaligned reference row */
        t    = ldq_u(pix2 + 8);
        p2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
        p2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);

        do {
            uint64_t p1_l, p1_r, np2_l, np2_r;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            pix2 += line_size;
            t     = ldq_u(pix2 + 8);
            np2_l = extql(ldq_u(pix2), pix2) | extqh(t, pix2);
            np2_r = extql(t, pix2) | extqh(ldq_u(pix2 + 16), pix2);

            result += perr(p1_l, avg2(p2_l, np2_l))
                    + perr(p1_r, avg2(p2_r, np2_r));

            pix1 += line_size;
            p2_l  = np2_l;      /* this row becomes the previous row */
            p2_r  = np2_r;
        } while (--h);
    } else {
        uint64_t p2_l, p2_r;

        p2_l = ldq(pix2);
        p2_r = ldq(pix2 + 8);
        do {
            uint64_t p1_l, p1_r, np2_l, np2_r;

            p1_l  = ldq(pix1);
            p1_r  = ldq(pix1 + 8);
            pix2 += line_size;
            np2_l = ldq(pix2);
            np2_r = ldq(pix2 + 8);
            result += perr(p1_l, avg2(p2_l, np2_l))
                    + perr(p1_r, avg2(p2_r, np2_r));
            /* ... */
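/* Scalar reference for the function above (my sketch; abs from
 * <stdlib.h>, included earlier): the SAD against pix2 interpolated
 * half a pixel downward.  Keeping the previous row in p2_l/p2_r means
 * each reference row is loaded only once. */
static int sad16_y2_ref(const uint8_t *pix1, const uint8_t *pix2,
                        int line_size, int h)
{
    int result = 0;
    for (; h > 0; h--) {
        for (int i = 0; i < 16; i++)
            result += abs(pix1[i]
                          - ((pix2[i] + pix2[i + line_size] + 1) >> 1));
        pix1 += line_size;
        pix2 += line_size;
    }
    return result;
}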
    /* pix_abs16x16_xy2_mvi: */
    uint64_t p1_l, p1_r;
    uint64_t p2_l, p2_r, p2_x;

    p1_l = ldq(pix1);
    p1_r = ldq(pix1 + 8);

    if ((size_t) pix2 & 0x7) {
        p2_l = uldq(pix2);
        p2_r = uldq(pix2 + 8);
        p2_x = (uint64_t) pix2[16] << 56;   /* 17th pixel of the row */
    } else {
        p2_l = ldq(pix2);
        p2_r = ldq(pix2 + 8);
        p2_x = ldq(pix2 + 16) << 56;
    }
    do {
        uint64_t np1_l, np1_r;
        uint64_t np2_l, np2_r, np2_x;

        pix1 += line_size;
        pix2 += line_size;
        np1_l = ldq(pix1);
        np1_r = ldq(pix1 + 8);

        if ((size_t) pix2 & 0x7) {
            np2_l = uldq(pix2);
            np2_r = uldq(pix2 + 8);
            np2_x = (uint64_t) pix2[16] << 56;
        } else {
            np2_l = ldq(pix2);
            np2_r = ldq(pix2 + 8);
            np2_x = ldq(pix2 + 16) << 56;
        }

        /* avg4 of this row, this row shifted one pixel, the next row,
           and the next row shifted one pixel */
        result += perr(p1_l,
                       avg4( p2_l, ( p2_l >> 8) | ((uint64_t)  p2_r << 56),
                            np2_l, (np2_l >> 8) | ((uint64_t) np2_r << 56)))
                + perr(p1_r,
                       avg4( p2_r, ( p2_r >> 8) | p2_x,
                            np2_r, (np2_r >> 8) | np2_x));
        /* ... */
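/* Scalar reference (my sketch; abs from <stdlib.h>, included earlier):
 * the SAD against the four-pixel rounded average, i.e. half-pel
 * interpolation in both x and y. */
static int sad16_xy2_ref(const uint8_t *pix1, const uint8_t *pix2,
                         int line_size, int h)
{
    int result = 0;
    for (; h > 0; h--) {
        for (int i = 0; i < 16; i++)
            result += abs(pix1[i]
                          - ((pix2[i] + pix2[i + 1]
                              + pix2[i + line_size]
                              + pix2[i + line_size + 1] + 2) >> 2));
        pix1 += line_size;
        pix2 += line_size;
    }
    return result;
}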
/* Prototypes of the functions defined in this file: */
static uint64_t BYTE_VEC(uint64_t x);
static uint64_t avg2(uint64_t a, uint64_t b);
static uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4);
void get_pixels_mvi(int16_t *restrict block,
                    const uint8_t *restrict pixels, int line_size);
void diff_pixels_mvi(int16_t *block, const uint8_t *s1, const uint8_t *s2,
                     int stride);
int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
static const uint16_t mask[17];