/* filter_neon_intrinsics.c - NEON optimised filter functions
 *
 * Copyright (c) 2018 Cosmin Truta
 * Copyright (c) 2014,2016 Glenn Randers-Pehrson
 * Written by James Yu <james.yu at linaro.org>, October 2013.
 * Based on filter_neon.S, written by Mans Rullgard, 2011.
 *
 * This code is released under the libpng license.
 * For conditions of distribution and use, see the disclaimer
 * and license in png.h
 */
#include "../pngpriv.h"

#ifdef PNG_READ_SUPPORTED

/* This code requires -mfpu=neon on the command line: */
#if PNG_ARM_NEON_IMPLEMENTATION == 1 /* intrinsics code from pngpriv.h */
#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_ARM64)
#  include <arm64_neon.h>
#else
#  include <arm_neon.h>
#endif
/* libpng row pointers are not necessarily aligned to any particular boundary;
 * however, this code will only work with appropriate alignment. arm/arm_init.c
 * checks for this (and will not compile unless it is done). This code uses
 * variants of png_aligncast to avoid compiler warnings.
 */
#define png_ptr(type,pointer) png_aligncast(type *,pointer)
#define png_ptrc(type,pointer) png_aligncastconst(const type *,pointer)
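/* For illustration only (a sketch, not part of the build): given a row
 * pointer 'rp' whose alignment arm/arm_init.c has verified, these macros
 * produce typed pointers without alignment-cast warnings:
 *
 *    uint32_t *out = png_ptr(uint32_t, rp);
 *    const uint32_t *in = png_ptrc(uint32_t, pp);
 */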
/* The following relies on a variable 'temp_pointer' being declared with type
 * 'type'. This is written this way just to hide the GCC strict aliasing
 * warning; note that the code is safe because there never is an alias between
 * the input and output pointers.
 *
 * When compiling with MSVC ARM64, the png_ldr macro can't be passed directly
 * to vst4_lane_u32, because of an internal compiler error inside MSVC.
 * To avoid this compiler bug, we use a temporary variable (vdest_val) to store
 * the result of png_ldr.
 */
#define png_ldr(type,pointer)\
   (temp_pointer = png_ptr(type,pointer), *temp_pointer)
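/* A sketch of how png_ldr is used (it assumes a local 'uint32x2_t
 * *temp_pointer' is in scope, as in the loops below): the store
 *
 *    vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[0]), 0);
 *
 * first assigns &vdest.val[0] to temp_pointer and then dereferences it, so the
 * uint8x8_t lane is re-read as a uint32x2_t through a named pointer rather
 * than a direct type-punning cast that GCC's strict-aliasing analysis would
 * warn about.
 */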
#if PNG_ARM_NEON_OPT > 0
void
png_read_filter_row_up_neon(png_row_infop row_info, png_bytep row,
   png_const_bytep prev_row)
{
   png_bytep rp = row;
   png_bytep rp_stop = row + row_info->rowbytes;
   png_const_bytep pp = prev_row;

   png_debug(1, "in png_read_filter_row_up_neon");

   for (; rp < rp_stop; rp += 16, pp += 16)
   {
      uint8x16_t qrp, qpp;

      qrp = vld1q_u8(rp);
      qpp = vld1q_u8(pp);
      qrp = vaddq_u8(qrp, qpp);
      vst1q_u8(rp, qrp);
   }
}
void
png_read_filter_row_sub3_neon(png_row_infop row_info, png_bytep row,
   png_const_bytep prev_row)
{
   png_bytep rp = row;
   png_bytep rp_stop = row + row_info->rowbytes;

   uint8x16_t vtmp = vld1q_u8(rp);
   uint8x8x2_t *vrpt = png_ptr(uint8x8x2_t, &vtmp);
   uint8x8x2_t vrp = *vrpt;

   uint8x8x4_t vdest;
   vdest.val[3] = vdup_n_u8(0);

   png_debug(1, "in png_read_filter_row_sub3_neon");

   for (; rp < rp_stop;)
   {
      uint8x8_t vtmp1, vtmp2;
      uint32x2_t *temp_pointer;

      vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);
      vdest.val[0] = vadd_u8(vdest.val[3], vrp.val[0]);
      vtmp2 = vext_u8(vrp.val[0], vrp.val[1], 6);
      vdest.val[1] = vadd_u8(vdest.val[0], vtmp1);

      vtmp1 = vext_u8(vrp.val[1], vrp.val[1], 1);
      vdest.val[2] = vadd_u8(vdest.val[1], vtmp2);
      vdest.val[3] = vadd_u8(vdest.val[2], vtmp1);

      vtmp = vld1q_u8(rp + 12);
      vrpt = png_ptr(uint8x8x2_t, &vtmp);
      vrp = *vrpt;

      vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[0]), 0);
      rp += 3;
      vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[1]), 0);
      rp += 3;
      vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[2]), 0);
      rp += 3;
      vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[3]), 0);
      rp += 3;
   }

   PNG_UNUSED(prev_row)
}
void
png_read_filter_row_sub4_neon(png_row_infop row_info, png_bytep row,
   png_const_bytep prev_row)
{
   png_bytep rp = row;
   png_bytep rp_stop = row + row_info->rowbytes;

   uint8x8x4_t vdest;
   vdest.val[3] = vdup_n_u8(0);

   png_debug(1, "in png_read_filter_row_sub4_neon");

   for (; rp < rp_stop; rp += 16)
   {
      uint32x2x4_t vtmp = vld4_u32(png_ptr(uint32_t,rp));
      uint8x8x4_t *vrpt = png_ptr(uint8x8x4_t,&vtmp);
      uint8x8x4_t vrp = *vrpt;
      uint32x2x4_t *temp_pointer;
      uint32x2x4_t vdest_val;

      vdest.val[0] = vadd_u8(vdest.val[3], vrp.val[0]);
      vdest.val[1] = vadd_u8(vdest.val[0], vrp.val[1]);
      vdest.val[2] = vadd_u8(vdest.val[1], vrp.val[2]);
      vdest.val[3] = vadd_u8(vdest.val[2], vrp.val[3]);

      vdest_val = png_ldr(uint32x2x4_t, &vdest);
      vst4_lane_u32(png_ptr(uint32_t,rp), vdest_val, 0);
   }

   PNG_UNUSED(prev_row)
}
void
png_read_filter_row_avg3_neon(png_row_infop row_info, png_bytep row,
   png_const_bytep prev_row)
{
   png_bytep rp = row;
   png_const_bytep pp = prev_row;
   png_bytep rp_stop = row + row_info->rowbytes;

   uint8x16_t vtmp;
   uint8x8x2_t *vrpt;
   uint8x8x2_t vrp;
   uint8x8x4_t vdest;
   vdest.val[3] = vdup_n_u8(0);

   vtmp = vld1q_u8(rp);
   vrpt = png_ptr(uint8x8x2_t,&vtmp);
   vrp = *vrpt;

   png_debug(1, "in png_read_filter_row_avg3_neon");

   for (; rp < rp_stop; pp += 12)
   {
      uint8x8_t vtmp1, vtmp2, vtmp3;
      uint8x8x2_t *vppt;
      uint8x8x2_t vpp;
      uint32x2_t *temp_pointer;

      vtmp = vld1q_u8(pp);
      vppt = png_ptr(uint8x8x2_t,&vtmp);
      vpp = *vppt;

      vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);
      vdest.val[0] = vhadd_u8(vdest.val[3], vpp.val[0]);
      vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]);

      vtmp2 = vext_u8(vpp.val[0], vpp.val[1], 3);
      vtmp3 = vext_u8(vrp.val[0], vrp.val[1], 6);
      vdest.val[1] = vhadd_u8(vdest.val[0], vtmp2);
      vdest.val[1] = vadd_u8(vdest.val[1], vtmp1);

      vtmp2 = vext_u8(vpp.val[0], vpp.val[1], 6);
      vtmp1 = vext_u8(vrp.val[1], vrp.val[1], 1);

      vtmp = vld1q_u8(rp + 12);
      vrpt = png_ptr(uint8x8x2_t,&vtmp);
      vrp = *vrpt;

      vdest.val[2] = vhadd_u8(vdest.val[1], vtmp2);
      vdest.val[2] = vadd_u8(vdest.val[2], vtmp3);

      vtmp2 = vext_u8(vpp.val[1], vpp.val[1], 1);
      vdest.val[3] = vhadd_u8(vdest.val[2], vtmp2);
      vdest.val[3] = vadd_u8(vdest.val[3], vtmp1);

      vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[0]), 0);
      rp += 3;
      vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[1]), 0);
      rp += 3;
      vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[2]), 0);
      rp += 3;
      vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[3]), 0);
      rp += 3;
   }
}
void
png_read_filter_row_avg4_neon(png_row_infop row_info, png_bytep row,
   png_const_bytep prev_row)
{
   png_bytep rp = row;
   png_bytep rp_stop = row + row_info->rowbytes;
   png_const_bytep pp = prev_row;

   uint8x8x4_t vdest;
   vdest.val[3] = vdup_n_u8(0);

   png_debug(1, "in png_read_filter_row_avg4_neon");

   for (; rp < rp_stop; rp += 16, pp += 16)
   {
      uint32x2x4_t vtmp;
      uint8x8x4_t *vrpt, *vppt;
      uint8x8x4_t vrp, vpp;
      uint32x2x4_t *temp_pointer;
      uint32x2x4_t vdest_val;

      vtmp = vld4_u32(png_ptr(uint32_t,rp));
      vrpt = png_ptr(uint8x8x4_t,&vtmp);
      vrp = *vrpt;
      vtmp = vld4_u32(png_ptrc(uint32_t,pp));
      vppt = png_ptr(uint8x8x4_t,&vtmp);
      vpp = *vppt;

      vdest.val[0] = vhadd_u8(vdest.val[3], vpp.val[0]);
      vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]);
      vdest.val[1] = vhadd_u8(vdest.val[0], vpp.val[1]);
      vdest.val[1] = vadd_u8(vdest.val[1], vrp.val[1]);
      vdest.val[2] = vhadd_u8(vdest.val[1], vpp.val[2]);
      vdest.val[2] = vadd_u8(vdest.val[2], vrp.val[2]);
      vdest.val[3] = vhadd_u8(vdest.val[2], vpp.val[3]);
      vdest.val[3] = vadd_u8(vdest.val[3], vrp.val[3]);

      vdest_val = png_ldr(uint32x2x4_t, &vdest);
      vst4_lane_u32(png_ptr(uint32_t,rp), vdest_val, 0);
   }
}
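/* The Paeth predictor as defined by the PNG specification; a scalar sketch
 * (illustration only) of what the vector routine below computes per byte
 * lane, where a is the byte to the left, b the byte above and c the byte
 * above-left:
 *
 *    p = a + b - c;  pa = abs(p - a);  pb = abs(p - b);  pc = abs(p - c);
 *    if (pa <= pb && pa <= pc) return a;
 *    else if (pb <= pc) return b;
 *    else return c;
 *
 * Note that pa = |b - c|, pb = |a - c| and pc = |a + b - 2*c|, which is how
 * the NEON code computes them without ever forming p itself.
 */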
static uint8x8_t
paeth(uint8x8_t a, uint8x8_t b, uint8x8_t c)
{
   uint8x8_t d, e;
   uint16x8_t p1, pa, pb, pc;

   p1 = vaddl_u8(a, b); /* a + b */
   pc = vaddl_u8(c, c); /* c * 2 */
   pa = vabdl_u8(b, c); /* pa */
   pb = vabdl_u8(a, c); /* pb */
   pc = vabdq_u16(p1, pc); /* pc */

   p1 = vcleq_u16(pa, pb); /* pa <= pb */
   pa = vcleq_u16(pa, pc); /* pa <= pc */
   pb = vcleq_u16(pb, pc); /* pb <= pc */

   p1 = vandq_u16(p1, pa); /* pa <= pb && pa <= pc */

   d = vmovn_u16(pb); /* narrow the comparison masks back to 8 bits */
   e = vmovn_u16(p1);

   d = vbsl_u8(d, b, c); /* select b where pb <= pc, else c */
   e = vbsl_u8(e, a, d); /* select a where pa is smallest, else d */

   return e;
}
void
png_read_filter_row_paeth3_neon(png_row_infop row_info, png_bytep row,
   png_const_bytep prev_row)
{
   png_bytep rp = row;
   png_const_bytep pp = prev_row;
   png_bytep rp_stop = row + row_info->rowbytes;

   uint8x16_t vtmp;
   uint8x8x2_t *vrpt;
   uint8x8x2_t vrp;
   uint8x8_t vlast = vdup_n_u8(0);
   uint8x8x4_t vdest;
   vdest.val[3] = vdup_n_u8(0);

   vtmp = vld1q_u8(rp);
   vrpt = png_ptr(uint8x8x2_t,&vtmp);
   vrp = *vrpt;

   png_debug(1, "in png_read_filter_row_paeth3_neon");

   for (; rp < rp_stop; pp += 12)
   {
      uint8x8x2_t *vppt;
      uint8x8x2_t vpp;
      uint8x8_t vtmp1, vtmp2, vtmp3;
      uint32x2_t *temp_pointer;

      vtmp = vld1q_u8(pp);
      vppt = png_ptr(uint8x8x2_t,&vtmp);
      vpp = *vppt;

      vdest.val[0] = paeth(vdest.val[3], vpp.val[0], vlast);
      vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]);

      vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 3);
      vtmp2 = vext_u8(vpp.val[0], vpp.val[1], 3);
      vdest.val[1] = paeth(vdest.val[0], vtmp2, vpp.val[0]);
      vdest.val[1] = vadd_u8(vdest.val[1], vtmp1);

      vtmp1 = vext_u8(vrp.val[0], vrp.val[1], 6);
      vtmp3 = vext_u8(vpp.val[0], vpp.val[1], 6);
      vdest.val[2] = paeth(vdest.val[1], vtmp3, vtmp2);
      vdest.val[2] = vadd_u8(vdest.val[2], vtmp1);

      vtmp1 = vext_u8(vrp.val[1], vrp.val[1], 1);
      vtmp2 = vext_u8(vpp.val[1], vpp.val[1], 1);

      vtmp = vld1q_u8(rp + 12);
      vrpt = png_ptr(uint8x8x2_t,&vtmp);
      vrp = *vrpt;

      vdest.val[3] = paeth(vdest.val[2], vtmp2, vtmp3);
      vdest.val[3] = vadd_u8(vdest.val[3], vtmp1);

      vlast = vtmp2;

      vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[0]), 0);
      rp += 3;
      vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[1]), 0);
      rp += 3;
      vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[2]), 0);
      rp += 3;
      vst1_lane_u32(png_ptr(uint32_t,rp), png_ldr(uint32x2_t,&vdest.val[3]), 0);
      rp += 3;
   }
}
void
png_read_filter_row_paeth4_neon(png_row_infop row_info, png_bytep row,
   png_const_bytep prev_row)
{
   png_bytep rp = row;
   png_bytep rp_stop = row + row_info->rowbytes;
   png_const_bytep pp = prev_row;

   uint8x8_t vlast = vdup_n_u8(0);
   uint8x8x4_t vdest;
   vdest.val[3] = vdup_n_u8(0);

   png_debug(1, "in png_read_filter_row_paeth4_neon");

   for (; rp < rp_stop; rp += 16, pp += 16)
   {
      uint32x2x4_t vtmp;
      uint8x8x4_t *vrpt, *vppt;
      uint8x8x4_t vrp, vpp;
      uint32x2x4_t *temp_pointer;
      uint32x2x4_t vdest_val;

      vtmp = vld4_u32(png_ptr(uint32_t,rp));
      vrpt = png_ptr(uint8x8x4_t,&vtmp);
      vrp = *vrpt;
      vtmp = vld4_u32(png_ptrc(uint32_t,pp));
      vppt = png_ptr(uint8x8x4_t,&vtmp);
      vpp = *vppt;

      vdest.val[0] = paeth(vdest.val[3], vpp.val[0], vlast);
      vdest.val[0] = vadd_u8(vdest.val[0], vrp.val[0]);
      vdest.val[1] = paeth(vdest.val[0], vpp.val[1], vpp.val[0]);
      vdest.val[1] = vadd_u8(vdest.val[1], vrp.val[1]);
      vdest.val[2] = paeth(vdest.val[1], vpp.val[2], vpp.val[1]);
      vdest.val[2] = vadd_u8(vdest.val[2], vrp.val[2]);
      vdest.val[3] = paeth(vdest.val[2], vpp.val[3], vpp.val[2]);
      vdest.val[3] = vadd_u8(vdest.val[3], vrp.val[3]);

      vlast = vpp.val[3];

      vdest_val = png_ldr(uint32x2x4_t, &vdest);
      vst4_lane_u32(png_ptr(uint32_t,rp), vdest_val, 0);
   }
}
#endif /* PNG_ARM_NEON_OPT > 0 */
#endif /* PNG_ARM_NEON_IMPLEMENTATION == 1 (intrinsics) */
#endif /* PNG_READ_SUPPORTED */