aom: Add SVE2 implementation of HBD convolve_y_sr for 8-tap filters

From e544a33b7fcf28ba2c40f49d92fdd86639ab692c Mon Sep 17 00:00:00 2001
From: Salome Thirot <[EMAIL REDACTED]>
Date: Mon, 12 Feb 2024 15:36:49 +0000
Subject: [PATCH] Add SVE2 implementation of HBD convolve_y_sr for 8-tap
 filters

Add SVE2 implementation of av1_highbd_convolve_y_sr for 8-tap filters as
well as the corresponding tests.
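
The implementation transposes the input into groups of four rows and
uses 16-bit dot products accumulating into 64-bit sums, merging newly
loaded rows into the previous block with TBL2 permutes. Filters other
than 8-tap fall back to the existing Neon implementation, and block
sizes with a width or height of 2 fall back to C. The new tests can be
run with e.g. test_libaom --gtest_filter='SVE2/AV1ConvolveYHighbdTest.*'.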

Change-Id: I2e7d146f99b503e4f8ee885803ea53cf993afee1
---
 av1/common/arm/highbd_convolve_sve2.c | 306 ++++++++++++++++++++++++++
 av1/common/av1_rtcd_defs.pl           |   2 +-
 test/av1_convolve_test.cc             |   5 +
 3 files changed, 312 insertions(+), 1 deletion(-)

diff --git a/av1/common/arm/highbd_convolve_sve2.c b/av1/common/arm/highbd_convolve_sve2.c
index 675fc8743..721557894 100644
--- a/av1/common/arm/highbd_convolve_sve2.c
+++ b/av1/common/arm/highbd_convolve_sve2.c
@@ -397,3 +397,309 @@ void av1_highbd_convolve_x_sr_sve2(const uint16_t *src, int src_stride,
   highbd_convolve_x_sr_4tap_sve2(src + 2, src_stride, dst, dst_stride, w, h,
                                  x_filter_ptr, conv_params, bd);
 }
+
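+// The wrapped-around entries in each row below (0, 1, 2 in the low half and
+// 4, 5, 6 in the high half) index the second vector of a TBL2 pair; at
+// runtime they are offset by the true SVE vector length so that they select
+// the newly loaded rows (see highbd_convolve_y_sr_8tap_sve2 below).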
+// clang-format off
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdMergeBlockTbl[24]) = {
+  // Shift left and insert new last column in transposed 4x4 block.
+  1, 2, 3, 0, 5, 6, 7, 4,
+  // Shift left and insert two new columns in transposed 4x4 block.
+  2, 3, 0, 1, 6, 7, 4, 5,
+  // Shift left and insert three new columns in transposed 4x4 block.
+  3, 0, 1, 2, 7, 4, 5, 6,
+};
+// clang-format on
+
+static INLINE void transpose_concat_4x4(int16x4_t s0, int16x4_t s1,
+                                        int16x4_t s2, int16x4_t s3,
+                                        int16x8_t res[2]) {
+  // Transpose 16-bit elements and concatenate result rows as follows:
+  // s0: 00, 01, 02, 03
+  // s1: 10, 11, 12, 13
+  // s2: 20, 21, 22, 23
+  // s3: 30, 31, 32, 33
+  //
+  // res[0]: 00 10 20 30 01 11 21 31
+  // res[1]: 02 12 22 32 03 13 23 33
+
+  int16x8_t s0q = vcombine_s16(s0, vdup_n_s16(0));
+  int16x8_t s1q = vcombine_s16(s1, vdup_n_s16(0));
+  int16x8_t s2q = vcombine_s16(s2, vdup_n_s16(0));
+  int16x8_t s3q = vcombine_s16(s3, vdup_n_s16(0));
+
+  int32x4_t s01 = vreinterpretq_s32_s16(vzip1q_s16(s0q, s1q));
+  int32x4_t s23 = vreinterpretq_s32_s16(vzip1q_s16(s2q, s3q));
+
+  int32x4x2_t s0123 = vzipq_s32(s01, s23);
+
+  res[0] = vreinterpretq_s16_s32(s0123.val[0]);
+  res[1] = vreinterpretq_s16_s32(s0123.val[1]);
+}
+
+static INLINE void transpose_concat_8x4(int16x8_t s0, int16x8_t s1,
+                                        int16x8_t s2, int16x8_t s3,
+                                        int16x8_t res[4]) {
+  // Transpose 16-bit elements and concatenate result rows as follows:
+  // s0: 00, 01, 02, 03, 04, 05, 06, 07
+  // s1: 10, 11, 12, 13, 14, 15, 16, 17
+  // s2: 20, 21, 22, 23, 24, 25, 26, 27
+  // s3: 30, 31, 32, 33, 34, 35, 36, 37
+  //
+  // res[0]: 00 10 20 30 01 11 21 31
+  // res[1]: 02 12 22 32 03 13 23 33
+  // res[2]: 04 14 24 34 05 15 25 35
+  // res[3]: 06 16 26 36 07 17 27 37
+
+  int16x8x2_t tr01_16 = vzipq_s16(s0, s1);
+  int16x8x2_t tr23_16 = vzipq_s16(s2, s3);
+
+  int32x4x2_t tr01_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[0]),
+                                  vreinterpretq_s32_s16(tr23_16.val[0]));
+  int32x4x2_t tr23_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[1]),
+                                  vreinterpretq_s32_s16(tr23_16.val[1]));
+
+  res[0] = vreinterpretq_s16_s32(tr01_32.val[0]);
+  res[1] = vreinterpretq_s16_s32(tr01_32.val[1]);
+  res[2] = vreinterpretq_s16_s32(tr23_32.val[0]);
+  res[3] = vreinterpretq_s16_s32(tr23_32.val[1]);
+}
+
+static INLINE void aom_tbl2x4_s16(int16x8_t t0[4], int16x8_t t1[4],
+                                  uint16x8_t tbl, int16x8_t res[4]) {
+  res[0] = aom_tbl2_s16(t0[0], t1[0], tbl);
+  res[1] = aom_tbl2_s16(t0[1], t1[1], tbl);
+  res[2] = aom_tbl2_s16(t0[2], t1[2], tbl);
+  res[3] = aom_tbl2_s16(t0[3], t1[3], tbl);
+}
+
+static INLINE void aom_tbl2x2_s16(int16x8_t t0[2], int16x8_t t1[2],
+                                  uint16x8_t tbl, int16x8_t res[2]) {
+  res[0] = aom_tbl2_s16(t0[0], t1[0], tbl);
+  res[1] = aom_tbl2_s16(t0[1], t1[1], tbl);
+}
+
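+// Compute four output pixels of the vertical convolution. samples_lo[i] and
+// samples_hi[i] hold the input rows for filter taps 0-3 and 4-7 of two
+// adjacent columns in transposed form, so each lane-indexed dot product
+// accumulates four 16-bit products into one 64-bit sum per column.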
+static INLINE uint16x4_t highbd_convolve8_4_y(int16x8_t samples_lo[2],
+                                              int16x8_t samples_hi[2],
+                                              int16x8_t filter,
+                                              uint16x4_t max) {
+  int64x2_t sum01 =
+      aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[0], filter, 0);
+  sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+  int64x2_t sum23 =
+      aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[1], filter, 0);
+  sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+  int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+  uint16x4_t res = vqrshrun_n_s32(sum0123, FILTER_BITS);
+  return vmin_u16(res, max);
+}
+
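+// As highbd_convolve8_4_y above, but computing eight output pixels per call.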
+static INLINE uint16x8_t highbd_convolve8_8_y(int16x8_t samples_lo[4],
+                                              int16x8_t samples_hi[4],
+                                              int16x8_t filter,
+                                              uint16x8_t max) {
+  int64x2_t sum01 =
+      aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[0], filter, 0);
+  sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+  int64x2_t sum23 =
+      aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[1], filter, 0);
+  sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+  int64x2_t sum45 =
+      aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[2], filter, 0);
+  sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+  int64x2_t sum67 =
+      aom_svdot_lane_s16(vdupq_n_s64(0), samples_lo[3], filter, 0);
+  sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+  int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+  int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+  uint16x8_t res = vcombine_u16(vqrshrun_n_s32(sum0123, FILTER_BITS),
+                                vqrshrun_n_s32(sum4567, FILTER_BITS));
+  return vminq_u16(res, max);
+}
+
+static void highbd_convolve_y_sr_8tap_sve2(
+    const uint16_t *src, ptrdiff_t src_stride, uint16_t *dst,
+    ptrdiff_t dst_stride, int width, int height, const int16_t *filter_y,
+    int bd) {
+  assert(width >= 4 && height >= 4);
+
+  const int16x8_t y_filter = vld1q_s16(filter_y);
+
+  uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Offset the indices by the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
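+  // svcnth() returns the number of 16-bit lanes in one SVE vector: 8 for
+  // 128-bit vectors, 16 for 256-bit vectors, and so on.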
+  uint16x8_t correction0 =
+      vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+  merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+
+  uint16x8_t correction1 =
+      vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+  merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+  uint16x8_t correction2 =
+      vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+  merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+  if (width == 4) {
+    const uint16x4_t max = vdup_n_u16((1 << bd) - 1);
+    const int16_t *s = (const int16_t *)src;
+
+    int16x4_t s0, s1, s2, s3, s4, s5, s6;
+    load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+    s += 7 * src_stride;
+
+    // This operation combines a conventional transpose and the sample permute
+    // required before computing the dot product.
+    int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+    transpose_concat_4x4(s0, s1, s2, s3, s0123);
+    transpose_concat_4x4(s1, s2, s3, s4, s1234);
+    transpose_concat_4x4(s2, s3, s4, s5, s2345);
+    transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+    do {
+      int16x4_t s7, s8, s9, s10;
+      load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+      int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+      // Transpose and shuffle the 4 lines that were loaded.
+      transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+      // Merge new data into block from previous iteration.
+      aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+      aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+      aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+      uint16x4_t d0 = highbd_convolve8_4_y(s0123, s4567, y_filter, max);
+      uint16x4_t d1 = highbd_convolve8_4_y(s1234, s5678, y_filter, max);
+      uint16x4_t d2 = highbd_convolve8_4_y(s2345, s6789, y_filter, max);
+      uint16x4_t d3 = highbd_convolve8_4_y(s3456, s789A, y_filter, max);
+
+      store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+      // Prepare block for next iteration - re-using as much as possible.
+      // Shuffle everything up four rows.
+      s0123[0] = s4567[0];
+      s0123[1] = s4567[1];
+      s1234[0] = s5678[0];
+      s1234[1] = s5678[1];
+      s2345[0] = s6789[0];
+      s2345[1] = s6789[1];
+      s3456[0] = s789A[0];
+      s3456[1] = s789A[1];
+      s += 4 * src_stride;
+      dst += 4 * dst_stride;
+      height -= 4;
+    } while (height != 0);
+  } else {
+    const uint16x8_t max = vdupq_n_u16((1 << bd) - 1);
+
+    do {
+      int h = height;
+      const int16_t *s = (const int16_t *)src;
+      uint16_t *d = dst;
+
+      int16x8_t s0, s1, s2, s3, s4, s5, s6;
+      load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+      s += 7 * src_stride;
+
+      // This operation combines a conventional transpose and the sample permute
+      // required before computing the dot product.
+      int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+      transpose_concat_8x4(s0, s1, s2, s3, s0123);
+      transpose_concat_8x4(s1, s2, s3, s4, s1234);
+      transpose_concat_8x4(s2, s3, s4, s5, s2345);
+      transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+      do {
+        int16x8_t s7, s8, s9, s10;
+        load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+        int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+        // Transpose and shuffle the 4 lines that were loaded.
+        transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+        // Merge new data into block from previous iteration.
+        aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+        aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+        aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+        uint16x8_t d0 = highbd_convolve8_8_y(s0123, s4567, y_filter, max);
+        uint16x8_t d1 = highbd_convolve8_8_y(s1234, s5678, y_filter, max);
+        uint16x8_t d2 = highbd_convolve8_8_y(s2345, s6789, y_filter, max);
+        uint16x8_t d3 = highbd_convolve8_8_y(s3456, s789A, y_filter, max);
+
+        store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+        // Prepare block for next iteration - re-using as much as possible.
+        // Shuffle everything up four rows.
+        s0123[0] = s4567[0];
+        s0123[1] = s4567[1];
+        s0123[2] = s4567[2];
+        s0123[3] = s4567[3];
+        s1234[0] = s5678[0];
+        s1234[1] = s5678[1];
+        s1234[2] = s5678[2];
+        s1234[3] = s5678[3];
+        s2345[0] = s6789[0];
+        s2345[1] = s6789[1];
+        s2345[2] = s6789[2];
+        s2345[3] = s6789[3];
+        s3456[0] = s789A[0];
+        s3456[1] = s789A[1];
+        s3456[2] = s789A[2];
+        s3456[3] = s789A[3];
+
+        s += 4 * src_stride;
+        d += 4 * dst_stride;
+        h -= 4;
+      } while (h != 0);
+      src += 8;
+      dst += 8;
+      width -= 8;
+    } while (width != 0);
+  }
+}
+
+void av1_highbd_convolve_y_sr_sve2(const uint16_t *src, int src_stride,
+                                   uint16_t *dst, int dst_stride, int w, int h,
+                                   const InterpFilterParams *filter_params_y,
+                                   const int subpel_y_qn, int bd) {
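+  // The SVE2 path assumes blocks of at least 4x4, so use the scalar C
+  // implementation for block sizes with a width or height of 2.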
+  if (w == 2 || h == 2) {
+    av1_highbd_convolve_y_sr_c(src, src_stride, dst, dst_stride, w, h,
+                               filter_params_y, subpel_y_qn, bd);
+    return;
+  }
+  const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+
+  if (y_filter_taps != 8) {
+    av1_highbd_convolve_y_sr_neon(src, src_stride, dst, dst_stride, w, h,
+                                  filter_params_y, subpel_y_qn, bd);
+    return;
+  }
+
+  const int vert_offset = filter_params_y->taps / 2 - 1;
+  const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+      filter_params_y, subpel_y_qn & SUBPEL_MASK);
+
+  src -= vert_offset * src_stride;
+
+  highbd_convolve_y_sr_8tap_sve2(src, src_stride, dst, dst_stride, w, h,
+                                 y_filter_ptr, bd);
+}
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 4f8f445ef..522bb8127 100644
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -604,7 +604,7 @@ ()
     specialize qw/av1_highbd_convolve_2d_sr_intrabc neon/;
     specialize qw/av1_highbd_convolve_x_sr ssse3 avx2 neon sve2/;
     specialize qw/av1_highbd_convolve_x_sr_intrabc neon/;
-    specialize qw/av1_highbd_convolve_y_sr ssse3 avx2 neon/;
+    specialize qw/av1_highbd_convolve_y_sr ssse3 avx2 neon sve2/;
     specialize qw/av1_highbd_convolve_y_sr_intrabc neon/;
     specialize qw/av1_highbd_convolve_2d_scale sse4_1 neon/;
   }
diff --git a/test/av1_convolve_test.cc b/test/av1_convolve_test.cc
index 00a07ddae..365703955 100644
--- a/test/av1_convolve_test.cc
+++ b/test/av1_convolve_test.cc
@@ -1003,6 +1003,11 @@ INSTANTIATE_TEST_SUITE_P(NEON, AV1ConvolveYHighbdTest,
                          BuildHighbdParams(av1_highbd_convolve_y_sr_neon));
 #endif
 
+#if HAVE_SVE2
+INSTANTIATE_TEST_SUITE_P(SVE2, AV1ConvolveYHighbdTest,
+                         BuildHighbdParams(av1_highbd_convolve_y_sr_sve2));
+#endif
+
 /////////////////////////////////////////////////////////////////
 // Single reference convolve-y IntraBC functions (high bit-depth)
 /////////////////////////////////////////////////////////////////