aom: Add SVE2 impl of HBD dist_wtd_convolve_y for 8-tap filters

From a8b3d10754b43c1714212d703c3c3d2eb64b464e Mon Sep 17 00:00:00 2001
From: Salome Thirot <[EMAIL REDACTED]>
Date: Fri, 8 Mar 2024 16:14:25 +0000
Subject: [PATCH] Add SVE2 impl of HBD dist_wtd_convolve_y for 8-tap filters

Add SVE2 implementation of av1_highbd_dist_wtd_convolve_y for 8-tap
filters, as well as the corresponding tests.

The helper functions used to shuffle the input data are shared with the
regular vertical convolutions, so move them to a separate header file.

Change-Id: Iba06f88802cb302209c506ffae4f68b690dbb3fa
---
 .../arm/highbd_compound_convolve_sve2.c       | 249 ++++++++++++++++++
 av1/common/arm/highbd_convolve_sve2.c         |  80 +-----
 av1/common/arm/highbd_convolve_sve2.h         |  97 +++++++
 av1/common/av1_rtcd_defs.pl                   |   2 +-
 test/av1_convolve_test.cc                     |   6 +
 5 files changed, 354 insertions(+), 80 deletions(-)
 create mode 100644 av1/common/arm/highbd_convolve_sve2.h

diff --git a/av1/common/arm/highbd_compound_convolve_sve2.c b/av1/common/arm/highbd_compound_convolve_sve2.c
index dc983c5f8..f500110f1 100644
--- a/av1/common/arm/highbd_compound_convolve_sve2.c
+++ b/av1/common/arm/highbd_compound_convolve_sve2.c
@@ -25,6 +25,7 @@
 #include "av1/common/filter.h"
 #include "av1/common/arm/highbd_compound_convolve_neon.h"
 #include "av1/common/arm/highbd_convolve_neon.h"
+#include "av1/common/arm/highbd_convolve_sve2.h"
 
 DECLARE_ALIGNED(16, static const uint16_t, kDotProdTbl[32]) = {
   0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6,
@@ -436,3 +437,251 @@ void av1_highbd_dist_wtd_convolve_x_sve2(
     }
   }
 }
+
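+// Compute four output pixels of the 8-tap vertical convolution: each
+// aom_svdot_lane_s16 accumulates four 16-bit products per 64-bit lane, with
+// filter lane 0 selecting taps 0-3 and lane 1 taps 4-7. samples_lo holds the
+// transposed input rows 0-3 and samples_hi rows 4-7.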
+static INLINE uint16x4_t highbd_convolve8_4_y(int16x8_t samples_lo[2],
+                                              int16x8_t samples_hi[2],
+                                              int16x8_t filter,
+                                              int64x2_t offset,
+                                              int32x4_t shift) {
+  int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+  sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+  int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+  sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+  int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+  sum0123 = vshlq_s32(sum0123, shift);
+
+  return vqmovun_s32(sum0123);
+}
+
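+// As above, but computing eight output pixels per call.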
+static INLINE uint16x8_t highbd_convolve8_8_y(int16x8_t samples_lo[4],
+                                              int16x8_t samples_hi[4],
+                                              int16x8_t filter,
+                                              int64x2_t offset,
+                                              int32x4_t shift) {
+  int64x2_t sum01 = aom_svdot_lane_s16(offset, samples_lo[0], filter, 0);
+  sum01 = aom_svdot_lane_s16(sum01, samples_hi[0], filter, 1);
+
+  int64x2_t sum23 = aom_svdot_lane_s16(offset, samples_lo[1], filter, 0);
+  sum23 = aom_svdot_lane_s16(sum23, samples_hi[1], filter, 1);
+
+  int64x2_t sum45 = aom_svdot_lane_s16(offset, samples_lo[2], filter, 0);
+  sum45 = aom_svdot_lane_s16(sum45, samples_hi[2], filter, 1);
+
+  int64x2_t sum67 = aom_svdot_lane_s16(offset, samples_lo[3], filter, 0);
+  sum67 = aom_svdot_lane_s16(sum67, samples_hi[3], filter, 1);
+
+  int32x4_t sum0123 = vcombine_s32(vmovn_s64(sum01), vmovn_s64(sum23));
+  sum0123 = vshlq_s32(sum0123, shift);
+
+  int32x4_t sum4567 = vcombine_s32(vmovn_s64(sum45), vmovn_s64(sum67));
+  sum4567 = vshlq_s32(sum4567, shift);
+
+  return vcombine_u16(vqmovun_s32(sum0123), vqmovun_s32(sum4567));
+}
+
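+// 8-tap vertical pass of the distance-weighted compound convolution, writing
+// offset intermediate values to dst.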
+static INLINE void highbd_dist_wtd_convolve_y_8tap_sve2(
+    const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride,
+    int width, int height, const int16_t *y_filter_ptr,
+    ConvolveParams *conv_params, int offset) {
+  const int16x8_t y_filter = vld1q_s16(y_filter_ptr);
+  const int64x2_t offset_s64 = vdupq_n_s64(offset);
+  const int32x4_t shift = vdupq_n_s32(-conv_params->round_0);
+
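+  // kDotProdMergeBlockTbl lets each loop iteration splice the four newly
+  // loaded rows into the transposed block carried over from the previous
+  // iteration, so only four new rows need to be transposed per iteration.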
+  uint16x8x3_t merge_block_tbl = vld1q_u16_x3(kDotProdMergeBlockTbl);
+  // Scale indices by the true vector length to avoid reading from an
+  // 'undefined' portion of a vector on a system with SVE vectors > 128-bit.
+  uint16x8_t correction0 =
+      vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000000000000ULL));
+  merge_block_tbl.val[0] = vaddq_u16(merge_block_tbl.val[0], correction0);
+  uint16x8_t correction1 =
+      vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100000000ULL));
+  merge_block_tbl.val[1] = vaddq_u16(merge_block_tbl.val[1], correction1);
+
+  uint16x8_t correction2 =
+      vreinterpretq_u16_u64(vdupq_n_u64(svcnth() * 0x0001000100010000ULL));
+  merge_block_tbl.val[2] = vaddq_u16(merge_block_tbl.val[2], correction2);
+
+  if (width == 4) {
+    int16_t *s = (int16_t *)src;
+    int16x4_t s0, s1, s2, s3, s4, s5, s6;
+    load_s16_4x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+    s += 7 * src_stride;
+
+    // This operation combines a conventional transpose and the sample permute
+    // required before computing the dot product.
+    int16x8_t s0123[2], s1234[2], s2345[2], s3456[2];
+    transpose_concat_4x4(s0, s1, s2, s3, s0123);
+    transpose_concat_4x4(s1, s2, s3, s4, s1234);
+    transpose_concat_4x4(s2, s3, s4, s5, s2345);
+    transpose_concat_4x4(s3, s4, s5, s6, s3456);
+
+    do {
+      int16x4_t s7, s8, s9, s10;
+      load_s16_4x4(s, src_stride, &s7, &s8, &s9, &s10);
+
+      int16x8_t s4567[2], s5678[2], s6789[2], s789A[2];
+      // Transpose and shuffle the 4 lines that were loaded.
+      transpose_concat_4x4(s7, s8, s9, s10, s789A);
+
+      // Merge new data into block from previous iteration.
+      aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+      aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+      aom_tbl2x2_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+      uint16x4_t d0 =
+          highbd_convolve8_4_y(s0123, s4567, y_filter, offset_s64, shift);
+      uint16x4_t d1 =
+          highbd_convolve8_4_y(s1234, s5678, y_filter, offset_s64, shift);
+      uint16x4_t d2 =
+          highbd_convolve8_4_y(s2345, s6789, y_filter, offset_s64, shift);
+      uint16x4_t d3 =
+          highbd_convolve8_4_y(s3456, s789A, y_filter, offset_s64, shift);
+
+      store_u16_4x4(dst, dst_stride, d0, d1, d2, d3);
+
+      // Prepare block for next iteration - re-using as much as possible.
+      // Shuffle everything up four rows.
+      s0123[0] = s4567[0];
+      s0123[1] = s4567[1];
+      s1234[0] = s5678[0];
+      s1234[1] = s5678[1];
+      s2345[0] = s6789[0];
+      s2345[1] = s6789[1];
+      s3456[0] = s789A[0];
+      s3456[1] = s789A[1];
+
+      s += 4 * src_stride;
+      dst += 4 * dst_stride;
+      height -= 4;
+    } while (height != 0);
+  } else {
+    do {
+      int h = height;
+      int16_t *s = (int16_t *)src;
+      uint16_t *d = dst;
+
+      int16x8_t s0, s1, s2, s3, s4, s5, s6;
+      load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
+      s += 7 * src_stride;
+
+      // This operation combines a conventional transpose and the sample permute
+      // required before computing the dot product.
+      int16x8_t s0123[4], s1234[4], s2345[4], s3456[4];
+      transpose_concat_8x4(s0, s1, s2, s3, s0123);
+      transpose_concat_8x4(s1, s2, s3, s4, s1234);
+      transpose_concat_8x4(s2, s3, s4, s5, s2345);
+      transpose_concat_8x4(s3, s4, s5, s6, s3456);
+
+      do {
+        int16x8_t s7, s8, s9, s10;
+        load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);
+        int16x8_t s4567[4], s5678[4], s6789[4], s789A[4];
+
+        // Transpose and shuffle the 4 lines that were loaded.
+        transpose_concat_8x4(s7, s8, s9, s10, s789A);
+
+        // Merge new data into block from previous iteration.
+        aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[0], s4567);
+        aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[1], s5678);
+        aom_tbl2x4_s16(s3456, s789A, merge_block_tbl.val[2], s6789);
+
+        uint16x8_t d0 =
+            highbd_convolve8_8_y(s0123, s4567, y_filter, offset_s64, shift);
+        uint16x8_t d1 =
+            highbd_convolve8_8_y(s1234, s5678, y_filter, offset_s64, shift);
+        uint16x8_t d2 =
+            highbd_convolve8_8_y(s2345, s6789, y_filter, offset_s64, shift);
+        uint16x8_t d3 =
+            highbd_convolve8_8_y(s3456, s789A, y_filter, offset_s64, shift);
+
+        store_u16_8x4(d, dst_stride, d0, d1, d2, d3);
+
+        // Prepare block for next iteration - re-using as much as possible.
+        // Shuffle everything up four rows.
+        s0123[0] = s4567[0];
+        s0123[1] = s4567[1];
+        s0123[2] = s4567[2];
+        s0123[3] = s4567[3];
+        s1234[0] = s5678[0];
+        s1234[1] = s5678[1];
+        s1234[2] = s5678[2];
+        s1234[3] = s5678[3];
+        s2345[0] = s6789[0];
+        s2345[1] = s6789[1];
+        s2345[2] = s6789[2];
+        s2345[3] = s6789[3];
+        s3456[0] = s789A[0];
+        s3456[1] = s789A[1];
+        s3456[2] = s789A[2];
+        s3456[3] = s789A[3];
+
+        s += 4 * src_stride;
+        d += 4 * dst_stride;
+        h -= 4;
+      } while (h != 0);
+      src += 8;
+      dst += 8;
+      width -= 8;
+    } while (width != 0);
+  }
+}
+
+void av1_highbd_dist_wtd_convolve_y_sve2(
+    const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w,
+    int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn,
+    ConvolveParams *conv_params, int bd) {
+  DECLARE_ALIGNED(16, uint16_t,
+                  im_block[(MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]);
+  CONV_BUF_TYPE *dst16 = conv_params->dst;
+  const int y_filter_taps = get_filter_tap(filter_params_y, subpel_y_qn);
+
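+  // Only 8-tap filters have an SVE2 path; fall back to the Neon
+  // implementation for other filter lengths.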
+  if (y_filter_taps != 8) {
+    av1_highbd_dist_wtd_convolve_y_neon(src, src_stride, dst, dst_stride, w, h,
+                                        filter_params_y, subpel_y_qn,
+                                        conv_params, bd);
+    return;
+  }
+
+  int dst16_stride = conv_params->dst_stride;
+  const int im_stride = MAX_SB_SIZE;
+  const int vert_offset = filter_params_y->taps / 2 - 1;
+  assert(FILTER_BITS == COMPOUND_ROUND1_BITS);
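+  // Combined offset folded into the dot-product accumulator: a rounding term
+  // for the round_0 shift plus the compound prediction offset.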
+  const int round_offset_conv = (1 << (conv_params->round_0 - 1)) +
+                                (1 << (bd + FILTER_BITS)) +
+                                (1 << (bd + FILTER_BITS - 1));
+
+  const int16_t *y_filter_ptr = av1_get_interp_filter_subpel_kernel(
+      filter_params_y, subpel_y_qn & SUBPEL_MASK);
+
+  src -= vert_offset * src_stride;
+
+  if (conv_params->do_average) {
+    highbd_dist_wtd_convolve_y_8tap_sve2(src, src_stride, im_block, im_stride,
+                                         w, h, y_filter_ptr, conv_params,
+                                         round_offset_conv);
+    if (conv_params->use_dist_wtd_comp_avg) {
+      if (bd == 12) {
+        highbd_12_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride,
+                                         w, h, conv_params);
+      } else {
+        highbd_dist_wtd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w,
+                                      h, conv_params, bd);
+      }
+    } else {
+      if (bd == 12) {
+        highbd_12_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+                                conv_params);
+      } else {
+        highbd_comp_avg_neon(im_block, im_stride, dst, dst_stride, w, h,
+                             conv_params, bd);
+      }
+    }
+  } else {
+    highbd_dist_wtd_convolve_y_8tap_sve2(src, src_stride, dst16, dst16_stride,
+                                         w, h, y_filter_ptr, conv_params,
+                                         round_offset_conv);
+  }
+}
diff --git a/av1/common/arm/highbd_convolve_sve2.c b/av1/common/arm/highbd_convolve_sve2.c
index 1cb1086b5..82eb12fce 100644
--- a/av1/common/arm/highbd_convolve_sve2.c
+++ b/av1/common/arm/highbd_convolve_sve2.c
@@ -22,6 +22,7 @@
 #include "aom_ports/mem.h"
 #include "av1/common/convolve.h"
 #include "av1/common/filter.h"
+#include "av1/common/arm/highbd_convolve_sve2.h"
 
 DECLARE_ALIGNED(16, static const uint16_t, kDotProdTbl[32]) = {
   0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6,
@@ -398,85 +399,6 @@ void av1_highbd_convolve_x_sr_sve2(const uint16_t *src, int src_stride,
                                  x_filter_ptr, conv_params, bd);
 }
 
-// clang-format off
-DECLARE_ALIGNED(16, static const uint16_t, kDotProdMergeBlockTbl[24]) = {
-  // Shift left and insert new last column in transposed 4x4 block.
-  1, 2, 3, 0, 5, 6, 7, 4,
-  // Shift left and insert two new columns in transposed 4x4 block.
-  2, 3, 0, 1, 6, 7, 4, 5,
-  // Shift left and insert three new columns in transposed 4x4 block.
-  3, 0, 1, 2, 7, 4, 5, 6,
-};
-// clang-format on
-
-static INLINE void transpose_concat_4x4(int16x4_t s0, int16x4_t s1,
-                                        int16x4_t s2, int16x4_t s3,
-                                        int16x8_t res[2]) {
-  // Transpose 16-bit elements and concatenate result rows as follows:
-  // s0: 00, 01, 02, 03
-  // s1: 10, 11, 12, 13
-  // s2: 20, 21, 22, 23
-  // s3: 30, 31, 32, 33
-  //
-  // res[0]: 00 10 20 30 01 11 21 31
-  // res[1]: 02 12 22 32 03 13 23 33
-
-  int16x8_t s0q = vcombine_s16(s0, vdup_n_s16(0));
-  int16x8_t s1q = vcombine_s16(s1, vdup_n_s16(0));
-  int16x8_t s2q = vcombine_s16(s2, vdup_n_s16(0));
-  int16x8_t s3q = vcombine_s16(s3, vdup_n_s16(0));
-
-  int32x4_t s01 = vreinterpretq_s32_s16(vzip1q_s16(s0q, s1q));
-  int32x4_t s23 = vreinterpretq_s32_s16(vzip1q_s16(s2q, s3q));
-
-  int32x4x2_t s0123 = vzipq_s32(s01, s23);
-
-  res[0] = vreinterpretq_s16_s32(s0123.val[0]);
-  res[1] = vreinterpretq_s16_s32(s0123.val[1]);
-}
-
-static INLINE void transpose_concat_8x4(int16x8_t s0, int16x8_t s1,
-                                        int16x8_t s2, int16x8_t s3,
-                                        int16x8_t res[4]) {
-  // Transpose 16-bit elements and concatenate result rows as follows:
-  // s0: 00, 01, 02, 03, 04, 05, 06, 07
-  // s1: 10, 11, 12, 13, 14, 15, 16, 17
-  // s2: 20, 21, 22, 23, 24, 25, 26, 27
-  // s3: 30, 31, 32, 33, 34, 35, 36, 37
-  //
-  // res[0]: 00 10 20 30 01 11 21 31
-  // res[1]: 02 12 22 32 03 13 23 33
-  // res[2]: 04 14 24 34 05 15 25 35
-  // res[3]: 06 16 26 36 07 17 27 37
-
-  int16x8x2_t tr01_16 = vzipq_s16(s0, s1);
-  int16x8x2_t tr23_16 = vzipq_s16(s2, s3);
-
-  int32x4x2_t tr01_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[0]),
-                                  vreinterpretq_s32_s16(tr23_16.val[0]));
-  int32x4x2_t tr23_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[1]),
-                                  vreinterpretq_s32_s16(tr23_16.val[1]));
-
-  res[0] = vreinterpretq_s16_s32(tr01_32.val[0]);
-  res[1] = vreinterpretq_s16_s32(tr01_32.val[1]);
-  res[2] = vreinterpretq_s16_s32(tr23_32.val[0]);
-  res[3] = vreinterpretq_s16_s32(tr23_32.val[1]);
-}
-
-static INLINE void aom_tbl2x4_s16(int16x8_t t0[4], int16x8_t t1[4],
-                                  uint16x8_t tbl, int16x8_t res[4]) {
-  res[0] = aom_tbl2_s16(t0[0], t1[0], tbl);
-  res[1] = aom_tbl2_s16(t0[1], t1[1], tbl);
-  res[2] = aom_tbl2_s16(t0[2], t1[2], tbl);
-  res[3] = aom_tbl2_s16(t0[3], t1[3], tbl);
-}
-
-static INLINE void aom_tbl2x2_s16(int16x8_t t0[2], int16x8_t t1[2],
-                                  uint16x8_t tbl, int16x8_t res[2]) {
-  res[0] = aom_tbl2_s16(t0[0], t1[0], tbl);
-  res[1] = aom_tbl2_s16(t0[1], t1[1], tbl);
-}
-
 static INLINE uint16x4_t highbd_convolve12_4_y(int16x8_t s0[2], int16x8_t s1[2],
                                                int16x8_t s2[2],
                                                int16x8_t filter_0_7,
diff --git a/av1/common/arm/highbd_convolve_sve2.h b/av1/common/arm/highbd_convolve_sve2.h
new file mode 100644
index 000000000..05e23deef
--- /dev/null
+++ b/av1/common/arm/highbd_convolve_sve2.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2023, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_
+#define AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_
+
+#include <arm_neon.h>
+
+#include "aom_dsp/arm/aom_neon_sve2_bridge.h"
+
+// clang-format off
+DECLARE_ALIGNED(16, static const uint16_t, kDotProdMergeBlockTbl[24]) = {
+  // Shift left and insert new last column in transposed 4x4 block.
+  1, 2, 3, 0, 5, 6, 7, 4,
+  // Shift left and insert two new columns in transposed 4x4 block.
+  2, 3, 0, 1, 6, 7, 4, 5,
+  // Shift left and insert three new columns in transposed 4x4 block.
+  3, 0, 1, 2, 7, 4, 5, 6,
+};
+// clang-format on
+
+static INLINE void transpose_concat_4x4(int16x4_t s0, int16x4_t s1,
+                                        int16x4_t s2, int16x4_t s3,
+                                        int16x8_t res[2]) {
+  // Transpose 16-bit elements and concatenate result rows as follows:
+  // s0: 00, 01, 02, 03
+  // s1: 10, 11, 12, 13
+  // s2: 20, 21, 22, 23
+  // s3: 30, 31, 32, 33
+  //
+  // res[0]: 00 10 20 30 01 11 21 31
+  // res[1]: 02 12 22 32 03 13 23 33
+
+  int16x8_t s0q = vcombine_s16(s0, vdup_n_s16(0));
+  int16x8_t s1q = vcombine_s16(s1, vdup_n_s16(0));
+  int16x8_t s2q = vcombine_s16(s2, vdup_n_s16(0));
+  int16x8_t s3q = vcombine_s16(s3, vdup_n_s16(0));
+
+  int32x4_t s01 = vreinterpretq_s32_s16(vzip1q_s16(s0q, s1q));
+  int32x4_t s23 = vreinterpretq_s32_s16(vzip1q_s16(s2q, s3q));
+
+  int32x4x2_t s0123 = vzipq_s32(s01, s23);
+
+  res[0] = vreinterpretq_s16_s32(s0123.val[0]);
+  res[1] = vreinterpretq_s16_s32(s0123.val[1]);
+}
+
+static INLINE void transpose_concat_8x4(int16x8_t s0, int16x8_t s1,
+                                        int16x8_t s2, int16x8_t s3,
+                                        int16x8_t res[4]) {
+  // Transpose 16-bit elements and concatenate result rows as follows:
+  // s0: 00, 01, 02, 03, 04, 05, 06, 07
+  // s1: 10, 11, 12, 13, 14, 15, 16, 17
+  // s2: 20, 21, 22, 23, 24, 25, 26, 27
+  // s3: 30, 31, 32, 33, 34, 35, 36, 37
+  //
+  // res[0]: 00 10 20 30 01 11 21 31
+  // res[1]: 02 12 22 32 03 13 23 33
+  // res[2]: 04 14 24 34 05 15 25 35
+  // res[3]: 06 16 26 36 07 17 27 37
+
+  int16x8x2_t tr01_16 = vzipq_s16(s0, s1);
+  int16x8x2_t tr23_16 = vzipq_s16(s2, s3);
+  int32x4x2_t tr01_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[0]),
+                                  vreinterpretq_s32_s16(tr23_16.val[0]));
+  int32x4x2_t tr23_32 = vzipq_s32(vreinterpretq_s32_s16(tr01_16.val[1]),
+                                  vreinterpretq_s32_s16(tr23_16.val[1]));
+
+  res[0] = vreinterpretq_s16_s32(tr01_32.val[0]);
+  res[1] = vreinterpretq_s16_s32(tr01_32.val[1]);
+  res[2] = vreinterpretq_s16_s32(tr23_32.val[0]);
+  res[3] = vreinterpretq_s16_s32(tr23_32.val[1]);
+}
+
+static INLINE void aom_tbl2x4_s16(int16x8_t t0[4], int16x8_t t1[4],
+                                  uint16x8_t tbl, int16x8_t res[4]) {
+  res[0] = aom_tbl2_s16(t0[0], t1[0], tbl);
+  res[1] = aom_tbl2_s16(t0[1], t1[1], tbl);
+  res[2] = aom_tbl2_s16(t0[2], t1[2], tbl);
+  res[3] = aom_tbl2_s16(t0[3], t1[3], tbl);
+}
+
+static INLINE void aom_tbl2x2_s16(int16x8_t t0[2], int16x8_t t1[2],
+                                  uint16x8_t tbl, int16x8_t res[2]) {
+  res[0] = aom_tbl2_s16(t0[0], t1[0], tbl);
+  res[1] = aom_tbl2_s16(t0[1], t1[1], tbl);
+}
+
+#endif  // AOM_AV1_COMMON_ARM_HIGHBD_CONVOLVE_SVE2_H_
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 79107c6fb..9113e4457 100644
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -608,7 +608,7 @@ ()
   if(aom_config("CONFIG_AV1_HIGHBITDEPTH") eq "yes") {
     specialize qw/av1_highbd_dist_wtd_convolve_2d sse4_1 avx2 neon/;
     specialize qw/av1_highbd_dist_wtd_convolve_x sse4_1 avx2 neon sve2/;
-    specialize qw/av1_highbd_dist_wtd_convolve_y sse4_1 avx2 neon/;
+    specialize qw/av1_highbd_dist_wtd_convolve_y sse4_1 avx2 neon sve2/;
     specialize qw/av1_highbd_dist_wtd_convolve_2d_copy sse4_1 avx2 neon/;
     specialize qw/av1_highbd_convolve_2d_sr ssse3 avx2 neon sve2/;
     specialize qw/av1_highbd_convolve_2d_sr_intrabc neon/;
diff --git a/test/av1_convolve_test.cc b/test/av1_convolve_test.cc
index 12997dbfe..40a71667c 100644
--- a/test/av1_convolve_test.cc
+++ b/test/av1_convolve_test.cc
@@ -2044,6 +2044,12 @@ INSTANTIATE_TEST_SUITE_P(
     BuildHighbdLumaParams(av1_highbd_dist_wtd_convolve_y_neon));
 #endif
 
+#if HAVE_SVE2
+INSTANTIATE_TEST_SUITE_P(
+    SVE2, AV1ConvolveYHighbdCompoundTest,
+    BuildHighbdLumaParams(av1_highbd_dist_wtd_convolve_y_sve2));
+#endif
+
 #endif  // CONFIG_AV1_HIGHBITDEPTH
 
 //////////////////////////////////////////////////////