Diffstat (limited to 'common/arm64/ihevc_sao_edge_offset_class0_chroma.s')
-rw-r--r-- | common/arm64/ihevc_sao_edge_offset_class0_chroma.s | 144
1 file changed, 73 insertions, 71 deletions
diff --git a/common/arm64/ihevc_sao_edge_offset_class0_chroma.s b/common/arm64/ihevc_sao_edge_offset_class0_chroma.s
index d854c62..c6be41a 100644
--- a/common/arm64/ihevc_sao_edge_offset_class0_chroma.s
+++ b/common/arm64/ihevc_sao_edge_offset_class0_chroma.s
@@ -74,7 +74,7 @@ ihevc_sao_edge_offset_class0_chroma_av8:
     ldr w10,[sp,#16]
     ldr w11,[sp,#24]
-    push_v_regs
+    // STMFD sp!, {x4-x12, x14}         //stack stores the values of the arguments
     stp x19, x20,[sp,#-16]!
@@ -111,15 +111,15 @@ ihevc_sao_edge_offset_class0_chroma_av8:
     ADRP x14, :got:gi1_table_edge_idx   //table pointer
     LDR x14, [x14, #:got_lo12:gi1_table_edge_idx]
-    movi v8.16b, #0xFF                  //au1_mask = vdupq_n_s8(-1)
+    movi v3.16b, #0xFF                  //au1_mask = vdupq_n_s8(-1)
     mul x4, x4, x1                      //(ht - 1) * src_strd
     MOV x5, x23                         //Loads pi1_sao_offset_v
-    LD1 {v11.8b},[x8]                   //offset_tbl = vld1_s8(pi1_sao_offset_u)
+    LD1 {v7.8b},[x8]                    //offset_tbl = vld1_s8(pi1_sao_offset_u)
     ADD x4,x4,x0                        //pu1_src[(ht - 1) * src_strd]
     MOV x6,x0                           //pu1_src_org
-    LD1 {v10.8b},[x14]                  //edge_idx_tbl = vld1_s8(gi1_table_edge_idx)
+    LD1 {v5.8b},[x14]                   //edge_idx_tbl = vld1_s8(gi1_table_edge_idx)
     MOV x12,x9                          //Move wd to x12 for loop count
 SRC_TOP_LOOP:                           //wd is always multiple of 8
@@ -141,20 +141,20 @@ WIDTH_LOOP_16:
     CMP x8,x9                           //if(col == wd)
     BNE AU1_MASK_FF                     //jump to else part
     LDRB w12,[x7]                       //pu1_avail[0]
-    mov v8.8b[0], w12                   //vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
-    mov v8.8b[1], w12                   //vsetq_lane_s8(pu1_avail[0], au1_mask, 1)
+    mov v3.8b[0], w12                   //vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
+    mov v3.8b[1], w12                   //vsetq_lane_s8(pu1_avail[0], au1_mask, 1)
     B SKIP_AU1_MASK_FF                  //Skip the else part
 AU1_MASK_FF:
     MOV x12,#-1                         //move -1 to x12
-    mov v8.4h[0], w12                   //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
+    mov v3.4h[0], w12                   //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
 SKIP_AU1_MASK_FF:
     CMP x8,#16                          //If col == 16
     BNE SKIP_MASKING_IF_NOT16           //If not skip masking
     LDRB w12,[x7,#1]                    //pu1_avail[1]
-    mov v8.8b[14], w12                  //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 14)
-    mov v8.8b[15], w12                  //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
+    mov v3.8b[14], w12                  //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 14)
+    mov v3.8b[15], w12                  //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
 SKIP_MASKING_IF_NOT16:
     MOV x12,x0                          //pu1_src_cpy = pu1_src
@@ -162,27 +162,27 @@ SKIP_MASKING_IF_NOT16:
 PU1_SRC_LOOP:
     LDRH w11,[x2]                       //load pu1_src_left since ht - row =0 when it comes first pu1_src_left is incremented later
-    LD1 {v12.16b},[x12],x1              //pu1_cur_row = vld1q_u8(pu1_src_cpy)
+    LD1 {v19.16b},[x12],x1              //pu1_cur_row = vld1q_u8(pu1_src_cpy)
     //LD1 {v13.8b},[x12],x1             //pu1_cur_row = vld1q_u8(pu1_src_cpy)
     //SUB x12, x12,#8
     SUB x5,x9,x8                        //wd - col
     SUB x14,x10,x4                      //ht - row
-    mov v14.4h[7], w11                  //vsetq_lane_u16(pu1_src_left[ht - row], pu1_cur_row_tmp, 14,15)
+    mov v21.4h[7], w11                  //vsetq_lane_u16(pu1_src_left[ht - row], pu1_cur_row_tmp, 14,15)
     mul x14, x14, x1                    //(ht - row) * src_strd
     LD1 {v30.16b},[x12]                 //II Iteration pu1_cur_row = vld1q_u8(pu1_src_cpy)
     //LD1 {v31.8b},[x12]                //II Iteration pu1_cur_row = vld1q_u8(pu1_src_cpy)
     //SUB x12, x12,#8
-    EXT v14.16b, v14.16b , v12.16b,#14  //pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 14)
+    EXT v21.16b, v21.16b , v19.16b,#14  //pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 14)
     SUB x12,x12,x1
     LDRH w11,[x2,#2]                    //II load pu1_src_left since ht - row =0
-    cmhi v16.16b, v12.16b , v14.16b     //vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
+    cmhi v16.16b, v19.16b , v21.16b     //vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
     ADD x5,x14,x5                       //(ht - row) * src_strd + (wd - col)
     mov v28.4h[7], w11                  //II vsetq_lane_u16(pu1_src_left[ht - row], pu1_cur_row_tmp, 14,15)
-    cmhi v18.16b, v14.16b , v12.16b     //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
+    cmhi v18.16b, v21.16b , v19.16b     //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
     LDRH w14,[x6,x5]                    //pu1_src_org[(ht - row) * src_strd + 14 + (wd - col)]
     SUB v20.16b, v18.16b , v16.16b      //sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
@@ -191,7 +191,7 @@ PU1_SRC_LOOP:
     LDRB w11,[x12,#16]                  //pu1_src_cpy[16]
     EXT v28.16b, v28.16b , v30.16b,#14  //II pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 14)
-    mov v14.8b[0], w11                  //pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)
+    mov v21.8b[0], w11                  //pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)
     cmhi v26.16b, v30.16b , v28.16b     //II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
     LDRB w11,[x12,#17]                  //pu1_src_cpy[17]
@@ -199,62 +199,62 @@ PU1_SRC_LOOP:
     STRH w14,[x2],#2                    //pu1_src_left[(ht - row)] = au1_src_left_tmp[(ht - row)]
     ADD x12,x12,x1
-    mov v14.8b[1], w11                  //pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[17], pu1_cur_row_tmp, 1)
+    mov v21.8b[1], w11                  //pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[17], pu1_cur_row_tmp, 1)
     LDRB w11,[x12,#16]                  //II pu1_src_cpy[16]
-    EXT v14.16b, v12.16b , v14.16b,#2   //pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 2)
+    EXT v21.16b, v19.16b , v21.16b,#2   //pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 2)
     mov v28.8b[0], w11                  //II pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)
     LDRB w11,[x12,#17]                  //II pu1_src_cpy[17]
-    cmhi v16.16b, v12.16b , v14.16b     //vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
+    cmhi v16.16b, v19.16b , v21.16b     //vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
     SUB x12,x12,x1
-    cmhi v18.16b, v14.16b , v12.16b     //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
+    cmhi v18.16b, v21.16b , v19.16b     //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
     mov v28.8b[1], w11                  //II pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[17], pu1_cur_row_tmp, 1)
     SUB v22.16b, v18.16b , v16.16b      //sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
     EXT v28.16b, v30.16b , v28.16b,#2   //II pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 2)
-    ADD v14.16b, v2.16b , v20.16b       //edge_idx = vaddq_s8(const_2, sign_left)
+    ADD v21.16b, v2.16b , v20.16b       //edge_idx = vaddq_s8(const_2, sign_left)
-    mov v10.d[1],v10.d[0]
-    ADD v14.16b, v14.16b , v22.16b      //edge_idx = vaddq_s8(edge_idx, sign_right)
-    TBL v14.16b, {v10.16b},v14.16b      //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
+    mov v5.d[1],v5.d[0]
+    ADD v21.16b, v21.16b , v22.16b      //edge_idx = vaddq_s8(edge_idx, sign_right)
+    TBL v21.16b, {v5.16b},v21.16b       //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
     SUB v20.16b, v24.16b , v26.16b      //II sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
     cmhi v26.16b, v30.16b , v28.16b     //II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
     // TBL v15.8b, {v10.16b},v15.8b     //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
     cmhi v24.16b, v28.16b , v30.16b     //II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
-    AND v14.16b, v14.16b , v8.16b       //edge_idx = vandq_s8(edge_idx, au1_mask)
-    mov v15.d[0],v14.d[1]
-    UZP1 v1.8b, v14.8b, v15.8b
-    UZP2 v15.8b, v14.8b, v15.8b
-    mov v14.8b, v1.8b
+    AND v21.16b, v21.16b , v3.16b       //edge_idx = vandq_s8(edge_idx, au1_mask)
+    mov v23.d[0],v21.d[1]
+    UZP1 v1.8b, v21.8b, v23.8b
+    UZP2 v23.8b, v21.8b, v23.8b
+    mov v21.8b, v1.8b
     //mov v11.d[1],v0.d[0]
     //mov v14.d[1],v15.d[0]
     SUB v22.16b, v24.16b , v26.16b      //II sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
-    TBL v16.8b, {v11.16b},v14.8b        //offset = vtbl1_s8(offset_tbl_u, vget_low_s8(edge_idx))
+    TBL v16.8b, {v7.16b},v21.8b         //offset = vtbl1_s8(offset_tbl_u, vget_low_s8(edge_idx))
     ADD v24.16b, v2.16b , v20.16b       //II edge_idx = vaddq_s8(const_2, sign_left)
-    Uxtl v18.8h, v12.8b                 //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
-    TBL v17.8b, {v0.16b},v15.8b
+    Uxtl v18.8h, v19.8b                 //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
+    TBL v17.8b, {v0.16b},v23.8b
     ADD v24.16b, v24.16b , v22.16b      //II edge_idx = vaddq_s8(edge_idx, sign_right)
     //mov v17.d[0],v16.d[1]
     ZIP1 v1.8b, v16.8b, v17.8b
     ZIP2 v17.8b, v16.8b, v17.8b
     mov v16.8b, v1.8b
-    TBL v24.16b, {v10.16b},v24.16b      //II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
-    Uxtl2 v12.8h, v12.16b               //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
+    TBL v24.16b, {v5.16b},v24.16b       //II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
+    Uxtl2 v19.8h, v19.16b               //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
     //mov v16.d[1],v17.d[0]
     SADDW v18.8h, v18.8h , v16.8b       //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
     //TBL v25.8b, {v10.16b},v25.8b      //II vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
     SMAX v18.8h, v18.8h , v4.8h         //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
-    AND v24.16b, v24.16b , v8.16b       //II edge_idx = vandq_s8(edge_idx, au1_mask)
+    AND v24.16b, v24.16b , v3.16b       //II edge_idx = vandq_s8(edge_idx, au1_mask)
     mov v25.d[0],v24.d[1]
     UMIN v18.8h, v18.8h , v6.8h         //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))
     UZP1 v1.8b, v24.8b, v25.8b
@@ -262,16 +262,16 @@ PU1_SRC_LOOP:
     mov v24.8b, v1.8b
     //mov v24.d[1],v25.d[0]
-    SADDW v12.8h, v12.8h , v17.8b       //pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
-    TBL v26.8b, {v11.16b},v24.8b        //II offset = vtbl1_s8(offset_tbl_u, vget_low_s8(edge_idx))
-    SMAX v12.8h, v12.8h , v4.8h         //pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
+    SADDW v19.8h, v19.8h , v17.8b       //pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
+    TBL v26.8b, {v7.16b},v24.8b         //II offset = vtbl1_s8(offset_tbl_u, vget_low_s8(edge_idx))
+    SMAX v19.8h, v19.8h , v4.8h         //pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
-    UMIN v12.8h, v12.8h , v6.8h         //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))
+    UMIN v19.8h, v19.8h , v6.8h         //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))
     TBL v27.8b, {v0.16b},v25.8b         //II
-    xtn v14.8b, v18.8h                  //vmovn_s16(pi2_tmp_cur_row.val[0])
+    xtn v21.8b, v18.8h                  //vmovn_s16(pi2_tmp_cur_row.val[0])
     //mov v27.d[0],v26.d[1]
-    xtn v15.8b, v12.8h                  //vmovn_s16(pi2_tmp_cur_row.val[1])
+    xtn v23.8b, v19.8h                  //vmovn_s16(pi2_tmp_cur_row.val[1])
     ZIP1 v1.8b, v26.8b, v27.8b
     ZIP2 v27.8b, v26.8b, v27.8b         //II
     mov v26.8b, v1.8b
@@ -295,7 +295,9 @@ PU1_SRC_LOOP:
     Uxtl2 v30.8h, v30.16b               //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
     SADDW v30.8h, v30.8h , v27.8b       //II pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
-    ST1 {v14.8b, v15.8b},[x12],x1       //vst1q_u8(pu1_src_cpy, pu1_cur_row)
+    ST1 {v21.8b},[x12],#8               //vst1q_u8(pu1_src_cpy, pu1_cur_row)
+    ST1 {v23.8b},[x12],x1
+    SUB x12,x12,#8
     SMAX v30.8h, v30.8h , v4.8h         //II pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
     SUBS x4,x4,#1                       //Decrement row by 1
@@ -326,107 +328,107 @@ WIDTH_RESIDUE:
     CMP x8,x9                           //if(wd_rem == wd)
     BNE AU1_MASK_FF_RESIDUE             //jump to else part
     LDRB w12,[x7]                       //pu1_avail[0]
-    mov v8.8b[0], w12                   //vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
-    mov v8.8b[1], w12                   //vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
+    mov v3.8b[0], w12                   //vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
+    mov v3.8b[1], w12                   //vsetq_lane_s8(pu1_avail[0], au1_mask, 0)
     B SKIP_AU1_MASK_FF_RESIDUE          //Skip the else part
 AU1_MASK_FF_RESIDUE:
     MOV x12,#-1                         //move -1 to x12
-    mov v8.4h[0], w12                   //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
+    mov v3.4h[0], w12                   //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
 SKIP_AU1_MASK_FF_RESIDUE:
     LDRB w12,[x7,#1]                    //pu1_avail[1]
-    mov v8.8b[6], w12                   //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
-    mov v8.8b[7], w12                   //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
+    mov v3.8b[6], w12                   //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
+    mov v3.8b[7], w12                   //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
     MOV x12,x0                          //pu1_src_cpy = pu1_src
     MOV x4,x10                          //move ht to x4 for loop count
 PU1_SRC_LOOP_RESIDUE:
     LDRH w11,[x2]                       //load pu1_src_left
-    LD1 {v12.16b},[x12],x1              //pu1_cur_row = vld1q_u8(pu1_src_cpy)
+    LD1 {v19.16b},[x12],x1              //pu1_cur_row = vld1q_u8(pu1_src_cpy)
     //LD1 {v13.8b},[x12],x1             //pu1_cur_row = vld1q_u8(pu1_src_cpy)
     //SUB x12, x12,#8
     SUB x5,x9,#2                        //wd - 2
     SUB x14,x10,x4                      //(ht - row)
-    mov v14.4h[7], w11                  //vsetq_lane_u8(pu1_src_left[ht - row], pu1_cur_row_tmp, 15)
+    mov v21.4h[7], w11                  //vsetq_lane_u8(pu1_src_left[ht - row], pu1_cur_row_tmp, 15)
     LSL x14,x14,#1                      //(ht - row) * 2
     LD1 {v30.16b},[x12]                 //II pu1_cur_row = vld1q_u8(pu1_src_cpy)
     //LD1 {v31.8b},[x12]                //II pu1_cur_row = vld1q_u8(pu1_src_cpy)
     //SUB x12, x12,#8
-    EXT v14.16b, v14.16b , v12.16b,#14  //pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 15)
+    EXT v21.16b, v21.16b , v19.16b,#14  //pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 15)
     SUB x12,x12,x1
     LDRH w11,[x2,#2]                    //II load pu1_src_left
-    cmhi v16.16b, v12.16b , v14.16b     //vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
+    cmhi v16.16b, v19.16b , v21.16b     //vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
     mul x14, x14, x1                    //(ht - row) * 2 * src_strd
-    cmhi v18.16b, v14.16b , v12.16b     //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
+    cmhi v18.16b, v21.16b , v19.16b     //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
     mov v28.4h[7], w11                  //II vsetq_lane_u8(pu1_src_left[ht - row], pu1_cur_row_tmp, 15)
     LDRB w11,[x12,#16]                  //pu1_src_cpy[16]
     SUB v20.16b, v18.16b , v16.16b      //sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
     ADD x5,x14,x5                       //(ht - row) * 2 * src_strd + (wd - 2)
-    mov v14.8b[0], w11                  //pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)
+    mov v21.8b[0], w11                  //pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)
     EXT v28.16b, v28.16b , v30.16b,#14  //II pu1_cur_row_tmp = vextq_u8(pu1_cur_row_tmp, pu1_cur_row, 15)
     LDRB w11,[x12,#17]                  //pu1_src_cpy[17]
     cmhi v26.16b, v30.16b , v28.16b     //II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
     LDRH w14,[x6, x5]                   //pu1_src_org[(ht - row) * 2* src_strd + (wd - 2)]
-    mov v14.8b[1], w11                  //pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[17], pu1_cur_row_tmp, 1)
+    mov v21.8b[1], w11                  //pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[17], pu1_cur_row_tmp, 1)
     cmhi v24.16b, v28.16b , v30.16b     //II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
     ADD x12,x12,x1
     STRH w14,[x2],#2                    //pu1_src_left[(ht - row) * 2] = au1_src_left_tmp[(ht - row) * 2]
-    EXT v14.16b, v12.16b , v14.16b,#2   //pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 1)
+    EXT v21.16b, v19.16b , v21.16b,#2   //pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 1)
     LDRB w11,[x12,#16]                  //II pu1_src_cpy[16]
-    cmhi v16.16b, v12.16b , v14.16b     //vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
+    cmhi v16.16b, v19.16b , v21.16b     //vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
     mov v28.8b[0], w11                  //II pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[16], pu1_cur_row_tmp, 0)
     LDRB w11,[x12,#17]                  //II pu1_src_cpy[17]
-    cmhi v18.16b, v14.16b , v12.16b     //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
+    cmhi v18.16b, v21.16b , v19.16b     //vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
     SUB x4,x4,#1                        //II Decrement row by 1
     SUB v22.16b, v18.16b , v16.16b      //sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
     mov v28.8b[1], w11                  //II pu1_cur_row_tmp = vsetq_lane_u8(pu1_src_cpy[17], pu1_cur_row_tmp, 1)
     SUB x12,x12,x1
-    ADD v14.16b, v2.16b , v20.16b       //edge_idx = vaddq_s8(const_2, sign_left)
+    ADD v21.16b, v2.16b , v20.16b       //edge_idx = vaddq_s8(const_2, sign_left)
     EXT v28.16b, v30.16b , v28.16b,#2   //II pu1_cur_row_tmp = vextq_u8(pu1_cur_row, pu1_cur_row_tmp, 1)
-    ADD v14.16b, v14.16b , v22.16b      //edge_idx = vaddq_s8(edge_idx, sign_right)
+    ADD v21.16b, v21.16b , v22.16b      //edge_idx = vaddq_s8(edge_idx, sign_right)
     SUB v20.16b, v24.16b , v26.16b      //II sign_left = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
-    TBL v14.16b, {v10.16b},v14.16b      //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
+    TBL v21.16b, {v5.16b},v21.16b       //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
     cmhi v26.16b, v30.16b , v28.16b     //II vcgtq_u8(pu1_cur_row, pu1_cur_row_tmp)
     cmhi v24.16b, v28.16b , v30.16b     //II vcltq_u8(pu1_cur_row, pu1_cur_row_tmp)
     //TBL v15.8b, {v10.16b},v15.8b      //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
     SUB v22.16b, v24.16b , v26.16b      //II sign_right = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
-    AND v14.16b, v14.16b , v8.16b       //edge_idx = vandq_s8(edge_idx, au1_mask)
-    mov v15.d[0],v14.d[1]
-    UZP1 v1.8b, v14.8b, v15.8b
-    UZP2 v15.8b, v14.8b, v15.8b
-    mov v14.8b, v1.8b
+    AND v21.16b, v21.16b , v3.16b       //edge_idx = vandq_s8(edge_idx, au1_mask)
+    mov v23.d[0],v21.d[1]
+    UZP1 v1.8b, v21.8b, v23.8b
+    UZP2 v23.8b, v21.8b, v23.8b
+    mov v21.8b, v1.8b
     ADD v28.16b, v2.16b , v20.16b       //II edge_idx = vaddq_s8(const_2, sign_left)
-    TBL v16.8b, {v11.16b},v14.8b        //offset = vtbl1_s8(offset_tbl_u, vget_low_s8(edge_idx))
+    TBL v16.8b, {v7.16b},v21.8b         //offset = vtbl1_s8(offset_tbl_u, vget_low_s8(edge_idx))
     ADD v28.16b, v28.16b , v22.16b      //II edge_idx = vaddq_s8(edge_idx, sign_right)
-    Uxtl v18.8h, v12.8b                 //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
-    TBL v17.8b, {v0.16b},v15.8b
+    Uxtl v18.8h, v19.8b                 //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
+    TBL v17.8b, {v0.16b},v23.8b
     Uxtl v24.8h, v30.8b                 //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
     ZIP1 v1.8b, v16.8b, v17.8b
     ZIP2 v17.8b, v16.8b, v17.8b
     mov v16.8b, v1.8b
-    TBL v28.16b, {v10.16b},v28.16b      //II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
+    TBL v28.16b, {v5.16b},v28.16b       //II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
     SADDW v18.8h, v18.8h , v16.8b       //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
     SMAX v18.8h, v18.8h , v4.8h         //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
@@ -434,7 +436,7 @@ PU1_SRC_LOOP_RESIDUE:
     UMIN v18.8h, v18.8h , v6.8h         //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))
     xtn v18.8b, v18.8h                  //vmovn_s16(pi2_tmp_cur_row.val[0])
-    AND v28.16b, v28.16b , v8.16b       //II edge_idx = vandq_s8(edge_idx, au1_mask)
+    AND v28.16b, v28.16b , v3.16b       //II edge_idx = vandq_s8(edge_idx, au1_mask)
     mov v29.d[0],v28.d[1]
     SUB x5,x9,#2                        //II wd - 2
     UZP1 v1.8b, v28.8b, v29.8b
@@ -443,7 +445,7 @@ PU1_SRC_LOOP_RESIDUE:
     SUB x14,x10,x4                      //II (ht - row)
     LSL x14,x14,#1                      //II (ht - row) * 2
-    TBL v26.8b, {v11.16b},v28.8b        //II offset = vtbl1_s8(offset_tbl_u, vget_low_s8(edge_idx))
+    TBL v26.8b, {v7.16b},v28.8b         //II offset = vtbl1_s8(offset_tbl_u, vget_low_s8(edge_idx))
     mul x14, x14, x1                    //II (ht - row) * 2 * src_strd
     ADD x5,x14,x5                       //II (ht - row) * 2 * src_strd + (wd - 2)
@@ -474,7 +476,7 @@ END_LOOPS:
     ldp x23, x24,[sp],#16
     ldp x21, x22,[sp],#16
     ldp x19, x20,[sp],#16
-    pop_v_regs
+    ret
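The pattern behind this diff: the push_v_regs/pop_v_regs save/restore pair is dropped, and every SIMD register it had been protecting (v8, v10, v11, v12, v14, v15) is renamed to a caller-saved one (v3, v5, v7, v19, v21, v23 respectively). Under the AAPCS64 calling convention only the low 64 bits of v8-v15 must be preserved by a callee; v0-v7 and v16-v31 may be clobbered freely, so a routine that keeps all its scratch state there needs no vector prologue or epilogue at all. A minimal sketch of the idea (hypothetical function, not part of this patch):

    // Scratch state confined to caller-saved vector registers,
    // so no vector save/restore is needed around the body.
        .text
        .global sketch_mask_block
    sketch_mask_block:                      // x0 = pointer to a 16-byte block
        movi    v3.16b, #0xFF               // mask in caller-saved v3
        ld1     {v19.16b}, [x0]             // data in caller-saved v19
        and     v19.16b, v19.16b, v3.16b    // apply mask in place
        st1     {v19.16b}, [x0]
        ret                                 // nothing to restore

One visible side effect of the renaming sits in the @@ -295,7 +295,9 hunk: v14/v15 were consecutive, so the old code stored both 8-byte halves with a single ST1 register-pair store, while the new v21/v23 are not consecutive and the store has to be split into two 8-byte ST1s plus a SUB to rewind the pointer (net post-increment of x1, same as before).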