Diffstat (limited to 'common/arm64/ihevc_sao_edge_offset_class2_chroma.s')
-rw-r--r--  common/arm64/ihevc_sao_edge_offset_class2_chroma.s | 174
1 file changed, 87 insertions(+), 87 deletions(-)
diff --git a/common/arm64/ihevc_sao_edge_offset_class2_chroma.s b/common/arm64/ihevc_sao_edge_offset_class2_chroma.s
index 2fa7c22..8e286b4 100644
--- a/common/arm64/ihevc_sao_edge_offset_class2_chroma.s
+++ b/common/arm64/ihevc_sao_edge_offset_class2_chroma.s
@@ -76,7 +76,7 @@ ihevc_sao_edge_offset_class2_chroma_av8:
ldr x9,[sp,#8]
ldr w10,[sp,#16]
ldr w11,[sp,#24]
- push_v_regs
+
// STMFD sp!, {x4-x12, x14} //stack stores the values of the arguments
@@ -322,7 +322,7 @@ PU1_AVAIL_3_LOOP:
LDR x2, [x2, #:got_lo12:gi1_table_edge_idx]
MOV x6,x7 //move wd to x6 loop_count
- movi v8.16b, #0XFF //au1_mask = vdupq_n_s8(-1)
+ movi v1.16b, #0XFF //au1_mask = vdupq_n_s8(-1)
CMP x7,#16 //Compare wd with 16
BLT WIDTH_RESIDUE //If not jump to WIDTH_RESIDUE where loop is unrolled for 8 case
@@ -338,19 +338,19 @@ WIDTH_LOOP_16:
MOV x20,#-1
csel x8, x20, x8,NE
- mov v8.8b[0], w8 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
+ mov v1.8b[0], w8 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
CMP x6,#16 //if(col == 16)
- mov v8.8b[1], w8 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
+ mov v1.8b[1], w8 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
BNE SKIP_AU1_MASK_VAL
LDRB w8,[x5,#1] //pu1_avail[1]
- mov v8.16b[14], w8 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
- mov v8.16b[15], w8 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
+ mov v1.16b[14], w8 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
+ mov v1.16b[15], w8 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
SKIP_AU1_MASK_VAL:
LDRB w9,[x5,#2] //pu1_avail[2]
- LD1 {v12.16b},[x0] //pu1_cur_row = vld1q_u8(pu1_src)
+ LD1 {v5.16b},[x0] //pu1_cur_row = vld1q_u8(pu1_src)
//LD1 {v13.8b},[x0] //pu1_cur_row = vld1q_u8(pu1_src)
//SUB x0, x0,#8
CMP x9,#0
@@ -366,17 +366,17 @@ SKIP_AU1_MASK_VAL:
ADD x3,x3,#16
ADD x5,sp,#0x4B //*au1_src_left_tmp
- LD1 {v10.16b},[x8] //pu1_top_row = vld1q_u8(pu1_src - src_strd - 2) || vld1q_u8(pu1_src_top_cpy - 2)
+ LD1 {v3.16b},[x8] //pu1_top_row = vld1q_u8(pu1_src - src_strd - 2) || vld1q_u8(pu1_src_top_cpy - 2)
//LD1 {v11.8b},[x8] //pu1_top_row = vld1q_u8(pu1_src - src_strd - 2) || vld1q_u8(pu1_src_top_cpy - 2)
//SUB x8, x8,#8
SUB x7,x7,x6 //(wd - col)
ADD x7,x7,#14 //15 + (wd - col)
- cmhi v14.16b, v12.16b , v10.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
+ cmhi v17.16b, v5.16b , v3.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
mov x8, x26 //Loads *pu1_src
ADD x7,x8,x7 //pu1_src[0 * src_strd + 15 + (wd - col)]
- cmhi v16.16b, v10.16b , v12.16b //vcltq_u8(pu1_cur_row, pu1_top_row)
+ cmhi v16.16b, v3.16b , v5.16b //vcltq_u8(pu1_cur_row, pu1_top_row)
AU1_SRC_LEFT_LOOP:
LDRH w8,[x7] //load the value and increment by src_strd
@@ -388,7 +388,7 @@ AU1_SRC_LEFT_LOOP:
BNE AU1_SRC_LEFT_LOOP
ADD x8,x0,x1 //I *pu1_src + src_strd
- SUB v14.16b, v16.16b , v14.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
+ SUB v17.16b, v16.16b , v17.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
MOV x7,x12 //row count, move ht_tmp to x7
LD1 {v16.16b},[x8] //I pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd)
@@ -430,35 +430,35 @@ AU1_SRC_LEFT_LOOP:
csel x8, x20, x8,GT //I SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2])
CMP x4,#0 //I
- mov v14.8b[0], w8 //I sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2]), sign_up, 0)
+ mov v17.8b[0], w8 //I sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2]), sign_up, 0)
movn x20,#0
csel x4, x20, x4,LT //I
MOV x20,#1
csel x4, x20, x4,GT //I SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2])
- mov v14.8b[1], w4 //I sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]), sign_up, 1)
+ mov v17.8b[1], w4 //I sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]), sign_up, 1)
SIGN_UP_CHANGE_DONE:
LD1 {v30.8b},[x2] //edge_idx_tbl = vld1_s8(gi1_table_edge_idx)
- cmhi v20.16b, v12.16b , v18.16b //I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
+ cmhi v20.16b, v5.16b , v18.16b //I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
- cmhi v22.16b, v18.16b , v12.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
+ cmhi v22.16b, v18.16b , v5.16b //I vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
SUB v22.16b, v22.16b , v20.16b //I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
- ADD v18.16b, v0.16b , v14.16b //I edge_idx = vaddq_s8(const_2, sign_up)
+ ADD v18.16b, v0.16b , v17.16b //I edge_idx = vaddq_s8(const_2, sign_up)
ADD v18.16b, v18.16b , v22.16b //I edge_idx = vaddq_s8(edge_idx, sign_down)
TBL v18.16b, {v30.16b},v18.16b //I vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
- NEG v14.16b, v22.16b //I sign_up = vnegq_s8(sign_down)
+ NEG v17.16b, v22.16b //I sign_up = vnegq_s8(sign_down)
//TBL v19.8b, {v30.16b},v19.8b //I vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
- EXT v14.16b, v14.16b , v14.16b,#14 //I sign_up = vextq_s8(sign_up, sign_up, 14)
+ EXT v17.16b, v17.16b , v17.16b,#14 //I sign_up = vextq_s8(sign_up, sign_up, 14)
- Uxtl v20.8h, v12.8b //I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
- AND v22.16b, v18.16b , v8.16b //I edge_idx = vandq_s8(edge_idx, au1_mask)
+ Uxtl v20.8h, v5.8b //I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
+ AND v22.16b, v18.16b , v1.16b //I edge_idx = vandq_s8(edge_idx, au1_mask)
mov v23.d[0],v22.d[1]
- Uxtl2 v18.8h, v12.16b //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
+ Uxtl2 v18.8h, v5.16b //I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
UZP1 v31.8b, v22.8b, v23.8b
UZP2 v23.8b, v22.8b, v23.8b //I
mov v22.8b,v31.8b
@@ -469,7 +469,7 @@ SIGN_UP_CHANGE_DONE:
ZIP2 v23.8b, v22.8b, v23.8b //I
mov v22.8b,v31.8b
- mov v12.16b, v16.16b //I pu1_cur_row = pu1_next_row
+ mov v5.16b, v16.16b //I pu1_cur_row = pu1_next_row
SADDW v20.8h, v20.8h , v22.8b //I pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
SMAX v20.8h, v20.8h , v2.8h //I pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
@@ -521,17 +521,17 @@ PU1_SRC_LOOP:
movn x20,#0
csel x8, x20, x8,LT //II
- cmhi v22.16b, v12.16b , v28.16b //II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
+ cmhi v22.16b, v5.16b , v28.16b //II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
MOV x20,#1
csel x8, x20, x8,GT //II SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2])
sub x13,x9,#1
LDRB w5,[x13] //II load the value
- mov v14.8b[0], w8 //II sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2]), sign_up, 0)
+ mov v17.8b[0], w8 //II sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2]), sign_up, 0)
SUB x7,x7,#1 //II Decrement the ht_tmp loop count by 1
SUB x11,x11,x5 //II pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]
- cmhi v24.16b, v28.16b , v12.16b //II vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
+ cmhi v24.16b, v28.16b , v5.16b //II vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
CMP x11,#0 //II
movn x20,#0
@@ -545,11 +545,11 @@ PU1_SRC_LOOP:
SUB x5,x12,x7 //III ht_tmp - row
ADD x10,x0,x1
- mov v14.8b[1], w11 //II sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]), sign_up, 1)
+ mov v17.8b[1], w11 //II sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]), sign_up, 1)
LSL x5,x5,#1 //III (ht_tmp - row) * 2
ADD x9,x14,x5 //III pu1_src_left_cpy[(ht_tmp - row) * 2]
- ADD v26.16b, v0.16b , v14.16b //II edge_idx = vaddq_s8(const_2, sign_up)
+ ADD v26.16b, v0.16b , v17.16b //II edge_idx = vaddq_s8(const_2, sign_up)
LDRB w10,[x10,#1] //III pu1_src_cpy[0]
sub x13,x9,#2
@@ -562,24 +562,24 @@ PU1_SRC_LOOP:
sub x13,x9,#1
LDRB w9,[x13] //III load the value
TBL v26.16b, {v22.16b},v26.16b //II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
- NEG v14.16b, v24.16b //II sign_up = vnegq_s8(sign_down)
+ NEG v17.16b, v24.16b //II sign_up = vnegq_s8(sign_down)
movn x20,#0
csel x4, x20, x4,LT //III
SUB x10,x10,x9 //III pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]
//TBL v27.8b, {v22.16b},v27.8b //II vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
- EXT v14.16b, v14.16b , v14.16b,#14 //II sign_up = vextq_s8(sign_up, sign_up, 14)
+ EXT v17.16b, v17.16b , v17.16b,#14 //II sign_up = vextq_s8(sign_up, sign_up, 14)
MOV x20,#1
csel x4, x20, x4,GT //III SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2])
- AND v26.16b, v26.16b , v8.16b //II edge_idx = vandq_s8(edge_idx, au1_mask)
+ AND v26.16b, v26.16b , v1.16b //II edge_idx = vandq_s8(edge_idx, au1_mask)
CMP x10,#0 //III
mov v27.d[0],v26.d[1]
UZP1 v31.8b, v26.8b, v27.8b
UZP2 v27.8b, v26.8b, v27.8b //II
mov v26.8b,v31.8b
- mov v14.8b[0], w4 //III sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2]), sign_up, 0)
+ mov v17.8b[0], w4 //III sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2]), sign_up, 0)
movn x20,#0
csel x10, x20, x10,LT //III
@@ -592,13 +592,13 @@ PU1_SRC_LOOP:
TBL v25.8b, {v7.16b},v27.8b //II
SUB v22.16b, v22.16b , v20.16b //III sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
- mov v14.8b[1], w10 //III sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]), sign_up, 1)
+ mov v17.8b[1], w10 //III sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]), sign_up, 1)
ZIP1 v31.8b, v24.8b, v25.8b
ZIP2 v25.8b, v24.8b, v25.8b //II
mov v24.8b,v31.8b
- Uxtl v28.8h, v12.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
- ADD v18.16b, v0.16b , v14.16b //III edge_idx = vaddq_s8(const_2, sign_up)
+ Uxtl v28.8h, v5.8b //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
+ ADD v18.16b, v0.16b , v17.16b //III edge_idx = vaddq_s8(const_2, sign_up)
LD1 {v20.8b},[x2] //edge_idx_tbl = vld1_s8(gi1_table_edge_idx)
SADDW v28.8h, v28.8h , v24.8b //II pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
@@ -608,13 +608,13 @@ PU1_SRC_LOOP:
UMIN v28.8h, v28.8h , v4.8h //II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))
TBL v18.16b, {v20.16b},v18.16b //III vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
- NEG v14.16b, v22.16b //III sign_up = vnegq_s8(sign_down)
+ NEG v17.16b, v22.16b //III sign_up = vnegq_s8(sign_down)
//TBL v19.8b, {v20.16b},v19.8b //III vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
- EXT v14.16b, v14.16b , v14.16b,#14 //III sign_up = vextq_s8(sign_up, sign_up, 14)
+ EXT v17.16b, v17.16b , v17.16b,#14 //III sign_up = vextq_s8(sign_up, sign_up, 14)
- Uxtl2 v26.8h, v12.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
- AND v18.16b, v18.16b , v8.16b //III edge_idx = vandq_s8(edge_idx, au1_mask)
+ Uxtl2 v26.8h, v5.16b //II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
+ AND v18.16b, v18.16b , v1.16b //III edge_idx = vandq_s8(edge_idx, au1_mask)
mov v19.d[0],v18.d[1]
UZP1 v31.8b, v18.8b, v19.8b
@@ -623,7 +623,7 @@ PU1_SRC_LOOP:
TBL v22.8b, {v6.16b},v18.8b //III
SADDW v26.8h, v26.8h , v25.8b //II pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
- mov v12.16b, v30.16b //III pu1_cur_row = pu1_next_row
+ mov v5.16b, v30.16b //III pu1_cur_row = pu1_next_row
TBL v23.8b, {v7.16b},v19.8b //III
SMAX v26.8h, v26.8h , v2.8h //II pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
@@ -686,35 +686,35 @@ PU1_SRC_LOOP:
LD1 {v30.8b},[x2] //edge_idx_tbl = vld1_s8(gi1_table_edge_idx)
LDRB w11,[x0,#1] //pu1_src_cpy[0]
- mov v14.8b[0], w8 //sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2]), sign_up, 0)
+ mov v17.8b[0], w8 //sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2]), sign_up, 0)
sub x13,x9,#1
LDRB w5,[x13] //load the value
SUB x4,x11,x5 //pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]
- cmhi v22.16b, v12.16b , v18.16b //vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
+ cmhi v22.16b, v5.16b , v18.16b //vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
CMP x4,#0
movn x20,#0
csel x4, x20, x4,LT
- cmhi v24.16b, v18.16b , v12.16b //vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
+ cmhi v24.16b, v18.16b , v5.16b //vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
MOV x20,#1
csel x4, x20, x4,GT //SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2])
- mov v14.8b[1], w4 //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]), sign_up, 1)
+ mov v17.8b[1], w4 //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]), sign_up, 1)
SUB v24.16b, v24.16b , v22.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
- ADD v26.16b, v0.16b , v14.16b //edge_idx = vaddq_s8(const_2, sign_up)
+ ADD v26.16b, v0.16b , v17.16b //edge_idx = vaddq_s8(const_2, sign_up)
ADD v26.16b, v26.16b , v24.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
mov v30.d[1],v30.d[0]
TBL v26.16b, {v30.16b},v26.16b //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
//TBL v27.8b, {v30.16b},v27.8b //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
- Uxtl v20.8h, v12.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
- AND v26.16b, v26.16b , v8.16b //edge_idx = vandq_s8(edge_idx, au1_mask)
+ Uxtl v20.8h, v5.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
+ AND v26.16b, v26.16b , v1.16b //edge_idx = vandq_s8(edge_idx, au1_mask)
mov v27.d[0],v26.d[1]
- Uxtl2 v18.8h, v12.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
+ Uxtl2 v18.8h, v5.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
UZP1 v31.8b, v26.8b, v27.8b
UZP2 v27.8b, v26.8b, v27.8b
mov v26.8b,v31.8b
@@ -771,14 +771,14 @@ WD_16_HT_4_LOOP:
MOV x20,#-1
csel x8, x20, x8,NE
- mov v8.8b[0], w8 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
- mov v8.8b[1], w8 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
+ mov v1.8b[0], w8 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
+ mov v1.8b[1], w8 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
CMP x6,#16 //if(col == 16)
BNE SKIP_AU1_MASK_VAL_WD_16_HT_4
LDRB w8,[x5,#1] //pu1_avail[1]
- mov v8.16b[14], w8 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
- mov v8.16b[15], w8 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
+ mov v1.16b[14], w8 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
+ mov v1.16b[15], w8 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
SKIP_AU1_MASK_VAL_WD_16_HT_4:
LDRB w8,[x5,#2] //pu1_avail[2]
@@ -788,7 +788,7 @@ SKIP_AU1_MASK_VAL_WD_16_HT_4:
csel x8, x20, x8,EQ
csel x8, x3, x8,NE //pu1_src_top_cpy
SUB x8,x8,#2 //pu1_src - src_strd - 2
- LD1 {v10.16b},[x8] //pu1_top_row = vld1q_u8(pu1_src - src_strd - 2) || vld1q_u8(pu1_src_top_cpy - 2)
+ LD1 {v3.16b},[x8] //pu1_top_row = vld1q_u8(pu1_src - src_strd - 2) || vld1q_u8(pu1_src_top_cpy - 2)
//LD1 {v11.8b},[x8] //pu1_top_row = vld1q_u8(pu1_src - src_strd - 2) || vld1q_u8(pu1_src_top_cpy - 2)
//SUB x8, x8,#8
@@ -809,13 +809,13 @@ AU1_SRC_LEFT_LOOP_WD_16_HT_4:
SUBS x4,x4,#1 //decrement the loop count
BNE AU1_SRC_LEFT_LOOP_WD_16_HT_4
- LD1 {v12.16b},[x0] //pu1_cur_row = vld1q_u8(pu1_src)
+ LD1 {v5.16b},[x0] //pu1_cur_row = vld1q_u8(pu1_src)
//LD1 {v13.8b},[x0] //pu1_cur_row = vld1q_u8(pu1_src)
//SUB x0, x0,#8
- cmhi v14.16b, v12.16b , v10.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
- cmhi v16.16b, v10.16b , v12.16b //vcltq_u8(pu1_cur_row, pu1_top_row)
- SUB v14.16b, v16.16b , v14.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
+ cmhi v17.16b, v5.16b , v3.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
+ cmhi v16.16b, v3.16b , v5.16b //vcltq_u8(pu1_cur_row, pu1_top_row)
+ SUB v17.16b, v16.16b , v17.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
movi v18.16b, #0
MOV x7,x12 //row count, move ht_tmp to x7
@@ -851,7 +851,7 @@ SIGN_UP_CHANGE_WD_16_HT_4:
csel x8, x20, x8,LT
MOV x20,#1
csel x8, x20, x8,GT //SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2])
- mov v14.8b[0], w8 //sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2]), sign_up, 0)
+ mov v17.8b[0], w8 //sign_up = sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2]), sign_up, 0)
LDRB w8,[x0,#1] //pu1_src_cpy[0]
sub x13,x9,#1
@@ -862,25 +862,25 @@ SIGN_UP_CHANGE_WD_16_HT_4:
csel x8, x20, x8,LT
MOV x20,#1
csel x8, x20, x8,GT //SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2])
- mov v14.8b[1], w8 //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]), sign_up, 1)
+ mov v17.8b[1], w8 //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]), sign_up, 1)
SIGN_UP_CHANGE_DONE_WD_16_HT_4:
- cmhi v22.16b, v12.16b , v18.16b //vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
- cmhi v24.16b, v18.16b , v12.16b //vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
+ cmhi v22.16b, v5.16b , v18.16b //vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
+ cmhi v24.16b, v18.16b , v5.16b //vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
SUB v24.16b, v24.16b , v22.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
- ADD v26.16b, v0.16b , v14.16b //edge_idx = vaddq_s8(const_2, sign_up)
+ ADD v26.16b, v0.16b , v17.16b //edge_idx = vaddq_s8(const_2, sign_up)
ADD v26.16b, v26.16b , v24.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
LD1 {v22.8b},[x2] //edge_idx_tbl = vld1_s8(gi1_table_edge_idx)
TBL v26.16b, {v22.16b},v26.16b //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
//TBL v27.8b, {v22.16b},v27.8b //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
- AND v26.16b, v26.16b , v8.16b //edge_idx = vandq_s8(edge_idx, au1_mask)
+ AND v26.16b, v26.16b , v1.16b //edge_idx = vandq_s8(edge_idx, au1_mask)
mov v27.d[0],v26.d[1]
- NEG v14.16b, v24.16b //sign_up = vnegq_s8(sign_down)
- EXT v14.16b, v14.16b , v14.16b,#14 //sign_up = vextq_s8(sign_up, sign_up, 14)
+ NEG v17.16b, v24.16b //sign_up = vnegq_s8(sign_down)
+ EXT v17.16b, v17.16b , v17.16b,#14 //sign_up = vextq_s8(sign_up, sign_up, 14)
UZP1 v31.8b, v26.8b, v27.8b
UZP2 v27.8b, v26.8b, v27.8b
@@ -891,12 +891,12 @@ SIGN_UP_CHANGE_DONE_WD_16_HT_4:
ZIP2 v25.8b, v24.8b, v25.8b
mov v24.8b,v31.8b
- Uxtl v28.8h, v12.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
+ Uxtl v28.8h, v5.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
SADDW v28.8h, v28.8h , v24.8b //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
SMAX v28.8h, v28.8h , v2.8h //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
UMIN v28.8h, v28.8h , v4.8h //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))
- Uxtl2 v26.8h, v12.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
+ Uxtl2 v26.8h, v5.16b //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row)))
SADDW v26.8h, v26.8h , v25.8b //pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset)
SMAX v26.8h, v26.8h , v2.8h //pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip)
UMIN v26.8h, v26.8h , v4.8h //pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip))
@@ -906,7 +906,7 @@ SIGN_UP_CHANGE_DONE_WD_16_HT_4:
ST1 { v28.16b},[x0],x1 //vst1q_u8(pu1_src_cpy, pu1_cur_row)
- mov v12.16b, v16.16b //pu1_cur_row = pu1_next_row
+ mov v5.16b, v16.16b //pu1_cur_row = pu1_next_row
SUBS x7,x7,#1 //Decrement the ht_tmp loop count by 1
BNE PU1_SRC_LOOP_WD_16_HT_4 //If not equal jump to PU1_SRC_LOOP_WD_16_HT_4
@@ -936,12 +936,12 @@ WIDTH_RESIDUE:
MOV x20,#-1
csel x8, x20, x8,NE
- mov v8.8b[0], w8 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
- mov v8.8b[1], w8 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
+ mov v1.8b[0], w8 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
+ mov v1.8b[1], w8 //au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
LDRB w8,[x5,#1] //pu1_avail[1]
- mov v8.8b[6], w8 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
- mov v8.8b[7], w8 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
+ mov v1.8b[6], w8 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
+ mov v1.8b[7], w8 //au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15)
LDRB w8,[x5,#2] //pu1_avail[2]
CMP x8,#0
@@ -950,7 +950,7 @@ WIDTH_RESIDUE:
csel x8, x20, x8,EQ
csel x8, x3, x8,NE
SUB x8,x8,#2 //pu1_src - src_strd - 2
- LD1 {v10.16b},[x8] //pu1_top_row = vld1q_u8(pu1_src - src_strd - 2)
+ LD1 {v3.16b},[x8] //pu1_top_row = vld1q_u8(pu1_src - src_strd - 2)
//LD1 {v11.8b},[x8] //pu1_top_row = vld1q_u8(pu1_src - src_strd - 2)
//SUB x8, x8,#8
@@ -968,13 +968,13 @@ AU1_SRC_LEFT_LOOP_RESIDUE:
SUBS x4,x4,#1 //decrement the loop count
BNE AU1_SRC_LEFT_LOOP_RESIDUE
- LD1 {v12.16b},[x0] //pu1_cur_row = vld1q_u8(pu1_src)
+ LD1 {v5.16b},[x0] //pu1_cur_row = vld1q_u8(pu1_src)
//LD1 {v13.8b},[x0] //pu1_cur_row = vld1q_u8(pu1_src)
//SUB x0, x0,#8
- cmhi v14.16b, v12.16b , v10.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
- cmhi v16.16b, v10.16b , v12.16b //vcltq_u8(pu1_cur_row, pu1_top_row)
- SUB v14.16b, v16.16b , v14.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
+ cmhi v17.16b, v5.16b , v3.16b //vcgtq_u8(pu1_cur_row, pu1_top_row)
+ cmhi v16.16b, v3.16b , v5.16b //vcltq_u8(pu1_cur_row, pu1_top_row)
+ SUB v17.16b, v16.16b , v17.16b //sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
MOV x7,x12 //row count, move ht_tmp to x7
PU1_SRC_LOOP_RESIDUE:
@@ -1009,7 +1009,7 @@ SIGN_UP_CHANGE_RESIDUE:
csel x8, x20, x8,LT
MOV x20,#1
csel x8, x20, x8,GT //SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2])
- mov v14.8b[0], w8 //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2]), sign_up, 0)
+ mov v17.8b[0], w8 //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[0] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2]), sign_up, 0)
LDRB w8,[x0,#1] //pu1_src_cpy[0]
sub x13,x9,#1
@@ -1020,14 +1020,14 @@ SIGN_UP_CHANGE_RESIDUE:
csel x8, x20, x8,LT
MOV x20,#1
csel x8, x20, x8,GT //SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2])
- mov v14.8b[1], w8 //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]), sign_up, 1)
+ mov v17.8b[1], w8 //sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[1] - pu1_src_left_cpy[(ht_tmp - 1 - row) * 2 + 1]), sign_up, 1)
SIGN_UP_CHANGE_DONE_RESIDUE:
- cmhi v22.16b, v12.16b , v18.16b //vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
- cmhi v24.16b, v18.16b , v12.16b //vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
+ cmhi v22.16b, v5.16b , v18.16b //vcgtq_u8(pu1_cur_row, pu1_next_row_tmp)
+ cmhi v24.16b, v18.16b , v5.16b //vcltq_u8(pu1_cur_row, pu1_next_row_tmp)
SUB v24.16b, v24.16b , v22.16b //sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt))
- ADD v26.16b, v0.16b , v14.16b //edge_idx = vaddq_s8(const_2, sign_up)
+ ADD v26.16b, v0.16b , v17.16b //edge_idx = vaddq_s8(const_2, sign_up)
ADD v26.16b, v26.16b , v24.16b //edge_idx = vaddq_s8(edge_idx, sign_down)
LD1 {v22.8b},[x2] //edge_idx_tbl = vld1_s8(gi1_table_edge_idx)
@@ -1035,11 +1035,11 @@ SIGN_UP_CHANGE_DONE_RESIDUE:
TBL v26.16b, {v22.16b},v26.16b //vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx))
//TBL v27.8b, {v22.16b},v27.8b //vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx))
- AND v26.16b, v26.16b , v8.16b //edge_idx = vandq_s8(edge_idx, au1_mask)
+ AND v26.16b, v26.16b , v1.16b //edge_idx = vandq_s8(edge_idx, au1_mask)
mov v27.d[0],v26.d[1]
- NEG v14.16b, v24.16b //sign_up = vnegq_s8(sign_down)
- EXT v14.16b, v14.16b , v14.16b,#14 //sign_up = vextq_s8(sign_up, sign_up, 14)
+ NEG v17.16b, v24.16b //sign_up = vnegq_s8(sign_down)
+ EXT v17.16b, v17.16b , v17.16b,#14 //sign_up = vextq_s8(sign_up, sign_up, 14)
UZP1 v31.8b, v26.8b, v27.8b
UZP2 v27.8b, v26.8b, v27.8b
@@ -1050,7 +1050,7 @@ SIGN_UP_CHANGE_DONE_RESIDUE:
ZIP2 v25.8b, v24.8b, v25.8b
mov v24.8b,v31.8b
- Uxtl v28.8h, v12.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
+ Uxtl v28.8h, v5.8b //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row)))
SADDW v28.8h, v28.8h , v24.8b //pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset)
SMAX v28.8h, v28.8h , v2.8h //pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip)
UMIN v28.8h, v28.8h , v4.8h //pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip))
@@ -1059,7 +1059,7 @@ SIGN_UP_CHANGE_DONE_RESIDUE:
ST1 {v28.8b},[x0],x1 //vst1q_u8(pu1_src_cpy, pu1_cur_row)
- mov v12.16b, v16.16b //pu1_cur_row = pu1_next_row
+ mov v5.16b, v16.16b //pu1_cur_row = pu1_next_row
SUBS x7,x7,#1 //Decrement the ht_tmp loop count by 1
BNE PU1_SRC_LOOP_RESIDUE //If not equal jump to PU1_SRC_LOOP
@@ -1113,7 +1113,7 @@ END_LOOPS:
ldp x23, x24,[sp],#16
ldp x21, x22,[sp],#16
ldp x19, x20,[sp],#16
- pop_v_regs
+
ret