author | mtklein <mtklein@chromium.org> | 2015-06-22 11:00:47 -0700
---|---|---
committer | Steve Kondik <steve@cyngn.com> | 2016-06-27 17:33:59 -0700
commit | 66b1265cd8ce01288de9be29970719e519a3dfd8 (patch) |
tree | 1560054832c2e9c0fd0f63d3f31f49d6d9704e57 |
parent | d763b7472ad70d657fe20be6b3ed5275f5d72b6f (diff) |
Use vmulq_n_u32(..., 0x01010101) to distribute alphas.
This seems to make alphas() faster and Load[24]Alphas() no slower.
The change is particularly noticeable on xfermodes that call alphas()
twice (on src and dst), with a 10-12% speedup.
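The speedup rests on the multiply-as-broadcast identity: 0x01010101 has a 1 in every byte position, so for any 8-bit value a, a * 0x01010101 == a | a<<8 | a<<16 | a<<24, and no carry can cross a byte boundary. A single multiply therefore replaces the old two-round shift-and-OR chain. A minimal standalone sketch (plain C++, not Skia code; the helper names are illustrative) verifying the identity for every byte value:

```cpp
#include <cstdint>
#include <cstdio>

// Multiply-based byte broadcast: the scalar identity behind
// vmulq_n_u32(as, 0x01010101) in the patch below.
static uint32_t broadcast_mul(uint8_t a) { return uint32_t(a) * 0x01010101u; }

static uint32_t broadcast_shift(uint8_t a) {  // the old shift-and-OR approach
    uint32_t x = a;   // ___a
    x |= x << 8;      // __aa
    x |= x << 16;     // aaaa
    return x;
}

int main() {
    for (int a = 0; a < 256; a++) {
        if (broadcast_mul(uint8_t(a)) != broadcast_shift(uint8_t(a))) {
            std::printf("mismatch at %d\n", a);
            return 1;
        }
    }
    std::printf("identity holds for all 256 byte values\n");
    return 0;
}
```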
Xfermode_Difference_aa 29ms -> 28.4ms 0.98x
Xfermode_DstATop_aa 27.2ms -> 26.7ms 0.98x
Xfermode_Xor_aa 27.2ms -> 26.5ms 0.98x
Xfermode_DstOver 23.6ms -> 22.9ms 0.97x
Xfermode_DstOver_aa 27.8ms -> 26.8ms 0.96x
Xfermode_DstOut 22.6ms -> 21.7ms 0.96x
Xfermode_Multiply_aa 30ms -> 28.5ms 0.95x
Xfermode_DstOut_aa 26.1ms -> 24.8ms 0.95x
Xfermode_DstIn_aa 25.4ms -> 24.1ms 0.95x
Xfermode_DstATop 28.7ms -> 26ms 0.9x
Xfermode_Multiply 35.5ms -> 31.3ms 0.88x
Xfermode_Difference 31.8ms -> 27.7ms 0.87x
Xfermode_Xor 30.1ms -> 26.1ms 0.87x
BUG=skia:
Review URL: https://codereview.chromium.org/1203513002
-rw-r--r-- | src/opts/Sk4px_NEON.h | 35 |
1 file changed, 14 insertions(+), 21 deletions(-)
diff --git a/src/opts/Sk4px_NEON.h b/src/opts/Sk4px_NEON.h
index 644a71f783..9401864697 100644
--- a/src/opts/Sk4px_NEON.h
+++ b/src/opts/Sk4px_NEON.h
@@ -52,33 +52,26 @@ inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
 }
 
 inline Sk4px Sk4px::alphas() const {
-    static_assert(SK_A32_SHIFT == 24, "This method assumes little-endian.");
-    auto as = vshrq_n_u32((uint32x4_t)this->fVec, 24);  // ___3 ___2 ___1 ___0
-    as = vorrq_u32(as, vshlq_n_u32(as,  8));            // __33 __22 __11 __11
-    as = vorrq_u32(as, vshlq_n_u32(as, 16));            // 3333 2222 1111 1111
-    return Sk16b((uint8x16_t)as);
+    auto as = vshrq_n_u32((uint32x4_t)fVec, SK_A32_SHIFT);  // ___3 ___2 ___1 ___0
+    return Sk16b((uint8x16_t)vmulq_n_u32(as, 0x01010101));  // 3333 2222 1111 0000
 }
 
 inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
-    uint8x16_t a8 = vdupq_n_u8(0);                   // ____ ____ ____ ____
-    a8 = vld1q_lane_u8(a+0, a8,  0);                 // ____ ____ ____ ___0
-    a8 = vld1q_lane_u8(a+1, a8,  4);                 // ____ ____ ___1 ___0
-    a8 = vld1q_lane_u8(a+2, a8,  8);                 // ____ ___2 ___1 ___0
-    a8 = vld1q_lane_u8(a+3, a8, 12);                 // ___3 ___2 ___1 ___0
-    auto a32 = (uint32x4_t)a8;                       //
-    a32 = vorrq_u32(a32, vshlq_n_u32(a32,  8));      // __33 __22 __11 __00
-    a32 = vorrq_u32(a32, vshlq_n_u32(a32, 16));      // 3333 2222 1111 0000
-    return Sk16b((uint8x16_t)a32);
+    uint8x16_t a8 = vdupq_n_u8(0);                          // ____ ____ ____ ____
+    a8 = vld1q_lane_u8(a+0, a8,  0);                        // ____ ____ ____ ___0
+    a8 = vld1q_lane_u8(a+1, a8,  4);                        // ____ ____ ___1 ___0
+    a8 = vld1q_lane_u8(a+2, a8,  8);                        // ____ ___2 ___1 ___0
+    a8 = vld1q_lane_u8(a+3, a8, 12);                        // ___3 ___2 ___1 ___0
+    auto a32 = (uint32x4_t)a8;                              //
+    return Sk16b((uint8x16_t)vmulq_n_u32(a32, 0x01010101)); // 3333 2222 1111 0000
 }
 
 inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
-    uint8x16_t a8 = vdupq_n_u8(0);                   // ____ ____ ____ ____
-    a8 = vld1q_lane_u8(a+0, a8,  0);                 // ____ ____ ____ ___0
-    a8 = vld1q_lane_u8(a+1, a8,  4);                 // ____ ____ ___1 ___0
-    auto a32 = (uint32x4_t)a8;                       //
-    a32 = vorrq_u32(a32, vshlq_n_u32(a32,  8));      // ____ ____ __11 __00
-    a32 = vorrq_u32(a32, vshlq_n_u32(a32, 16));      // ____ ____ 1111 0000
-    return Sk16b((uint8x16_t)a32);
+    uint8x16_t a8 = vdupq_n_u8(0);                          // ____ ____ ____ ____
+    a8 = vld1q_lane_u8(a+0, a8,  0);                        // ____ ____ ____ ___0
+    a8 = vld1q_lane_u8(a+1, a8,  4);                        // ____ ____ ___1 ___0
+    auto a32 = (uint32x4_t)a8;                              //
+    return Sk16b((uint8x16_t)vmulq_n_u32(a32, 0x01010101)); // ____ ____ 1111 0000
 }
 
 inline Sk4px Sk4px::zeroColors() const {
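For context, a hedged, self-contained sketch of the new alphas() pattern outside of Skia. It assumes alpha sits in the top byte of each 32-bit pixel (SK_A32_SHIFT == 24, Skia's little-endian default) and requires an ARM/AArch64 compiler with NEON enabled; the pixel values are made up for illustration:

```cpp
#include <arm_neon.h>
#include <cstdint>
#include <cstdio>

int main() {
    // Four ARGB-style pixels with alphas 0x11, 0x22, 0x33, 0x44 in the top byte.
    uint32_t pixels[4] = {0x11000000u, 0x22000000u, 0x33000000u, 0x44000000u};
    uint32x4_t v  = vld1q_u32(pixels);
    uint32x4_t as = vshrq_n_u32(v, 24);           // ___3 ___2 ___1 ___0
    uint32x4_t bb = vmulq_n_u32(as, 0x01010101u); // 3333 2222 1111 0000
    uint32_t out[4];
    vst1q_u32(out, bb);
    for (int i = 0; i < 4; i++) {
        std::printf("0x%08x\n", out[i]);  // expect 0x11111111 ... 0x44444444
    }
    return 0;
}
```

The design win is the shorter dependency chain: the old alphas() issued a shift followed by two serially dependent shift/OR rounds (five NEON instructions), while the new version is just a shift and a multiply, which plausibly accounts for the 10-12% gain on xfermodes that call alphas() on both src and dst.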