author     mtklein <mtklein@chromium.org>    2015-11-17 14:19:52 -0800
committer  Steve Kondik <steve@cyngn.com>    2016-06-27 17:37:15 -0700
commit     a0bea85aea71d427007dc0d441eb47d002b68428 (patch)
tree       4a73e14d3b1ec544913464ee7bfbed037614235c
parent     6c050ffafcd95b17288464d4ccb42e1690e49efd (diff)
download   android_external_skia-a0bea85aea71d427007dc0d441eb47d002b68428.tar.gz
           android_external_skia-a0bea85aea71d427007dc0d441eb47d002b68428.tar.bz2
           android_external_skia-a0bea85aea71d427007dc0d441eb47d002b68428.zip
div255(x) as ((x+128)*257)>>16 with SSE
_mm_mulhi_epu16 makes the (...*257)>>16 part simple.

This seems to speed up every transfermode that uses div255(), in the 7-25% range.

It even appears to obviate the need for approxMulDiv255() on SSE.
I'm not sure about NEON yet, so I'll keep approxMulDiv255() for now.

Should be no pixels change:
https://gold.skia.org/search2?issue=1452903004&unt=true&query=source_type%3Dgm&master=false

BUG=skia:
CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot

Review URL: https://codereview.chromium.org/1452903004
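As a quick aside (not part of the CL), the identity this change relies on can be checked exhaustively in scalar C++: for every x in [0, 255*255], the SSE-friendly ((x+128)*257)>>16 matches the existing ((x+128) + ((x+128)>>8)) >> 8 form, and both compute the rounded divide (x+127)/255.

// Standalone sanity check (not Skia code): verify the div255 identity
// ((x+128)*257)>>16 == ((x+128) + ((x+128)>>8)) >> 8 == (x+127)/255
// for every possible product of two 8-bit values.
#include <cassert>

int main() {
    for (int x = 0; x <= 255*255; x++) {
        int shift_form = ((x+128) + ((x+128) >> 8)) >> 8;   // old div255()
        int mulhi_form = ((x+128) * 257) >> 16;             // new SSE-friendly form
        assert(shift_form == mulhi_form);
        assert(mulhi_form == (x+127) / 255);                // rounded divide by 255
    }
    return 0;
}

Multiplying by 257 and shifting right by 16 folds the old form's two add-and-shift steps into one widening multiply, which is exactly the operation _mm_mulhi_epu16 provides.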
-rw-r--r--  src/core/Sk4px.h       6
-rw-r--r--  src/opts/Sk4px_NEON.h  6
-rw-r--r--  src/opts/Sk4px_SSE2.h  9
-rw-r--r--  src/opts/Sk4px_none.h  6
4 files changed, 22 insertions, 5 deletions
diff --git a/src/core/Sk4px.h b/src/core/Sk4px.h
index e1d4dc1244..835a43bacd 100644
--- a/src/core/Sk4px.h
+++ b/src/core/Sk4px.h
@@ -57,11 +57,7 @@ public:
         Sk4px addNarrowHi(const Sk16h&) const;
 
         // Rounds, i.e. (x+127) / 255.
-        Sk4px div255() const {
-            // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
-            auto v = *this + Sk16h(128);
-            return v.addNarrowHi(v >> 8);
-        }
+        Sk4px div255() const;
 
         // These just keep the types as Wide so the user doesn't have to keep casting.
         Wide operator * (const Wide& o) const { return INHERITED::operator*(o); }
diff --git a/src/opts/Sk4px_NEON.h b/src/opts/Sk4px_NEON.h
index cd6dea9979..a8def1d418 100644
--- a/src/opts/Sk4px_NEON.h
+++ b/src/opts/Sk4px_NEON.h
@@ -57,6 +57,12 @@ inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
                              vaddhn_u16(this->fHi.fVec, o.fHi.fVec)));
 }
 
+inline Sk4px Sk4px::Wide::div255() const {
+    // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
+    auto v = *this + Sk16h(128);
+    return v.addNarrowHi(v>>8);
+}
+
 inline Sk4px Sk4px::alphas() const {
     auto as = vshrq_n_u32((uint32x4_t)fVec, SK_A32_SHIFT);  // ___3 ___2 ___1 ___0
     return Sk16b((uint8x16_t)vmulq_n_u32(as, 0x01010101));  // 3333 2222 1111 0000
diff --git a/src/opts/Sk4px_SSE2.h b/src/opts/Sk4px_SSE2.h
index 3809c5e47b..cb21f2369d 100644
--- a/src/opts/Sk4px_SSE2.h
+++ b/src/opts/Sk4px_SSE2.h
@@ -45,6 +45,15 @@ inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
     return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec));
 }
 
+inline Sk4px Sk4px::Wide::div255() const {
+    // (x + 127) / 255 == ((x+128) * 257)>>16,
+    // and _mm_mulhi_epu16 makes the (_ * 257)>>16 part very convenient.
+    const __m128i _128 = _mm_set1_epi16(128),
+                  _257 = _mm_set1_epi16(257);
+    return Sk4px(_mm_packus_epi16(_mm_mulhi_epu16(_mm_add_epi16(fLo.fVec, _128), _257),
+                                  _mm_mulhi_epu16(_mm_add_epi16(fHi.fVec, _128), _257)));
+}
+
 // Load4Alphas and Load2Alphas use possibly-unaligned loads (SkAlpha[] -> uint16_t or uint32_t).
 // These are safe on x86, often with no speed penalty.
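For readers who want to poke at the SSE2 path in isolation, here is a minimal standalone sketch (not Skia code; div255_epu16 is a made-up helper name) that applies the same _mm_add_epi16 / _mm_mulhi_epu16 sequence to eight 16-bit lanes and checks it against the scalar rounded divide:

// Illustrative only: the same _mm_mulhi_epu16 trick as the Sk4px_SSE2.h change,
// applied to eight uint16_t lanes and checked against scalar (x+127)/255.
#include <emmintrin.h>   // SSE2
#include <algorithm>
#include <cassert>
#include <cstdint>

static inline __m128i div255_epu16(__m128i x) {   // hypothetical helper name
    // (x + 127) / 255 == ((x+128) * 257) >> 16 for x in [0, 255*255].
    const __m128i _128 = _mm_set1_epi16(128),
                  _257 = _mm_set1_epi16(257);
    return _mm_mulhi_epu16(_mm_add_epi16(x, _128), _257);
}

int main() {
    for (int base = 0; base <= 255*255; base += 8) {
        uint16_t in[8], out[8];
        for (int i = 0; i < 8; i++) { in[i] = (uint16_t)std::min(base + i, 255*255); }
        __m128i v = _mm_loadu_si128((const __m128i*)in);
        _mm_storeu_si128((__m128i*)out, div255_epu16(v));
        for (int i = 0; i < 8; i++) {
            assert(out[i] == (in[i] + 127) / 255);
        }
    }
    return 0;
}

The final pack back down to bytes (_mm_packus_epi16 in the patch above) is omitted here, since the check works directly on the 16-bit lanes.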
diff --git a/src/opts/Sk4px_none.h b/src/opts/Sk4px_none.h
index ba13e58fb5..b43ee875b2 100644
--- a/src/opts/Sk4px_none.h
+++ b/src/opts/Sk4px_none.h
@@ -62,6 +62,12 @@ inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
                  r.kth<12>(), r.kth<13>(), r.kth<14>(), r.kth<15>());
 }
 
+inline Sk4px Sk4px::Wide::div255() const {
+    // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
+    auto v = *this + Sk16h(128);
+    return v.addNarrowHi(v>>8);
+}
+
 inline Sk4px Sk4px::alphas() const {
     static_assert(SK_A32_SHIFT == 24, "This method assumes little-endian.");
     return Sk16b(this->kth< 3>(), this->kth< 3>(), this->kth< 3>(), this->kth< 3>(),