author     Alin Jerpelea <jerpelea@gmail.com>                  2013-01-03 20:10:06 +0200
committer  Gerrit Code Review <gerrit@review.cyanogenmod.com>  2013-01-11 01:01:13 -0800
commit     a22b34c43efb805e5ec10230b1b089c781366120 (patch)
tree       8453d24edbae48f26d466d4bbd57131ca5440184
parent     dd580a0ef9d5cbcd425900b48cc1e1453be1cf7e (diff)
ST_ERICSSON: Adding multimedia extension
adapted from commit 151ce20c17e1a097a2edf5b467650941a1cc6287

Author: Patrik Ryd <patrik.ryd@stericsson.com>
Date:   Thu Apr 5 20:18:30 2012 +0000

Adding multimedia extension

More specifically:
- Add support for ST-Ericsson native pixel formats and conversion to and from these formats.
- The Khronos headers are out of date and Mali extensions are missing from the extension headers. GLES2/gl2ext.h has been updated with the missing ARM extensions.
- ST-Ericsson audio support for multimedia.
- ST-Ericsson video support for multimedia.

Change-Id: I716bdd850bc0e3ef783f7814e98e39c53f70d7e4
-rw-r--r--   include/camera/CameraParameters.h                              19
-rw-r--r--   include/gui/SurfaceTexture.h                                   57
-rw-r--r--   include/media/stagefright/ColorConverter.h                      4
-rwxr-xr-x   include/media/stagefright/MediaDefs.h                           6
-rwxr-xr-x   include/media/stagefright/OMXCodec.h                            3
-rw-r--r--   include/media/stagefright/openmax/OMX_IVCommon.h                3
-rw-r--r--   include/ui/PixelFormat.h                                        8
-rw-r--r--   include/ui/Region.h                                            27
-rw-r--r--   libs/camera/CameraParameters.cpp                               19
-rw-r--r--   libs/gui/SurfaceTexture.cpp                                   239
-rw-r--r--   libs/ui/PixelFormat.cpp                                        18
-rwxr-xr-x   media/libstagefright/CameraSource.cpp                          16
-rwxr-xr-x   media/libstagefright/MediaDefs.cpp                              6
-rw-r--r--   media/libstagefright/OMXCodec.cpp                              89
-rw-r--r--   media/libstagefright/colorconversion/ColorConverter.cpp       142
-rw-r--r--   media/libstagefright/omx/SoftOMXPlugin.cpp                      6
-rw-r--r--   opengl/include/GLES2/gl2ext.h                                  29
-rwxr-xr-x   services/surfaceflinger/DisplayHardware/DisplayHardware.cpp    4
-rw-r--r--   services/surfaceflinger/Layer.cpp                              16
-rw-r--r--   services/surfaceflinger/SurfaceFlinger.cpp                     55
20 files changed, 766 insertions, 0 deletions
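
The heart of the change is a two-phase color-conversion path in SurfaceTexture: updateTexImage(bool deferConversion) latches the newest queued buffer and only schedules the blit, while convert(), called later from Layer::onDraw(), performs it via copybit. Below is a minimal stand-alone model of that protocol; DeferredConverter, its slot handling, and the printf in place of the copybit blit are simplified stand-ins, not the framework code added in the hunks that follow.

#include <cstdio>

struct DeferredConverter {
    int  srcSlot = -1;            // queue slot latched by updateTexImage()
    int  blitSlot = 0;            // destination blit slot that GL samples from
    bool needsConversion = false;

    // Phase 1 (Layer::lockPageFlip() in the patch): latch the newest buffer,
    // postpone the color-format blit until the layer is really drawn.
    void updateTexImage(bool deferConversion, int newestSlot) {
        srcSlot = newestSlot;
        blitSlot = (blitSlot + 1) % 2;   // two blit slots, round robin
        needsConversion = deferConversion;
        if (!deferConversion)
            blit();                      // convert immediately
    }

    // Phase 2 (Layer::onDraw() in the patch): perform the pending blit, if any.
    void convert() {
        if (!needsConversion)
            return;                      // returns immediately, as documented
        blit();
        needsConversion = false;
    }

private:
    void blit() const { std::printf("copybit: slot %d -> blit slot %d\n", srcSlot, blitSlot); }
};

int main() {
    DeferredConverter st;
    st.updateTexImage(/*deferConversion=*/true, /*newestSlot=*/3);  // queueing path
    st.convert();                                                   // draw path
}

In the patch itself, Layer::lockPageFlip() calls updateTexImage(true) and Layer::onDraw() calls convert(), so a frame that HWComposer ends up compositing directly is never converted at all.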
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
index 7cc3d4efbaa..9f8676bfb90 100644
--- a/include/camera/CameraParameters.h
+++ b/include/camera/CameraParameters.h
@@ -750,12 +750,26 @@ public:
static const char SCENE_DETECT_ON[];
#endif
static const char PIXEL_FORMAT_YUV422SP[];
+#ifdef STE_HARDWARE
+ static const char PIXEL_FORMAT_YUV420P[]; // YV12
+#endif
static const char PIXEL_FORMAT_YUV420SP[]; // NV21
+#ifdef STE_HARDWARE
+ static const char PIXEL_FORMAT_YUV420SPNV12[]; // NV12
+#endif
#ifdef QCOM_HARDWARE
static const char PIXEL_FORMAT_YUV420SP_ADRENO[]; // ADRENO
#endif
static const char PIXEL_FORMAT_YUV422I[]; // YUY2
+#ifndef STE_HARDWARE
static const char PIXEL_FORMAT_YUV420P[]; // YV12
+#else
+ static const char PIXEL_FORMAT_YVU422SP[];
+ static const char PIXEL_FORMAT_YVU422P[];
+ static const char PIXEL_FORMAT_YVU420SP[];
+ static const char PIXEL_FORMAT_YVU420P[];
+ static const char PIXEL_FORMAT_YUV420MB[];
+#endif
static const char PIXEL_FORMAT_RGB565[];
static const char PIXEL_FORMAT_RGBA8888[];
static const char PIXEL_FORMAT_JPEG[];
@@ -819,6 +833,11 @@ public:
// other modes.
static const char FOCUS_MODE_CONTINUOUS_PICTURE[];
+#ifdef STE_HARDWARE
+ // keys for record stride and slice height
+ static const char KEY_RECORD_STRIDE[];
+ static const char KEY_RECORD_SLICE_HEIGHT[];
+#endif
#ifdef QCOM_HARDWARE
static const char FOCUS_MODE_CONTINUOUS_CAMERA[];
diff --git a/include/gui/SurfaceTexture.h b/include/gui/SurfaceTexture.h
index 36c5b72d1e6..cba73b761f1 100644
--- a/include/gui/SurfaceTexture.h
+++ b/include/gui/SurfaceTexture.h
@@ -30,6 +30,9 @@
#include <utils/Vector.h>
#include <utils/threads.h>
+#ifdef STE_HARDWARE
+#include <hardware/copybit.h>
+#endif
#define ANDROID_GRAPHICS_SURFACETEXTURE_JNI_ID "mSurfaceTexture"
namespace android {
@@ -47,6 +50,9 @@ public:
};
enum { NUM_BUFFER_SLOTS = 32 };
enum { NO_CONNECTED_API = 0 };
+#ifdef STE_HARDWARE
+ enum { NUM_BLIT_BUFFER_SLOTS = 2 };
+#endif
struct FrameAvailableListener : public virtual RefBase {
// onFrameAvailable() is called from queueBuffer() each time an
@@ -136,12 +142,26 @@ public:
// connected to the specified client API.
virtual status_t disconnect(int api);
+#ifndef STE_HARDWARE
// updateTexImage sets the image contents of the target texture to that of
// the most recently queued buffer.
//
// This call may only be made while the OpenGL ES context to which the
// target texture belongs is bound to the calling thread.
status_t updateTexImage(bool isComposition = false);
+#else
+ status_t updateTexImage();
+
+ // A surface that uses a non-native format requires conversion of
+ // its buffers. This conversion can be deferred until the layer
+ // based on this surface is drawn.
+ status_t updateTexImage(bool deferConversion);
+
+ // convert() performs the deferred texture conversion as scheduled
+ // by updateTexImage(bool deferConversion).
+ // The method returns immediately if no conversion is necessary.
+ status_t convert();
+#endif
// setBufferCountServer set the buffer count. If the client has requested
// a buffer count using setBufferCount, the server-buffer count will
@@ -267,6 +287,13 @@ private:
EGLImageKHR createImage(EGLDisplay dpy,
const sp<GraphicBuffer>& graphicBuffer);
+#ifdef STE_HARDWARE
+ // returns TRUE if buffer needs color format conversion
+ bool conversionIsNeeded(const sp<GraphicBuffer>& graphicBuffer);
+
+ // converts buffer to a suitable color format
+ status_t convert(sp<GraphicBuffer> &srcBuf, sp<GraphicBuffer> &dstBuf);
+#endif
status_t setBufferCountServerLocked(int bufferCount);
// computeCurrentTransformMatrix computes the transform matrix for the
@@ -516,6 +543,36 @@ private:
// with the surface Texture.
uint64_t mFrameCounter;
+#ifdef STE_HARDWARE
+ // mBlitEngine is the handle to the copybit device which will be used in
+ // case color transform is needed before the EGL image is created.
+ copybit_device_t* mBlitEngine;
+
+ // mBlitSlots contains several buffers which will
+ // be rendered alternately in case color transform is needed (instead
+ // of rendering the buffers in mSlots).
+ BufferSlot mBlitSlots[NUM_BLIT_BUFFER_SLOTS];
+
+ // mNextBlitSlot is the index of the blitter buffer (in mBlitSlots) which
+ // will be used in the next color transform.
+ int mNextBlitSlot;
+
+ // mConversionSrcSlot designates the slot where source buffer
+ // for the last deferred updateTexImage is located.
+ int mConversionSrcSlot;
+
+ // mConversionBltSlot designates the slot where destination buffer
+ // for the last deferred updateTexImage is located.
+ int mConversionBltSlot;
+
+ // mNeedsConversion indicates that a format conversion is necessary
+ // before the layer based on this surface is drawn.
+ // This flag is set whenever updateTexImage() with deferred conversion
+ // is called. It is cleared once the layer is drawn,
+ // or when updateTexImage() w/o deferred conversion is called.
+ bool mNeedsConversion;
+#endif
+
#ifdef QCOM_HARDWARE
// s3dFormat is the S3D format specified by the client.
int mS3DFormat;
diff --git a/include/media/stagefright/ColorConverter.h b/include/media/stagefright/ColorConverter.h
index df26f779f35..52654a7d44b 100644
--- a/include/media/stagefright/ColorConverter.h
+++ b/include/media/stagefright/ColorConverter.h
@@ -74,6 +74,10 @@ private:
status_t convertQCOMYUV420SemiPlanar(
const BitmapParams &src, const BitmapParams &dst);
+#ifdef STE_HARDWARE
+ status_t convertSTEYUV420PackedSemiPlanarMB(
+ const BitmapParams &src, const BitmapParams &dst);
+#endif
status_t convertYUV420SemiPlanar(
const BitmapParams &src, const BitmapParams &dst);
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 95590839f0b..1704cc51fd3 100755
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -27,8 +27,14 @@ extern const char *MEDIA_MIMETYPE_VIDEO_VPX;
extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
extern const char *MEDIA_MIMETYPE_VIDEO_H263;
+#ifdef STE_HARDWARE
+extern const char *MEDIA_MIMETYPE_VIDEO_H263_SW;
+#endif
extern const char *MEDIA_MIMETYPE_VIDEO_MPEG2;
extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
+#ifdef STE_HARDWARE
+extern const char *MEDIA_MIMETYPE_VIDEO_VC1;
+#endif
extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index e338ba47696..5b8051fa864 100755
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -156,6 +156,9 @@ private:
kAvoidMemcopyInputRecordingFrames = 2048,
kRequiresLargerEncoderOutputBuffer = 4096,
kOutputBuffersAreUnreadable = 8192,
+#ifdef STE_HARDWARE
+ kRequiresStoreMetaDataBeforeIdle = 16384,
+#endif
#ifdef QCOM_HARDWARE
kStoreMetaDataInInputVideoBuffers = 16384,
kRequiresGlobalFlush = 0x20000000, // 2^29
diff --git a/include/media/stagefright/openmax/OMX_IVCommon.h b/include/media/stagefright/openmax/OMX_IVCommon.h
index 7391e56972c..121a8a3181d 100644
--- a/include/media/stagefright/openmax/OMX_IVCommon.h
+++ b/include/media/stagefright/openmax/OMX_IVCommon.h
@@ -158,6 +158,9 @@ typedef enum OMX_COLOR_FORMATTYPE {
* */
OMX_COLOR_FormatAndroidOpaque = 0x7F000789,
OMX_TI_COLOR_FormatYUV420PackedSemiPlanar = 0x7F000100,
+#ifdef STE_HARDWARE
+ OMX_STE_COLOR_FormatYUV420PackedSemiPlanarMB = 0x7FA00000,
+#endif
#ifndef QCOM_HARDWARE
OMX_QCOM_COLOR_FormatYVU420SemiPlanar = 0x7FA30C00,
#endif
diff --git a/include/ui/PixelFormat.h b/include/ui/PixelFormat.h
index 848c5a11490..704959b8fc9 100644
--- a/include/ui/PixelFormat.h
+++ b/include/ui/PixelFormat.h
@@ -72,6 +72,14 @@ enum {
// New formats can be added if they're also defined in
// pixelflinger/format.h
+
+#ifdef STE_HARDWARE
+ // Added Support for YUV42XMBN,
+ // Required for Copybit CC acceleration
+ PIXEL_FORMAT_YCBCR42XMBN = HAL_PIXEL_FORMAT_YCBCR42XMBN,
+ PIXEL_FORMAT_YCbCr_420_SP = HAL_PIXEL_FORMAT_YCbCr_420_SP,
+ PIXEL_FORMAT_YCbCr_420_P = HAL_PIXEL_FORMAT_YCbCr_420_P,
+#endif
};
typedef int32_t PixelFormat;
diff --git a/include/ui/Region.h b/include/ui/Region.h
index 6c9a6203e79..d6b612ac15e 100644
--- a/include/ui/Region.h
+++ b/include/ui/Region.h
@@ -24,6 +24,10 @@
#include <ui/Rect.h>
+#ifdef STE_HARDWARE
+#include <hardware/copybit.h>
+#endif
+
namespace android {
// ---------------------------------------------------------------------------
@@ -181,6 +185,29 @@ Region& Region::operator -= (const Region& rhs) {
Region& Region::operator += (const Point& pt) {
return translateSelf(pt.x, pt.y);
}
+
+
+#ifdef STE_HARDWARE
+// ---------------------------------------------------------------------------
+
+struct region_iterator : public copybit_region_t {
+ region_iterator(const Region& region)
+ : b(region.begin()), e(region.end()) {
+ this->next = iterate;
+ }
+private:
+ static int iterate(copybit_region_t const * self, copybit_rect_t* rect) {
+ region_iterator const* me = static_cast<region_iterator const*>(self);
+ if (me->b != me->e) {
+ *reinterpret_cast<Rect*>(rect) = *me->b++;
+ return 1;
+ }
+ return 0;
+ }
+ mutable Region::const_iterator b;
+ Region::const_iterator const e;
+};
+#endif
// ---------------------------------------------------------------------------
}; // namespace android
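
The region_iterator added above wraps a Region in the C callback interface that the copybit HAL consumes: the blitter pulls one clip rectangle at a time by calling next() until it returns 0 (this is the &clip argument later passed to stretch() in SurfaceTexture::convert()). A self-contained sketch of that consumption loop follows; the structs are simplified stand-ins for the real hardware/copybit.h definitions, and forEachClipRect/oneRectNext are hypothetical.

#include <cstdio>

struct copybit_rect_t   { int l, t, r, b; };
struct copybit_region_t {
    int (*next)(const copybit_region_t* self, copybit_rect_t* rect);
};

// What a copybit implementation does with the clip region handed to stretch():
static void forEachClipRect(const copybit_region_t* clip) {
    copybit_rect_t rc;
    while (clip->next(clip, &rc))   // region_iterator::iterate() yields 1 per rect, then 0
        std::printf("clip rect (%d,%d)-(%d,%d)\n", rc.l, rc.t, rc.r, rc.b);
}

static bool g_done = false;
static int oneRectNext(const copybit_region_t*, copybit_rect_t* rect) {
    if (g_done) return 0;
    rect->l = 0; rect->t = 0; rect->r = 1280; rect->b = 720;
    g_done = true;
    return 1;
}

int main() {
    copybit_region_t clip = { oneRectNext };  // a one-rectangle clip region
    forEachClipRect(&clip);
}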
diff --git a/libs/camera/CameraParameters.cpp b/libs/camera/CameraParameters.cpp
index 6ed7a2592f1..ee768ae62b5 100644
--- a/libs/camera/CameraParameters.cpp
+++ b/libs/camera/CameraParameters.cpp
@@ -248,12 +248,26 @@ const char CameraParameters::SCENE_DETECT_ON[] = "on";
// Formats for setPreviewFormat and setPictureFormat.
const char CameraParameters::PIXEL_FORMAT_YUV422SP[] = "yuv422sp";
+#ifdef STE_HARDWARE
+const char CameraParameters::PIXEL_FORMAT_YUV420P[] = "yuv420p";
+#endif
const char CameraParameters::PIXEL_FORMAT_YUV420SP[] = "yuv420sp";
+#ifdef STE_HARDWARE
+const char CameraParameters::PIXEL_FORMAT_YUV420SPNV12[] = "yuv420spnv12";
+#endif
#ifdef QCOM_HARDWARE
const char CameraParameters::PIXEL_FORMAT_YUV420SP_ADRENO[] = "yuv420sp-adreno";
#endif
const char CameraParameters::PIXEL_FORMAT_YUV422I[] = "yuv422i-yuyv";
+#ifndef STE_HARDWARE
const char CameraParameters::PIXEL_FORMAT_YUV420P[] = "yuv420p";
+#else
+const char CameraParameters::PIXEL_FORMAT_YUV420MB[] = "yuv420mb";
+const char CameraParameters::PIXEL_FORMAT_YVU422SP[] = "yvu422sp";
+const char CameraParameters::PIXEL_FORMAT_YVU422P[] = "yvu422p";
+const char CameraParameters::PIXEL_FORMAT_YVU420SP[] = "yvu420sp";
+const char CameraParameters::PIXEL_FORMAT_YVU420P[] = "yvu420p";
+#endif
const char CameraParameters::PIXEL_FORMAT_RGB565[] = "rgb565";
const char CameraParameters::PIXEL_FORMAT_RGBA8888[] = "rgba8888";
const char CameraParameters::PIXEL_FORMAT_JPEG[] = "jpeg";
@@ -395,6 +409,11 @@ void CameraParameters::setOrientation(int orientation)
}
#endif
+#ifdef STE_HARDWARE
+// keys for record stride and slice height
+const char CameraParameters::KEY_RECORD_STRIDE[] = "record-stride";
+const char CameraParameters::KEY_RECORD_SLICE_HEIGHT[] = "record-slice-height";
+#endif
CameraParameters::CameraParameters()
: mMap()
diff --git a/libs/gui/SurfaceTexture.cpp b/libs/gui/SurfaceTexture.cpp
index 6a26e6ae6b3..8f628ac3386 100644
--- a/libs/gui/SurfaceTexture.cpp
+++ b/libs/gui/SurfaceTexture.cpp
@@ -142,6 +142,10 @@ SurfaceTexture::SurfaceTexture(GLuint tex, bool allowSynchronousMode,
mUseFenceSync(false),
#endif
mTexTarget(texTarget),
+#ifdef STE_HARDWARE
+ mNextBlitSlot(0),
+ mNeedsConversion(false),
+#endif
#ifdef QCOM_HARDWARE
mS3DFormat(0),
#endif
@@ -160,11 +164,30 @@ SurfaceTexture::SurfaceTexture(GLuint tex, bool allowSynchronousMode,
mNextBufferInfo.height = 0;
mNextBufferInfo.format = 0;
#endif
+#ifdef STE_HARDWARE
+
+ for (int i = 0; i < NUM_BLIT_BUFFER_SLOTS; i++) {
+ mBlitSlots[i].mEglImage = EGL_NO_IMAGE_KHR;
+ mBlitSlots[i].mEglDisplay = EGL_NO_DISPLAY;
+ }
+
+ hw_module_t const* module;
+ mBlitEngine = 0;
+ if (hw_get_module(COPYBIT_HARDWARE_MODULE_ID, &module) == 0) {
+ copybit_open(module, &mBlitEngine);
+ }
+ LOGE_IF(!mBlitEngine, "\nCannot open copybit mBlitEngine=%p", mBlitEngine);
+#endif
}
SurfaceTexture::~SurfaceTexture() {
ST_LOGV("~SurfaceTexture");
freeAllBuffersLocked();
+#ifdef STE_HARDWARE
+ if (mBlitEngine) {
+ copybit_close(mBlitEngine);
+ }
+#endif
}
status_t SurfaceTexture::setBufferCountServerLocked(int bufferCount) {
@@ -482,6 +505,7 @@ status_t SurfaceTexture::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,
mSlots[buf].mBufferState = BufferSlot::DEQUEUED;
const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer);
+#ifndef STE_HARDWARE
#ifdef QCOM_HARDWARE
qBufGeometry currentGeometry;
if (buffer != NULL)
@@ -511,6 +535,14 @@ status_t SurfaceTexture::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,
mGraphicBufferAlloc->freeGraphicBufferAtIndex(buf);
}
#endif
+#else
+ if ((buffer == NULL) ||
+ (uint32_t(buffer->width) != w) ||
+ (uint32_t(buffer->height) != h) ||
+ (uint32_t(buffer->format) != format) ||
+ ((uint32_t(buffer->usage) & usage) != usage))
+ {
+#endif
usage |= GraphicBuffer::USAGE_HW_TEXTURE;
status_t error;
sp<GraphicBuffer> graphicBuffer(
@@ -849,7 +881,16 @@ status_t SurfaceTexture::setScalingMode(int mode) {
return OK;
}
+#ifndef STE_HARDWARE
status_t SurfaceTexture::updateTexImage(bool isComposition) {
+#else
+status_t SurfaceTexture::updateTexImage() {
+ return updateTexImage(false);
+}
+
+#define STE_DEFERDBG 0
+status_t SurfaceTexture::updateTexImage(bool deferConversion) {
+#endif
ST_LOGV("updateTexImage");
Mutex::Autolock lock(mMutex);
@@ -865,6 +906,7 @@ status_t SurfaceTexture::updateTexImage(bool isComposition) {
int buf = *front;
// Update the GL texture object.
+#ifndef STE_HARDWARE
EGLImageKHR image = mSlots[buf].mEglImage;
EGLDisplay dpy = eglGetCurrentDisplay();
#ifdef QCOM_HARDWARE
@@ -894,6 +936,101 @@ status_t SurfaceTexture::updateTexImage(bool isComposition) {
// NOTE: if dpy was invalid, createImage() is guaranteed to
// fail. so we'd end up here.
return -EINVAL;
+#else
+ EGLImageKHR image;
+ EGLDisplay dpy = eglGetCurrentDisplay();
+ sp<GraphicBuffer> graphicBuffer;
+ if (conversionIsNeeded(mSlots[buf].mGraphicBuffer)) {
+ mNeedsConversion = deferConversion;
+ // If color conversion is needed we can't use the graphic buffers
+ // located in mSlots for the textures (wrong color format). Instead
+ // color convert it into a buffer in mBlitSlots and use that instead.
+ image = mBlitSlots[mNextBlitSlot].mEglImage;
+
+ // If there exists an image already, make sure that
+ // the dimensions match the current source buffer.
+ // Otherwise, destroy the buffer and let a new one be allocated.
+ if (image != EGL_NO_IMAGE_KHR &&
+ mSlots[buf].mGraphicBuffer != NULL &&
+ mBlitSlots[mNextBlitSlot].mGraphicBuffer != NULL) {
+ sp<GraphicBuffer> &srcBuf = mSlots[buf].mGraphicBuffer;
+ sp<GraphicBuffer> &bltBuf =
+ mBlitSlots[mNextBlitSlot].mGraphicBuffer;
+ if (srcBuf->getWidth() != bltBuf->getWidth() ||
+ srcBuf->getHeight() != bltBuf->getHeight()) {
+ eglDestroyImageKHR(mBlitSlots[mNextBlitSlot].mEglDisplay,
+ image);
+ mBlitSlots[mNextBlitSlot].mEglImage = EGL_NO_IMAGE_KHR;
+ mBlitSlots[mNextBlitSlot].mGraphicBuffer = NULL;
+ image = EGL_NO_IMAGE_KHR;
+ }
+ }
+ if (image == EGL_NO_IMAGE_KHR) {
+ sp<GraphicBuffer> &srcBuf = mSlots[buf].mGraphicBuffer;
+ status_t res = 0;
+
+ sp<GraphicBuffer> blitBuffer(
+ mGraphicBufferAlloc->createGraphicBuffer(
+ srcBuf->getWidth(), srcBuf->getHeight(),
+ PIXEL_FORMAT_RGBA_8888, srcBuf->getUsage(),
+ &res));
+ if (blitBuffer == 0) {
+ ST_LOGE("updateTexImage: SurfaceComposer::createGraphicBuffer failed");
+ return NO_MEMORY;
+ }
+ if (res != NO_ERROR) {
+ ST_LOGW("updateTexImage: SurfaceComposer::createGraphicBuffer error=%#04x", res);
+ }
+ mBlitSlots[mNextBlitSlot].mGraphicBuffer = blitBuffer;
+
+ EGLDisplay dpy = eglGetCurrentDisplay();
+ image = createImage(dpy, blitBuffer);
+ mBlitSlots[mNextBlitSlot].mEglImage = image;
+ mBlitSlots[mNextBlitSlot].mEglDisplay = dpy;
+ }
+
+ if (deferConversion) {
+ graphicBuffer = mSlots[buf].mGraphicBuffer;
+ mConversionSrcSlot = buf;
+ mConversionBltSlot = mNextBlitSlot;
+ // At this point graphicBuffer and image do not point
+ // at matching buffers. This is intentional as this
+ // surface might end up being taken care of by HWComposer,
+ // which needs access to the original buffer.
+ // GL however, is fed an EGLImage that is created from
+ // a conversion buffer. It will have its
+ // content updated once the surface is actually drawn
+ // in Layer::onDraw()
+ } else {
+ if (convert(mSlots[buf].mGraphicBuffer,
+ mBlitSlots[mNextBlitSlot].mGraphicBuffer) != OK) {
+ LOGE("updateTexImage: convert failed");
+ return UNKNOWN_ERROR;
+ }
+ graphicBuffer = mBlitSlots[mNextBlitSlot].mGraphicBuffer;
+ }
+ // mBlitSlots contains several buffers (NUM_BLIT_BUFFER_SLOTS),
+ // advance (potentially wrap) the index
+ mNextBlitSlot = (mNextBlitSlot + 1) % NUM_BLIT_BUFFER_SLOTS;
+ } else {
+ mNeedsConversion = false;
+ image = mSlots[buf].mEglImage;
+ graphicBuffer = mSlots[buf].mGraphicBuffer;
+ if (image == EGL_NO_IMAGE_KHR) {
+ EGLDisplay dpy = eglGetCurrentDisplay();
+ if (graphicBuffer == 0) {
+ ST_LOGE("buffer at slot %d is null", buf);
+ return BAD_VALUE;
+ }
+ image = createImage(dpy, graphicBuffer);
+ mSlots[buf].mEglImage = image;
+ mSlots[buf].mEglDisplay = dpy;
+ if (image == EGL_NO_IMAGE_KHR) {
+ // NOTE: if dpy was invalid, createImage() is guaranteed to
+ // fail. so we'd end up here.
+ return -EINVAL;
+ }
+#endif
}
}
@@ -947,7 +1084,11 @@ status_t SurfaceTexture::updateTexImage(bool isComposition) {
// Update the SurfaceTexture state.
mCurrentTexture = buf;
+#ifndef STE_HARDWARE
mCurrentTextureBuf = mSlots[buf].mGraphicBuffer;
+#else
+ mCurrentTextureBuf = graphicBuffer;
+#endif
mCurrentCrop = mSlots[buf].mCrop;
mCurrentTransform = mSlots[buf].mTransform;
mCurrentScalingMode = mSlots[buf].mScalingMode;
@@ -973,8 +1114,18 @@ bool SurfaceTexture::isExternalFormat(uint32_t format)
case HAL_PIXEL_FORMAT_YV12:
// Legacy/deprecated YUV formats
case HAL_PIXEL_FORMAT_YCbCr_422_SP:
+#ifndef STE_HARDWARE
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+#else
+ case HAL_PIXEL_FORMAT_YCbCr_420_SP:
+#endif
case HAL_PIXEL_FORMAT_YCbCr_422_I:
+#ifdef STE_HARDWARE
+ case HAL_PIXEL_FORMAT_YCrCb_422_SP:
+ case HAL_PIXEL_FORMAT_YCrCb_422_P:
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+ case HAL_PIXEL_FORMAT_YCrCb_420_P:
+#endif
return true;
}
@@ -1119,6 +1270,16 @@ void SurfaceTexture::freeAllBuffersLocked() {
#ifdef QCOM_HARDWARE
mGraphicBufferAlloc->freeAllGraphicBuffersExcept(-1);
#endif
+#ifdef STE_HARDWARE
+ for (int i = 0; i < NUM_BLIT_BUFFER_SLOTS; i++) {
+ mBlitSlots[i].mGraphicBuffer = 0;
+ if (mBlitSlots[i].mEglImage != EGL_NO_IMAGE_KHR) {
+ eglDestroyImageKHR(mBlitSlots[i].mEglDisplay, mBlitSlots[i].mEglImage);
+ mBlitSlots[i].mEglImage = EGL_NO_IMAGE_KHR;
+ mBlitSlots[i].mEglDisplay = EGL_NO_DISPLAY;
+ }
+ }
+#endif
}
void SurfaceTexture::freeAllBuffersExceptHeadLocked() {
@@ -1331,6 +1492,84 @@ void SurfaceTexture::dump(String8& result, const char* prefix,
}
}
+#ifdef STE_HARDWARE
+bool SurfaceTexture::conversionIsNeeded(const sp<GraphicBuffer>& graphicBuffer) {
+ int fmt = graphicBuffer->getPixelFormat();
+ return (fmt == PIXEL_FORMAT_YCBCR42XMBN) || (fmt == PIXEL_FORMAT_YCbCr_420_P);
+}
+
+status_t SurfaceTexture::convert() {
+ if (!mNeedsConversion)
+ return NO_ERROR;
+
+ if (mConversionBltSlot < 0 ||
+ mConversionBltSlot >= NUM_BLIT_BUFFER_SLOTS ||
+ mConversionSrcSlot < 0 ||
+ mConversionSrcSlot >= NUM_BUFFER_SLOTS) {
+ LOGE_IF(STE_DEFERDBG, "%s: Incorrect setup for deferred "
+ "texture conversion:\n"
+ "mConversionSrcSlot=%d mConversionBltSlot=%d", __FUNCTION__,
+ mConversionSrcSlot, mConversionBltSlot);
+ return BAD_VALUE;
+ }
+
+ if (mSlots[mConversionSrcSlot].mGraphicBuffer == NULL) {
+ LOGI_IF(STE_DEFERDBG, "%s: NULL source for deferred texture conversion.",
+ __FUNCTION__);
+ return OK;
+ }
+
+ if (mBlitSlots[mConversionBltSlot].mGraphicBuffer == NULL) {
+ LOGI_IF(STE_DEFERDBG, "%s: NULL destination for deferred "
+ "texture conversion.", __FUNCTION__);
+ return OK;
+ }
+
+ return convert(mSlots[mConversionSrcSlot].mGraphicBuffer,
+ mBlitSlots[mConversionBltSlot].mGraphicBuffer);
+}
+
+status_t SurfaceTexture::convert(sp<GraphicBuffer> &srcBuf, sp<GraphicBuffer> &dstBuf) {
+ copybit_image_t dstImg;
+ dstImg.w = dstBuf->getWidth();
+ dstImg.h = dstBuf->getHeight();
+ dstImg.format = dstBuf->getPixelFormat();
+ dstImg.handle = (native_handle_t*) dstBuf->getNativeBuffer()->handle;
+
+ copybit_image_t srcImg;
+ srcImg.w = srcBuf->getWidth();
+ srcImg.h = srcBuf->getHeight();
+ srcImg.format = srcBuf->getPixelFormat();
+ srcImg.base = NULL;
+ srcImg.handle = (native_handle_t*) srcBuf->getNativeBuffer()->handle;
+
+ copybit_rect_t dstCrop;
+ dstCrop.l = 0;
+ dstCrop.t = 0;
+ dstCrop.r = dstBuf->getWidth();
+ dstCrop.b = dstBuf->getHeight();
+
+ copybit_rect_t srcCrop;
+ srcCrop.l = 0;
+ srcCrop.t = 0;
+ srcCrop.r = srcBuf->getWidth();
+ srcCrop.b = srcBuf->getHeight();
+
+ region_iterator clip(Region(Rect(dstCrop.r, dstCrop.b)));
+ mBlitEngine->set_parameter(mBlitEngine, COPYBIT_TRANSFORM, 0);
+ mBlitEngine->set_parameter(mBlitEngine, COPYBIT_PLANE_ALPHA, 0xFF);
+ mBlitEngine->set_parameter(mBlitEngine, COPYBIT_DITHER, COPYBIT_ENABLE);
+
+ int err = mBlitEngine->stretch(
+ mBlitEngine, &dstImg, &srcImg, &dstCrop, &srcCrop, &clip);
+ if (err != 0) {
+ LOGE("\nError: Blit stretch operation failed (err:%d)\n", err);
+ return UNKNOWN_ERROR;
+ }
+ return OK;
+}
+#endif
+
static void mtxMul(float out[16], const float a[16], const float b[16]) {
out[0] = a[0]*b[0] + a[4]*b[1] + a[8]*b[2] + a[12]*b[3];
out[1] = a[1]*b[0] + a[5]*b[1] + a[9]*b[2] + a[13]*b[3];
diff --git a/libs/ui/PixelFormat.cpp b/libs/ui/PixelFormat.cpp
index ee186c84de9..b34cd9110f2 100644
--- a/libs/ui/PixelFormat.cpp
+++ b/libs/ui/PixelFormat.cpp
@@ -59,11 +59,29 @@ status_t getPixelFormatInfo(PixelFormat format, PixelFormatInfo* info)
// YUV format from the HAL are handled here
switch (format) {
case HAL_PIXEL_FORMAT_YCbCr_422_SP:
+#ifdef STE_HARDWARE
+ case HAL_PIXEL_FORMAT_YCrCb_422_SP:
+ case HAL_PIXEL_FORMAT_YCbCr_422_P:
+#endif
case HAL_PIXEL_FORMAT_YCbCr_422_I:
+#ifdef STE_HARDWARE
+ case HAL_PIXEL_FORMAT_CbYCrY_422_I:
+#endif
info->bitsPerPixel = 16;
goto done;
+#ifdef STE_HARDWARE
+ case HAL_PIXEL_FORMAT_YCbCr_420_SP:
+#endif
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
case HAL_PIXEL_FORMAT_YV12:
+#ifdef STE_HARDWARE
+ case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED:
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP_TILED:
+ case HAL_PIXEL_FORMAT_YCbCr_420_P:
+ case HAL_PIXEL_FORMAT_YCbCr_420_I:
+ case HAL_PIXEL_FORMAT_CbYCrY_420_I:
+ case HAL_PIXEL_FORMAT_YCBCR42XMBN:
+#endif
info->bitsPerPixel = 12;
done:
info->format = format;
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index d79df778243..173b6415569 100755
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -120,6 +120,12 @@ static int32_t getColorFormat(const char* colorFormat) {
return OMX_TI_COLOR_FormatYUV420PackedSemiPlanar;
}
+#ifdef STE_HARDWARE
+ if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420MB)) {
+ return OMX_STE_COLOR_FormatYUV420PackedSemiPlanarMB;
+ }
+#endif
+
LOGE("Uknown color format (%s), please add it to "
"CameraSource::getColorFormat", colorFormat);
@@ -560,13 +566,23 @@ status_t CameraSource::initWithCameraAccess(
// XXX: query camera for the stride and slice height
// when the capability becomes available.
+#ifdef STE_HARDWARE
+ int stride = newCameraParams.getInt(CameraParameters::KEY_RECORD_STRIDE);
+ int sliceHeight = newCameraParams.getInt(CameraParameters::KEY_RECORD_SLICE_HEIGHT);
+#endif
+
mMeta = new MetaData;
mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_RAW);
mMeta->setInt32(kKeyColorFormat, mColorFormat);
mMeta->setInt32(kKeyWidth, mVideoSize.width);
mMeta->setInt32(kKeyHeight, mVideoSize.height);
+#ifndef STE_HARDWARE
mMeta->setInt32(kKeyStride, mVideoSize.width);
mMeta->setInt32(kKeySliceHeight, mVideoSize.height);
+#else
+ mMeta->setInt32(kKeyStride, stride != -1 ? stride : mVideoSize.width);
+ mMeta->setInt32(kKeySliceHeight, sliceHeight != -1 ? sliceHeight : mVideoSize.height);
+#endif
mMeta->setInt32(kKeyFrameRate, mVideoFrameRate);
#ifdef QCOM_HARDWARE
mMeta->setInt32(kKeyHFR, hfr);
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index d022b6e3220..beb5eb15822 100755
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -25,8 +25,14 @@ const char *MEDIA_MIMETYPE_VIDEO_VPX = "video/x-vnd.on2.vp8";
const char *MEDIA_MIMETYPE_VIDEO_AVC = "video/avc";
const char *MEDIA_MIMETYPE_VIDEO_MPEG4 = "video/mp4v-es";
const char *MEDIA_MIMETYPE_VIDEO_H263 = "video/3gpp";
+#ifdef STE_HARDWARE
+const char *MEDIA_MIMETYPE_VIDEO_H263_SW = "video/3gpp-sw";
+#endif
const char *MEDIA_MIMETYPE_VIDEO_MPEG2 = "video/mpeg2";
const char *MEDIA_MIMETYPE_VIDEO_RAW = "video/raw";
+#ifdef STE_HARDWARE
+const char *MEDIA_MIMETYPE_VIDEO_VC1 = "video/vc1";
+#endif
const char *MEDIA_MIMETYPE_AUDIO_AMR_NB = "audio/3gpp";
const char *MEDIA_MIMETYPE_AUDIO_AMR_WB = "audio/amr-wb";
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 60ea9dfb956..8a430399465 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -234,6 +234,9 @@ static const CodecInfo kDecoderInfo[] = {
{ MEDIA_MIMETYPE_VIDEO_VPX, "OMX.SEC.vp8.dec" },
#endif
{ MEDIA_MIMETYPE_IMAGE_JPEG, "OMX.TI.JPEG.decode" },
+#ifdef STE_HARDWARE
+ { MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.ST.mp3.decoder" },
+#endif
// { MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.TI.MP3.decode" },
{ MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.google.mp3.decoder" },
#ifdef WITH_QCOM_LPA
@@ -246,6 +249,9 @@ static const CodecInfo kDecoderInfo[] = {
// { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.Nvidia.amrwb.decoder" },
{ MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.TI.WBAMR.decode" },
{ MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.google.amrwb.decoder" },
+#ifdef STE_HARDWARE
+ { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.ST.aac.decoder" },
+#endif
// { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.Nvidia.aac.decoder" },
{ MEDIA_MIMETYPE_AUDIO_AAC, "OMX.TI.AAC.decode" },
{ MEDIA_MIMETYPE_AUDIO_AAC, "OMX.google.aac.decoder" },
@@ -254,6 +260,9 @@ static const CodecInfo kDecoderInfo[] = {
#endif
{ MEDIA_MIMETYPE_AUDIO_G711_ALAW, "OMX.google.g711.alaw.decoder" },
{ MEDIA_MIMETYPE_AUDIO_G711_MLAW, "OMX.google.g711.mlaw.decoder" },
+#ifdef STE_HARDWARE
+ { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.ST.VFM.MPEG4Dec" },
+#endif
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.DUCATI1.VIDEO.DECODER" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.Nvidia.mp4.decode" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.7x30.video.decoder.mpeg4" },
@@ -261,12 +270,19 @@ static const CodecInfo kDecoderInfo[] = {
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.Video.Decoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.SEC.MPEG4.Decoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.google.mpeg4.decoder" },
+#ifdef STE_HARDWARE
+ { MEDIA_MIMETYPE_VIDEO_H263, "OMX.ST.VFM.MPEG4Dec" },
+#endif
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.TI.DUCATI1.VIDEO.DECODER" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.Nvidia.h263.decode" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.7x30.video.decoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.video.decoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.SEC.H263.Decoder" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.google.h263.decoder" },
+#ifdef STE_HARDWARE
+ { MEDIA_MIMETYPE_VIDEO_H263_SW, "OMX.ST.VFM.MPEG4HostDec" },
+ { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.ST.VFM.H264Dec" },
+#endif
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.DUCATI1.VIDEO.DECODER" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.Nvidia.h264.decode" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.7x30.video.decoder.avc" },
@@ -280,6 +296,9 @@ static const CodecInfo kDecoderInfo[] = {
{ MEDIA_MIMETYPE_VIDEO_VPX, "OMX.SEC.VP8.Decoder" },
{ MEDIA_MIMETYPE_VIDEO_VPX, "OMX.google.vpx.decoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG2, "OMX.Nvidia.mpeg2v.decode" },
+#ifdef STE_HARDWARE
+ { MEDIA_MIMETYPE_VIDEO_VC1, "OMX.ST.VFM.VC1Dec" },
+#endif
#ifdef QCOM_HARDWARE
{ MEDIA_MIMETYPE_VIDEO_DIVX, "OMX.qcom.video.decoder.divx"},
{ MEDIA_MIMETYPE_VIDEO_DIVX311, "OMX.qcom.video.decoder.divx311"},
@@ -312,6 +331,9 @@ static const CodecInfo kEncoderInfo[] = {
{ MEDIA_MIMETYPE_AUDIO_AAC, "OMX.TI.AAC.encode" },
{ MEDIA_MIMETYPE_AUDIO_AAC, "OMX.qcom.audio.encoder.aac" },
{ MEDIA_MIMETYPE_AUDIO_AAC, "AACEncoder" },
+#ifdef STE_HARDWARE
+ { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.ST.VFM.MPEG4Enc" },
+#endif
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.DUCATI1.VIDEO.MPEG4E" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.7x30.video.encoder.mpeg4" },
#ifdef QCOM_HARDWARE
@@ -323,6 +345,9 @@ static const CodecInfo kEncoderInfo[] = {
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.Nvidia.mp4.encoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.SEC.MPEG4.Encoder" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4, "M4vH263Encoder" },
+#ifdef STE_HARDWARE
+ { MEDIA_MIMETYPE_VIDEO_H263, "OMX.ST.VFM.MPEG4Enc" },
+#endif
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.TI.DUCATI1.VIDEO.MPEG4E" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.7x30.video.encoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.video.encoder.h263" },
@@ -330,6 +355,9 @@ static const CodecInfo kEncoderInfo[] = {
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.Nvidia.h263.encoder" },
{ MEDIA_MIMETYPE_VIDEO_H263, "OMX.SEC.H263.Encoder" },
{ MEDIA_MIMETYPE_VIDEO_H263, "M4vH263Encoder" },
+#ifdef STE_HARDWARE
+ { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.ST.VFM.H264Enc" },
+#endif
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.DUCATI1.VIDEO.H264E" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.7x30.video.encoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.video.encoder.avc" },
@@ -445,6 +473,20 @@ static int CompareSoftwareCodecsFirst(
return 0;
}
+#ifdef STE_HARDWARE
+static uint32_t OmxToHALFormat(OMX_COLOR_FORMATTYPE omxValue) {
+ switch (omxValue) {
+ case OMX_STE_COLOR_FormatYUV420PackedSemiPlanarMB:
+ return HAL_PIXEL_FORMAT_YCBCR42XMBN;
+ case OMX_COLOR_FormatYUV420Planar:
+ return HAL_PIXEL_FORMAT_YCbCr_420_P;
+ default:
+ LOGI("Unknown OMX pixel format (0x%X), passing it on unchanged", omxValue);
+ return omxValue;
+ }
+}
+#endif
+
// static
uint32_t OMXCodec::getComponentQuirks(
const char *componentName, bool isEncoder) {
@@ -567,6 +609,19 @@ uint32_t OMXCodec::getComponentQuirks(
}
#endif
+#ifdef STE_HARDWARE
+ if (!isEncoder && !strncmp(componentName, "OMX.ST.VFM.", 11)) {
+ quirks |= kRequiresAllocateBufferOnInputPorts;
+ quirks |= kRequiresAllocateBufferOnOutputPorts;
+ }
+
+ if (!strncmp(componentName, "OMX.ST.VFM.MPEG4Enc", 19) ||
+ !strncmp(componentName, "OMX.ST.VFM.H264Enc", 18)) {
+ quirks |= kRequiresAllocateBufferOnOutputPorts;
+ quirks |= kRequiresStoreMetaDataBeforeIdle;
+ }
+#endif
+
return quirks;
}
@@ -1276,6 +1331,9 @@ static size_t getFrameSize(
case OMX_COLOR_FormatYUV420Planar:
case OMX_COLOR_FormatYUV420SemiPlanar:
case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
+#ifdef STE_HARDWARE
+ case OMX_STE_COLOR_FormatYUV420PackedSemiPlanarMB:
+#endif
/*
* FIXME: For the Opaque color format, the frame size does not
* need to be (w*h*3)/2. It just needs to
@@ -1933,7 +1991,11 @@ status_t OMXCodec::setVideoOutputFormat(
|| format.eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar
|| format.eColorFormat == OMX_COLOR_FormatCbYCrY
|| format.eColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar
+#ifndef STE_HARDWARE
|| format.eColorFormat == OMX_QCOM_COLOR_FormatYVU420SemiPlanar
+#else
+ || format.eColorFormat == OMX_STE_COLOR_FormatYUV420PackedSemiPlanarMB
+#endif
#ifdef QCOM_HARDWARE
|| format.eColorFormat == QOMX_COLOR_FormatYUV420PackedSemiPlanar64x32Tile2m8ka
#endif
@@ -2106,6 +2168,10 @@ void OMXCodec::setComponentRole(
"video_decoder.mpeg4", "video_encoder.mpeg4" },
{ MEDIA_MIMETYPE_VIDEO_H263,
"video_decoder.h263", "video_encoder.h263" },
+#ifdef STE_HARDWARE
+ { MEDIA_MIMETYPE_VIDEO_VC1,
+ "video_decoder.vc1", "video_encoder.vc1" },
+#endif
#ifdef QCOM_HARDWARE
{ MEDIA_MIMETYPE_VIDEO_DIVX,
"video_decoder.divx", NULL },
@@ -2183,6 +2249,17 @@ status_t OMXCodec::init() {
CHECK_EQ((int)mState, (int)LOADED);
status_t err;
+#ifdef STE_HARDWARE
+ if ((mQuirks & kRequiresStoreMetaDataBeforeIdle)
+ && (mFlags & kStoreMetaDataInVideoBuffers)) {
+ err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexInput, OMX_TRUE);
+ if (err != OK) {
+ LOGE("Storing meta data in video buffers is not supported");
+ return err;
+ }
+ }
+#endif
+
if (!(mQuirks & kRequiresLoadedToIdleAfterAllocation)) {
err = mOMX->sendCommand(mNode, OMX_CommandStateSet, OMX_StateIdle);
CHECK_EQ(err, (status_t)OK);
@@ -2246,7 +2323,12 @@ status_t OMXCodec::allocateBuffersOnPort(OMX_U32 portIndex) {
}
status_t err = OK;
+#ifndef STE_HARDWARE
if ((mFlags & kStoreMetaDataInVideoBuffers)
+#else
+ if (!(mQuirks & kRequiresStoreMetaDataBeforeIdle)
+ && (mFlags & kStoreMetaDataInVideoBuffers)
+#endif
&& portIndex == kPortIndexInput) {
LOGW("Trying to enable metadata mode on encoder");
err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexInput, OMX_TRUE);
@@ -2462,7 +2544,11 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
#else
def.format.video.nFrameWidth,
def.format.video.nFrameHeight,
+#ifndef STE_HARDWARE
def.format.video.eColorFormat);
+#else
+ OmxToHALFormat(def.format.video.eColorFormat));
+#endif
#endif
#else
OMX_COLOR_FORMATTYPE eColorFormat;
@@ -5351,6 +5437,9 @@ static const char *videoCompressionFormatString(OMX_VIDEO_CODINGTYPE type) {
"OMX_VIDEO_CodingRV",
"OMX_VIDEO_CodingAVC",
"OMX_VIDEO_CodingMJPEG",
+#ifdef STE_HARDWARE
+ "OMX_VIDEO_CodingVC1",
+#endif
};
size_t numNames = sizeof(kNames) / sizeof(kNames[0]);
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index 5bda1f6c0a4..cdbe810621c 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -54,6 +54,9 @@ bool ColorConverter::isValid() const {
case OMX_QCOM_COLOR_FormatYVU420SemiPlanar:
case OMX_COLOR_FormatYUV420SemiPlanar:
case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
+#ifdef STE_HARDWARE
+ case OMX_STE_COLOR_FormatYUV420PackedSemiPlanarMB:
+#endif
#ifdef QCOM_HARDWARE
case QOMX_COLOR_FormatYUV420PackedSemiPlanar64x32Tile2m8ka:
#endif
@@ -555,6 +558,145 @@ status_t ColorConverter::convertTIYUV420PackedSemiPlanar(
return OK;
}
+#ifdef STE_HARDWARE
+status_t ColorConverter::convertSTEYUV420PackedSemiPlanarMB(
+ const BitmapParams &src, const BitmapParams &dst) {
+
+ if (!((dst.mWidth & 1) == 0
+ && src.mCropLeft == 0
+ && src.mCropTop == 0
+ && src.cropWidth() == dst.cropWidth()
+ && src.cropHeight() == dst.cropHeight())) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ OMX_U32 mx = src.mWidth / 16;
+ OMX_U32 my = src.mHeight / 16;
+ OMX_U32 lx, ly;
+ OMX_U32 *pChroma, *pLuma = (OMX_U32 *)src.mBits;
+
+ pChroma = (OMX_U32 *)src.mBits + mx * my * 64;
+ for (ly = 0; ly < my; ly++) {
+ for (lx = 0; lx < mx; lx++) {
+ OMX_U32 col, row, lumaWord, chromaWord1 = 0, rgbWord, i;
+ OMX_U8 y[4], cb[4], cr[4], r[4], g[4], b[4];
+ OMX_U32 *dstBuf, *locBuf;
+ OMX_U32 *pBurstLuma = 0, *pBurstChroma = 0;
+ OMX_U32 *pWordLuma = 0, *pWordChroma = 0;
+ OMX_U8 nbOfBlock;
+
+ dstBuf = ((OMX_U32 *)dst.mBits) + (ly * 16) * dst.mWidth / 2;
+ dstBuf += (lx * 16) / 2;
+
+ pBurstLuma = pLuma;
+ pBurstChroma = pChroma;
+
+ for (col = 0; col < 2; col++) {
+ // conversion of a macroblock
+ for (nbOfBlock = 0; nbOfBlock < 2; nbOfBlock++) {
+ locBuf = dstBuf + 4 * col + 2 * nbOfBlock;
+ OMX_U32 dstRowOrigo = ly * 16 * dst.mWidth;
+
+ switch (nbOfBlock) {
+ case 0:
+ pWordLuma = pBurstLuma;
+ pWordChroma = pBurstChroma;
+ break;
+ case 1:
+ pWordLuma = pBurstLuma + 1;
+ pWordChroma = pBurstChroma + 1;
+ break;
+ }
+ for (row = 0; row < 16; row++) {
+
+ // Check for cropping on the y axis
+ if (ly * 16 + row >= dst.mHeight) {
+ break;
+ }
+
+ lumaWord = *pWordLuma;
+ pWordLuma += 2;
+ if (row % 2 == 0) {
+ chromaWord1 = *pWordChroma;
+ pWordChroma += 2;
+ }
+
+ y[3] = ((lumaWord >> 24) & 0xff);
+ y[2] = ((lumaWord >> 16) & 0xff);
+ y[1] = ((lumaWord >> 8) & 0xff);
+ y[0] = ((lumaWord >> 0) & 0xff);
+
+ cb[0] = cb[1] = ((chromaWord1 >> 0) & 0xff);
+ cb[2] = cb[3] = ((chromaWord1 >> 16) & 0xff);
+ cr[0] = cr[1] = ((chromaWord1 >> 8) & 0xff);
+ cr[2] = cr[3] = ((chromaWord1 >> 24) & 0xff);
+
+ for (i = 0; i < 4; i++) {
+
+ int32_t rW,gW,bW;
+
+ rW = 298 * y[i] + 408 * cr[i] - 57059;
+ gW = 298 * y[i] - 100 * cb[i] - 208 * cr[i] + 34713;
+ bW = 298 * y[i] + 516 * cb[i] - 70887;
+
+ if (rW < 0) {
+ r[i] = 0;
+ } else if (rW >= 65536) {
+ r[i] = 255;
+ } else {
+ r[i] = (rW >> 8);
+ }
+ if (gW < 0) {
+ g[i] = 0;
+ } else if (gW >= 65536) {
+ g[i] = 255;
+ } else {
+ g[i] = (gW >> 8);
+ }
+ if (bW < 0) {
+ b[i] = 0;
+ } else if (bW >= 65536) {
+ b[i] = 255;
+ } else {
+ b[i] = (bW >> 8);
+ }
+ r[i] >>= 3;
+ g[i] >>= 2;
+ b[i] >>= 3;
+ }
+ for (i = 0; i < 4; i += 2) {
+
+ // Check for cropping on the x axis
+ OMX_U32 rowPos = (locBuf - (OMX_U32 *)dst.mBits) * 2 - dstRowOrigo;
+ if (rowPos >= dst.mWidth) {
+ locBuf++;
+ continue;
+ }
+
+ rgbWord = (r[i + 1] << 27) +
+ (g[i + 1] << 21) +
+ (b[i + 1] << 16) +
+ (r[i] << 11) +
+ (g[i] << 5) +
+ (b[i] << 0);
+ *locBuf++ = rgbWord;
+ }
+ locBuf += dst.mWidth / 2 - 2;
+ dstRowOrigo += dst.mWidth;
+ } //end of for 16 loop
+ } //end of 2 block loop
+ pBurstLuma += 32;
+ pBurstChroma += 16;
+ } // end of 2 col loop
+ pLuma += 64;
+ pChroma += 32;
+ }
+ }
+
+ return OK;
+}
+#endif
+
uint8_t *ColorConverter::initClip() {
static const signed kClipMin = -278;
static const signed kClipMax = 535;
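
The integer constants in convertSTEYUV420PackedSemiPlanarMB() above are the usual BT.601 limited-range YCbCr-to-RGB coefficients scaled by 256 (1.164 ≈ 298/256, 1.596 ≈ 408/256, 0.391 ≈ 100/256, 0.813 ≈ 208/256, 2.018 ≈ 516/256), with the -16/-128 bias terms folded into the single offsets, followed by clamping and 5-6-5 packing. The stand-alone per-pixel helper below mirrors that arithmetic for illustration; yuvToRgb565/clamp8 are not part of the patch.

#include <cstdint>
#include <cstdio>

static inline uint8_t clamp8(int32_t v) {
    if (v < 0)      return 0;
    if (v >= 65536) return 255;              // value is still in 8.8 fixed point here
    return static_cast<uint8_t>(v >> 8);
}

static uint16_t yuvToRgb565(uint8_t y, uint8_t cb, uint8_t cr) {
    uint8_t r = clamp8(298 * y + 408 * cr - 57059);
    uint8_t g = clamp8(298 * y - 100 * cb - 208 * cr + 34713);
    uint8_t b = clamp8(298 * y + 516 * cb - 70887);
    // Keep the top 5/6/5 bits, as the patch does with r>>=3, g>>=2, b>>=3.
    return static_cast<uint16_t>(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}

int main() {
    std::printf("0x%04x\n", yuvToRgb565(128, 128, 128));  // neutral grey -> 0x8410
}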
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 1e33f05f0ce..bc89092b185 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -34,6 +34,9 @@ static const struct {
const char *mRole;
} kComponents[] = {
+#ifdef STE_HARDWARE
+ { "OMX.ST.aac.decoder", "ste_aacdec", "audio_decoder.aac" },
+#endif
{ "OMX.google.aac.decoder", "aacdec", "audio_decoder.aac" },
{ "OMX.google.amrnb.decoder", "amrdec", "audio_decoder.amrnb" },
{ "OMX.google.amrwb.decoder", "amrdec", "audio_decoder.amrwb" },
@@ -42,6 +45,9 @@ static const struct {
{ "OMX.google.g711.mlaw.decoder", "g711dec", "audio_decoder.g711mlaw" },
{ "OMX.google.h263.decoder", "mpeg4dec", "video_decoder.h263" },
{ "OMX.google.mpeg4.decoder", "mpeg4dec", "video_decoder.mpeg4" },
+#ifdef STE_HARDWARE
+ { "OMX.ST.mp3.decoder", "ste_mp3dec", "audio_decoder.mp3" },
+#endif
{ "OMX.google.mp3.decoder", "mp3dec", "audio_decoder.mp3" },
{ "OMX.google.vorbis.decoder", "vorbisdec", "audio_decoder.vorbis" },
{ "OMX.google.vpx.decoder", "vpxdec", "video_decoder.vpx" },
diff --git a/opengl/include/GLES2/gl2ext.h b/opengl/include/GLES2/gl2ext.h
index 82befc2cca3..3bc017af38f 100644
--- a/opengl/include/GLES2/gl2ext.h
+++ b/opengl/include/GLES2/gl2ext.h
@@ -195,6 +195,19 @@ typedef void* GLeglImageOES;
#define GL_Z400_BINARY_AMD 0x8740
#endif
+#ifdef STE_HARDWARE
+/*------------------------------------------------------------------------*
+ * ARM extension tokens
+ *------------------------------------------------------------------------*/
+
+/* GL_ARM_mali_shader_binary */
+#ifndef GL_ARM_mali_shader_binary
+#define GL_MALI_SHADER_BINARY_ARM 0x8F60
+#endif
+/* GL_ARM_rgba8 */
+/* No new tokens introduced by this extension. */
+#endif
+
/*------------------------------------------------------------------------*
* EXT extension tokens
*------------------------------------------------------------------------*/
@@ -610,6 +623,22 @@ typedef void (GL_APIENTRYP PFNGLGETPERFMONITORCOUNTERDATAAMDPROC) (GLuint monito
#define GL_AMD_program_binary_Z400 1
#endif
+
+#ifdef STE_HARDWARE
+/*------------------------------------------------------------------------*
+ * ARM extension functions
+ *------------------------------------------------------------------------*/
+
+/* GL_ARM_mali_shader_binary */
+#ifndef GL_ARM_mali_shader_binary
+#define GL_ARM_mali_shader_binary 1
+#endif
+
+/* GL_ARM_rgba8 */
+#ifndef GL_ARM_rgba8
+#define GL_ARM_rgba8 1
+#endif
+#endif
/*------------------------------------------------------------------------*
* EXT extension functions
*------------------------------------------------------------------------*/
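
For context, GL_MALI_SHADER_BINARY_ARM defined above is the binaryformat token a client passes to the core ES 2.0 glShaderBinary() entry point when loading shaders pre-compiled offline for Mali GPUs; GL_ARM_mali_shader_binary and GL_ARM_rgba8 add no new entry points. A hedged sketch follows, assuming a binary blob already produced by ARM's offline shader compiler; loadMaliBinaryShader and the blob parameter are hypothetical and appear nowhere in this patch.

#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>   // provides GL_MALI_SHADER_BINARY_ARM as patched above
#include <vector>

// blob is assumed to hold a shader pre-compiled by ARM's offline Mali compiler.
static GLuint loadMaliBinaryShader(GLenum type, const std::vector<char>& blob) {
    GLuint shader = glCreateShader(type);
    glShaderBinary(1, &shader, GL_MALI_SHADER_BINARY_ARM,
                   blob.data(), static_cast<GLsizei>(blob.size()));
    // No glCompileShader(): the binary is already compiled; attach and link as usual.
    return shader;
}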
diff --git a/services/surfaceflinger/DisplayHardware/DisplayHardware.cpp b/services/surfaceflinger/DisplayHardware/DisplayHardware.cpp
index 5b96587a9b0..c71328b5b50 100755
--- a/services/surfaceflinger/DisplayHardware/DisplayHardware.cpp
+++ b/services/surfaceflinger/DisplayHardware/DisplayHardware.cpp
@@ -394,6 +394,10 @@ void DisplayHardware::flip(const Region& dirty) const
if (mHwc->initCheck() == NO_ERROR) {
mHwc->commit();
} else {
+#ifdef STE_HARDWARE
+ // Make sure the swapbuffer call is done in sync
+ mNativeWindow->compositionComplete();
+#endif
eglSwapBuffers(dpy, surface);
}
checkEGLErrors("eglSwapBuffers");
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index a621f1b4c99..a3cc0637670 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -289,6 +289,18 @@ void Layer::setPerFrameData(hwc_layer_t* hwcl) {
void Layer::onDraw(const Region& clip) const
{
+#ifdef STE_HARDWARE
+ // Convert the texture to a native format if need be.
+ // convert() returns immediately if no conversion is necessary.
+ if (mSurfaceTexture != NULL) {
+ status_t res = mSurfaceTexture->convert();
+ if (res != NO_ERROR) {
+ LOGE("Layer::onDraw: texture conversion failed. "
+ "Texture content for this layer will not be initialized.");
+ }
+ }
+#endif
+
if (CC_UNLIKELY(mActiveBuffer == 0)) {
// the texture has not been created yet, this Layer has
// in fact never been drawn into. This happens frequently with
@@ -514,7 +526,11 @@ void Layer::lockPageFlip(bool& recomputeVisibleRegions)
if (mSurfaceTexture->updateTexImage(isComposition) < NO_ERROR) {
#else
+#ifndef STE_HARDWARE
if (mSurfaceTexture->updateTexImage() < NO_ERROR) {
+#else
+ if (mSurfaceTexture->updateTexImage(true) < NO_ERROR) {
+#endif
#endif
// something happened!
recomputeVisibleRegions = true;
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 2843b1de846..ca4f155b3b7 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -476,7 +476,9 @@ bool SurfaceFlinger::threadLoop()
#else
// inform the h/w that we're done compositing
logger.log(GraphicLog::SF_COMPOSITION_COMPLETE, index);
+#ifndef STE_HARDWARE
hw.compositionComplete();
+#endif
logger.log(GraphicLog::SF_SWAP_BUFFERS, index);
postFramebuffer();
@@ -485,7 +487,9 @@ bool SurfaceFlinger::threadLoop()
logger.log(GraphicLog::SF_REPAINT_DONE, index);
} else {
// pretend we did the post
+#ifndef STE_HARDWARE
hw.compositionComplete();
+#endif
usleep(16667); // 60 fps period
#ifdef QCOM_HARDWARE
@@ -883,8 +887,10 @@ void SurfaceFlinger::handleRepaint()
// set the frame buffer
const DisplayHardware& hw(graphicPlane(0).displayHardware());
+#ifndef STE_HARDWARE
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
+#endif
uint32_t flags = hw.getFlags();
if ((flags & DisplayHardware::SWAP_RECTANGLE) ||
@@ -953,8 +959,15 @@ bool SurfaceFlinger::isGPULayerPresent()
}
#endif
+#ifdef STE_HARDWARE
+static bool checkDrawingWithGL(hwc_layer_t* const layers, size_t layerCount);
+#endif
+
void SurfaceFlinger::setupHardwareComposer(Region& dirtyInOut)
{
+#ifdef STE_HARDWARE
+ bool useGL = true;
+#endif
const DisplayHardware& hw(graphicPlane(0).displayHardware());
HWComposer& hwc(hw.getHwComposer());
hwc_layer_t* const cur(hwc.getLayers());
@@ -989,6 +1002,23 @@ void SurfaceFlinger::setupHardwareComposer(Region& dirtyInOut)
#ifdef QCOM_HARDWARE
mCanSkipComposition = (hwc.getFlags() & HWC_SKIP_COMPOSITION) ? true : false;
#endif
+#ifdef STE_HARDWARE
+ /*
+ * Check if GL will be used
+ */
+ useGL = checkDrawingWithGL(cur, count);
+
+ if (!useGL) {
+ return;
+ }
+ glMatrixMode(GL_MODELVIEW);
+ glLoadIdentity();
+ if (UNLIKELY(!mWormholeRegion.isEmpty())) {
+ // should never happen unless the window manager has a bug
+ // draw something...
+ drawWormhole();
+ }
+#endif
if (err == NO_ERROR) {
// what's happening here is tricky.
// we want to clear all the layers with the CLEAR_FB flags
@@ -1071,12 +1101,28 @@ void SurfaceFlinger::setupHardwareComposer(Region& dirtyInOut)
}
}
+#ifdef STE_HARDWARE
+static bool checkDrawingWithGL(hwc_layer_t* const layers, size_t layerCount)
+{
+ bool useGL = false;
+ if (layers) {
+ for (size_t i=0 ; i<layerCount ; i++) {
+ if (layers[i].compositionType == HWC_FRAMEBUFFER) {
+ useGL = true;
+ }
+ }
+ }
+ return useGL;
+}
+#endif
+
void SurfaceFlinger::composeSurfaces(const Region& dirty)
{
const DisplayHardware& hw(graphicPlane(0).displayHardware());
HWComposer& hwc(hw.getHwComposer());
const size_t fbLayerCount = hwc.getLayerCount(HWC_FRAMEBUFFER);
+#ifndef STE_HARDWARE
if (UNLIKELY(fbLayerCount && !mWormholeRegion.isEmpty())) {
// should never happen unless the window manager has a bug
// draw something...
@@ -1097,6 +1143,7 @@ void SurfaceFlinger::composeSurfaces(const Region& dirty)
drawWormhole();
#endif
}
+#endif
/*
* and then, render the layers targeted at the framebuffer
@@ -2548,7 +2595,9 @@ status_t SurfaceFlinger::captureScreenImplLocked(DisplayID dpy,
glDeleteRenderbuffersOES(1, &tname);
glDeleteFramebuffersOES(1, &name);
+#ifdef STE_HARDWARE
hw.compositionComplete();
+#endif
// LOGD("screenshot: result = %s", result<0 ? strerror(result) : "OK");
@@ -2776,9 +2825,15 @@ sp<GraphicBuffer> GraphicBufferAlloc::createGraphicBuffer(uint32_t w, uint32_t h
if (err == NO_MEMORY) {
GraphicBuffer::dumpAllocationsToSystemLog();
}
+#ifndef STE_HARDWARE
LOGE("GraphicBufferAlloc::createGraphicBuffer(w=%d, h=%d) "
"failed (%s), handle=%p",
w, h, strerror(-err), graphicBuffer->handle);
+#else
+ LOGE("GraphicBufferAlloc::createGraphicBuffer(w=%d, h=%d, format=%#x) "
+ "failed (%s), handle=%p",
+ w, h, format, strerror(-err), graphicBuffer->handle);
+#endif
return 0;
}
#ifdef QCOM_HARDWARE