diff options
| author | Chang Ying <ying.chang@intel.com> | 2012-09-12 16:21:51 +0800 |
|---|---|---|
| committer | Patrick Tjin <pattjin@google.com> | 2014-07-21 22:02:48 -0700 |
| commit | 28516617d7c679a9d1c4d1a5a29af157eb25cc29 (patch) | |
| tree | fba2aff4bc1bb899fb872247d1f1ab7343e25f7f | |
| parent | 124cae864848ce34be2113421ca73194802fb029 (diff) | |
| download | android_hardware_intel_common_omx-components-28516617d7c679a9d1c4d1a5a29af157eb25cc29.tar.gz android_hardware_intel_common_omx-components-28516617d7c679a9d1c4d1a5a29af157eb25cc29.tar.bz2 android_hardware_intel_common_omx-components-28516617d7c679a9d1c4d1a5a29af157eb25cc29.zip | |
AVCE: Add AndroidOpaque input color format support
BZ: 34659
Briefly, this implies that the encoder can use a texture object as a
source buffer. On our platform a texture object is represented as a
buffer_handle_t in RGB color format; however, the encoder only supports
the YUV color space, so a color space conversion is needed.
Change-Id: Iad91a911184e0c6e66576b3c0bd402f1636b933f
Signed-off-by: Chang Ying <ying.chang@intel.com>
Reviewed-on: http://android.intel.com:8080/66722
Reviewed-by: Jiang, Fei <fei.jiang@intel.com>
Reviewed-by: Shi, PingX <pingx.shi@intel.com>
Tested-by: Shi, PingX <pingx.shi@intel.com>
Reviewed-by: buildbot <buildbot@intel.com>
Tested-by: buildbot <buildbot@intel.com>
| -rw-r--r-- | videocodec/Android.mk | 6 | ||||
| -rw-r--r-- | videocodec/OMXVideoEncoderAVC.cpp | 13 | ||||
| -rw-r--r-- | videocodec/OMXVideoEncoderBase.cpp | 102 | ||||
| -rw-r--r-- | videocodec/OMXVideoEncoderBase.h | 21 | ||||
| -rw-r--r-- | videocodec/OMXVideoEncoderH263.cpp | 12 | ||||
| -rw-r--r-- | videocodec/OMXVideoEncoderMPEG4.cpp | 11 | ||||
| -rw-r--r-- | videocodec/hal_public.h | 182 |
7 files changed, 344 insertions, 3 deletions
diff --git a/videocodec/Android.mk b/videocodec/Android.mk index b9bbc42..aecdcb8 100644 --- a/videocodec/Android.mk +++ b/videocodec/Android.mk @@ -137,6 +137,7 @@ LOCAL_SHARED_LIBRARIES := \ libva-android \ libva-tpi \ libutils \ + libhardware \ libintelmetadatabuffer LOCAL_C_INCLUDES := \ @@ -145,6 +146,7 @@ LOCAL_C_INCLUDES := \ $(TARGET_OUT_HEADERS)/libmix_videoencoder \ $(TARGET_OUT_HEADERS)/libva \ $(TARGET_OUT_HEADERS)/libsharedbuffer \ + $(TOP)/hardware/libhardware/include \ $(TOP)/frameworks/native/include/media/hardware \ $(TOP)/frameworks/native/include/media/openmax @@ -171,6 +173,7 @@ LOCAL_SHARED_LIBRARIES := \ libva-android \ libva-tpi \ libutils \ + libhardware \ libintelmetadatabuffer LOCAL_C_INCLUDES := \ @@ -179,6 +182,7 @@ LOCAL_C_INCLUDES := \ $(TARGET_OUT_HEADERS)/libmix_videoencoder \ $(TARGET_OUT_HEADERS)/libva \ $(TARGET_OUT_HEADERS)/libsharedbuffer \ + $(TOP)/hardware/libhardware/include \ $(TOP)/frameworks/native/include/media/hardware \ $(TOP)/frameworks/native/include/media/openmax @@ -204,6 +208,7 @@ LOCAL_SHARED_LIBRARIES := \ libva-android \ libva-tpi \ libutils \ + libhardware \ libintelmetadatabuffer LOCAL_C_INCLUDES := \ @@ -212,6 +217,7 @@ LOCAL_C_INCLUDES := \ $(TARGET_OUT_HEADERS)/libmix_videoencoder \ $(TARGET_OUT_HEADERS)/libva \ $(TARGET_OUT_HEADERS)/libsharedbuffer \ + $(TOP)/hardware/libhardware/include \ $(TOP)/frameworks/native/include/media/hardware \ $(TOP)/frameworks/native/include/media/openmax diff --git a/videocodec/OMXVideoEncoderAVC.cpp b/videocodec/OMXVideoEncoderAVC.cpp index 29a0f34..9a39570 100644 --- a/videocodec/OMXVideoEncoderAVC.cpp +++ b/videocodec/OMXVideoEncoderAVC.cpp @@ -19,6 +19,7 @@ #define LOG_TAG "OMXVideoEncoderAVC" #include <utils/Log.h> #include "OMXVideoEncoderAVC.h" +#include "IntelMetadataBuffer.h" static const char *AVC_MIME_TYPE = "video/h264"; @@ -190,6 +191,10 @@ OMX_ERRORTYPE OMXVideoEncoderAVC::ProcessorProcess( goto out; } + if (bAndroidOpaqueFormat) { + mCurHandle = 
rgba2nv12conversion(buffers[INPORT_INDEX]); + } + inBuf.data = buffers[INPORT_INDEX]->pBuffer + buffers[INPORT_INDEX]->nOffset; inBuf.size = buffers[INPORT_INDEX]->nFilledLen; @@ -429,6 +434,14 @@ out: if (retains[OUTPORT_INDEX] == BUFFER_RETAIN_NOT_RETAIN) mFrameOutputCount ++; + if (bAndroidOpaqueFormat && buffers[INPORT_INDEX]->nFilledLen != 0) { + // Restore input buffer's content + buffers[INPORT_INDEX]->nFilledLen = 4 + sizeof(buffer_handle_t); + memcpy(buffers[INPORT_INDEX]->pBuffer, mBufferHandleMaps[mCurHandle].backBuffer, + buffers[INPORT_INDEX]->nFilledLen); + + } + #if 0 if (avcEncParamIntelBitrateType.eControlRate != OMX_Video_Intel_ControlRateVideoConferencingMode) { if (oret == (OMX_ERRORTYPE) OMX_ErrorIntelExtSliceSizeOverflow) { diff --git a/videocodec/OMXVideoEncoderBase.cpp b/videocodec/OMXVideoEncoderBase.cpp index ae74572..b45592f 100644 --- a/videocodec/OMXVideoEncoderBase.cpp +++ b/videocodec/OMXVideoEncoderBase.cpp @@ -35,6 +35,7 @@ OMXVideoEncoderBase::OMXVideoEncoderBase() mEncoderParams = new VideoParamsCommon(); if (!mEncoderParams) LOGE("OMX_ErrorInsufficientResources"); + bAndroidOpaqueFormat = OMX_FALSE; LOGV("OMXVideoEncoderBase::OMXVideoEncoderBase end"); } @@ -349,6 +350,38 @@ OMX_ERRORTYPE OMXVideoEncoderBase::ProcessorInit(void) { ret = SetVideoEncoderParam(); CHECK_STATUS("SetVideoEncoderParam"); + if (bAndroidOpaqueFormat) { + hw_module_t const* module; + int err = hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module); + if (err == 0) { + mGrallocMod = (IMG_gralloc_module_public_t const*)module; + gralloc_open(module, &mAllocDev); + for (int i = 0; i < INPORT_ACTUAL_BUFFER_COUNT; i++) { + status_t err = mAllocDev->alloc(mAllocDev, + mEncoderParams->resolution.width, + mEncoderParams->resolution.height, + OMX_INTEL_COLOR_FormatYUV420PackedSemiPlanar, + GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE, + (buffer_handle_t*)&(mBufferHandleMaps[i].mHandle), + &mBufferHandleMaps[i].mStride); + ALOGE_IF(err, "alloc(%u, %u, %d, %08x, 
...) failed %d (%s)", + mEncoderParams->resolution.width, + mEncoderParams->resolution.height, + OMX_INTEL_COLOR_FormatYUV420PackedSemiPlanar, + GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE, err, strerror(-err)); + mBufferHandleMaps[i].mHeader = NULL; + ALOGI("width %d, height %d, iWidth %d, iHeight %d, iFormat %x", + mEncoderParams->resolution.width, + mEncoderParams->resolution.height, + mBufferHandleMaps[i].mHandle->iWidth, + mBufferHandleMaps[i].mHandle->iHeight, mBufferHandleMaps[i].mHandle->iFormat); + } + } else { + ALOGE("FATAL: can't find the %s module", GRALLOC_HARDWARE_MODULE_ID); + return OMX_ErrorUndefined; + } + } + if (mVideoEncoder->start() != ENCODE_SUCCESS) { LOGE("Start failed, ret = 0x%08x\n", ret); return OMX_ErrorUndefined; @@ -363,6 +396,16 @@ OMX_ERRORTYPE OMXVideoEncoderBase::ProcessorDeinit(void) { if(mVideoEncoder) { mVideoEncoder->stop(); } + + if(bAndroidOpaqueFormat) { + for (int i = 0; i < INPORT_ACTUAL_BUFFER_COUNT; i++) { + status_t err = mAllocDev->free(mAllocDev, + (buffer_handle_t)mBufferHandleMaps[i].mHandle); + ALOGW_IF(err, "free(...) 
failed %d (%s)", err, strerror(-err)); + mBufferHandleMaps[i].mHandle = NULL; + } + gralloc_close(mAllocDev); + } return OMX_ErrorNone; } @@ -408,15 +451,22 @@ OMX_ERRORTYPE OMXVideoEncoderBase::BuildHandlerList(void) { OMX_ERRORTYPE OMXVideoEncoderBase::GetParamVideoPortFormat(OMX_PTR pStructure) { OMX_ERRORTYPE ret; + OMX_U32 index; OMX_VIDEO_PARAM_PORTFORMATTYPE *p = (OMX_VIDEO_PARAM_PORTFORMATTYPE *)pStructure; CHECK_TYPE_HEADER(p); CHECK_PORT_INDEX_RANGE(p); - CHECK_ENUMERATION_RANGE(p->nIndex, 1); + CHECK_ENUMERATION_RANGE(p->nIndex, 2); PortVideo *port = NULL; port = static_cast<PortVideo *>(this->ports[p->nPortIndex]); + index = p->nIndex; memcpy(p, port->GetPortVideoParam(), sizeof(*p)); + // FIXME: port only supports OMX_COLOR_FormatYUV420SemiPlanar + if (index == 1) { + p->nIndex = 1; + p->eColorFormat = (OMX_COLOR_FORMATTYPE)OMX_COLOR_FormatAndroidOpaque; + } return OMX_ErrorNone; } @@ -431,6 +481,12 @@ OMX_ERRORTYPE OMXVideoEncoderBase::SetParamVideoPortFormat(OMX_PTR pStructure) { // TODO: do we need to check if port is enabled? 
PortVideo *port = NULL; port = static_cast<PortVideo *>(this->ports[p->nPortIndex]); + // FIXME: port only supports OMX_COLOR_FormatYUV420SemiPlanar + if (p->eColorFormat == OMX_COLOR_FormatAndroidOpaque) { + p->nIndex = 0; + p->eColorFormat = OMX_COLOR_FormatYUV420SemiPlanar; + bAndroidOpaqueFormat = OMX_TRUE; + } port->SetPortVideoParam(p, false); return OMX_ErrorNone; } @@ -760,7 +816,6 @@ OMX_ERRORTYPE OMXVideoEncoderBase::GetStoreMetaDataInBuffers(OMX_PTR pStructure) return OMX_ErrorNone; }; - OMX_ERRORTYPE OMXVideoEncoderBase::SetStoreMetaDataInBuffers(OMX_PTR pStructure) { OMX_ERRORTYPE ret; StoreMetaDataInBuffersParams *p = (StoreMetaDataInBuffersParams *)pStructure; @@ -800,3 +855,46 @@ OMX_ERRORTYPE OMXVideoEncoderBase::SetStoreMetaDataInBuffers(OMX_PTR pStructure) return OMX_ErrorNone; }; +// Utility function that blits the original source buffer in RGBA format to a temporary +// buffer in NV12 format, and use the temporary buffer as the source buffer +int32_t OMXVideoEncoderBase::rgba2nv12conversion(OMX_BUFFERHEADERTYPE *pBuffer) +{ + int i, err; + + // Every input buffer keeps its own state + for (i = 0; i < sizeof(mBufferHandleMaps) / sizeof(mBufferHandleMaps[0]); i++) { + if (mBufferHandleMaps[i].mHeader == pBuffer) + break; + } + if (i == sizeof(mBufferHandleMaps) / sizeof(mBufferHandleMaps[0])) { + for (i = 0; i < sizeof(mBufferHandleMaps) / sizeof(mBufferHandleMaps[0]); i++) { + if (mBufferHandleMaps[i].mHeader == NULL) { + mBufferHandleMaps[i].mHeader = pBuffer; + break; + } + } + } + + // Backup input buffer content + memcpy(mBufferHandleMaps[i].backBuffer, pBuffer->pBuffer, + pBuffer->nFilledLen); + + // Get source buffer handle + memcpy(&mBufferHandleMaps[i].srcBuffer, pBuffer->pBuffer + 4, 4); + + // Color space conversion + err = mGrallocMod->Blit2(mGrallocMod, (native_handle_t*)mBufferHandleMaps[i].srcBuffer, + (native_handle_t*)mBufferHandleMaps[i].mHandle, + mEncoderParams->resolution.width, mEncoderParams->resolution.height, 0, 0); + 
ALOGE_IF(err, "Blit2(mBufferHandleMaps[%d].srcBuffer)", i); + + // Wrap destination buffer handle to encoder's input format + IntelMetadataBuffer *imb = new IntelMetadataBuffer(MetadataBufferTypeGrallocSource, + (int32_t)mBufferHandleMaps[i].mHandle); + imb->Serialize((uint8_t*&)pBuffer->pBuffer, + (uint32_t&)pBuffer->nFilledLen); + + return i; + +} + diff --git a/videocodec/OMXVideoEncoderBase.h b/videocodec/OMXVideoEncoderBase.h index 3a7f5bf..95c8216 100644 --- a/videocodec/OMXVideoEncoderBase.h +++ b/videocodec/OMXVideoEncoderBase.h @@ -24,10 +24,12 @@ #include <va/va_tpi.h> #include <va/va_android.h> #include<VideoEncoderHost.h> +#include "hal_public.h" using android::sp; #define SHARED_BUFFER_CNT 7 +#define OMX_COLOR_FormatAndroidOpaque 0x7F000789 class OMXVideoEncoderBase : public OMXComponentCodecBase { public: @@ -91,7 +93,8 @@ private: enum { // OMX_PARAM_PORTDEFINITIONTYPE INPORT_MIN_BUFFER_COUNT = 1, - INPORT_ACTUAL_BUFFER_COUNT = 2, + // FIXME: increate input buffer count to 5 + INPORT_ACTUAL_BUFFER_COUNT = 5, INPORT_BUFFER_SIZE = 1382400, // OMX_PARAM_PORTDEFINITIONTYPE @@ -101,6 +104,22 @@ private: }; OMX_U32 mPFrames; + +public: + struct { + OMX_BUFFERHEADERTYPE* mHeader; + buffer_handle_t srcBuffer; + uint8_t backBuffer[4 + sizeof(buffer_handle_t)]; + IMG_native_handle_t* mHandle; + int32_t mStride; + void *vaddr[3]; + } mBufferHandleMaps[INPORT_ACTUAL_BUFFER_COUNT]; + OMX_BOOL bAndroidOpaqueFormat; + alloc_device_t *mAllocDev; + IMG_gralloc_module_public_t const *mGrallocMod; + int32_t mCurHandle; + int32_t rgba2nv12conversion(OMX_BUFFERHEADERTYPE*); + }; #endif /* OMX_VIDEO_ENCODER_BASE_H_ */ diff --git a/videocodec/OMXVideoEncoderH263.cpp b/videocodec/OMXVideoEncoderH263.cpp index dbc5861..15572a4 100644 --- a/videocodec/OMXVideoEncoderH263.cpp +++ b/videocodec/OMXVideoEncoderH263.cpp @@ -108,6 +108,10 @@ OMX_ERRORTYPE OMXVideoEncoderH263::ProcessorProcess( goto out; } + if (bAndroidOpaqueFormat) { + mCurHandle = 
rgba2nv12conversion(buffers[INPORT_INDEX]); + } + inBuf.data = buffers[INPORT_INDEX]->pBuffer + buffers[INPORT_INDEX]->nOffset; inBuf.size = buffers[INPORT_INDEX]->nFilledLen; @@ -215,6 +219,14 @@ out: if (retains[OUTPORT_INDEX] == BUFFER_RETAIN_NOT_RETAIN) mFrameOutputCount ++; + if (bAndroidOpaqueFormat && buffers[INPORT_INDEX]->nFilledLen != 0) { + // Restore input buffer's content + buffers[INPORT_INDEX]->nFilledLen = 4 + sizeof(buffer_handle_t); + memcpy(buffers[INPORT_INDEX]->pBuffer, mBufferHandleMaps[mCurHandle].backBuffer, + buffers[INPORT_INDEX]->nFilledLen); + + } + LOGV_IF(oret == OMX_ErrorNone, "%s(),%d: exit, encode is done\n", __func__, __LINE__); return oret; diff --git a/videocodec/OMXVideoEncoderMPEG4.cpp b/videocodec/OMXVideoEncoderMPEG4.cpp index ac0225d..d9273f4 100644 --- a/videocodec/OMXVideoEncoderMPEG4.cpp +++ b/videocodec/OMXVideoEncoderMPEG4.cpp @@ -105,6 +105,9 @@ OMX_ERRORTYPE OMXVideoEncoderMPEG4::ProcessorProcess( inBuf.data = buffers[INPORT_INDEX]->pBuffer + buffers[INPORT_INDEX]->nOffset; inBuf.size = buffers[INPORT_INDEX]->nFilledLen; + if (bAndroidOpaqueFormat) { + mCurHandle = rgba2nv12conversion(buffers[INPORT_INDEX]); + } LOGV("inBuf.data=%x, size=%d", (unsigned)inBuf.data, inBuf.size); @@ -222,6 +225,14 @@ out: if (retains[OUTPORT_INDEX] == BUFFER_RETAIN_NOT_RETAIN) mFrameOutputCount ++; + if (bAndroidOpaqueFormat && buffers[INPORT_INDEX]->nFilledLen != 0) { + // Restore input buffer's content + buffers[INPORT_INDEX]->nFilledLen = 4 + sizeof(buffer_handle_t); + memcpy(buffers[INPORT_INDEX]->pBuffer, mBufferHandleMaps[mCurHandle].backBuffer, + buffers[INPORT_INDEX]->nFilledLen); + + } + return oret; } diff --git a/videocodec/hal_public.h b/videocodec/hal_public.h new file mode 100644 index 0000000..292d40f --- /dev/null +++ b/videocodec/hal_public.h @@ -0,0 +1,182 @@ +/* Copyright (c) Imagination Technologies Ltd. + * + * The contents of this file are subject to the MIT license as set out below. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef HAL_PUBLIC_H +#define HAL_PUBLIC_H + +/* Authors of third party hardware composer (HWC) modules will need to include + * this header to access functionality in the gralloc and framebuffer HALs. + */ + +#include <hardware/gralloc.h> + +#define ALIGN(x,a) (((x) + (a) - 1L) & ~((a) - 1L)) +#define HW_ALIGN 32 + +/* This can be tuned down as appropriate for the SOC. + * + * IMG formats are usually a single sub-alloc. + * Some OEM video formats are two sub-allocs (Y, UV planes). + * Future OEM video formats might be three sub-allocs (Y, U, V planes). 
+ */ +#define MAX_SUB_ALLOCS 3 + +enum { + HAL_PIXEL_FORMAT_BGRX_8888 = 0x1FF, + + HAL_PIXEL_FORMAT_NV12 = 0x3231564E, // YCrCb 4:2:0 Planar + HAL_PIXEL_FORMAT_I420 = 0x30323449, + HAL_PIXEL_FORMAT_YUY2 = 0x32595559, + HAL_PIXEL_FORMAT_UYVY = 0x59565955, + + HAL_PIXEL_FORMAT_YCbCr_422_P = 0x12, // IYUV + HAL_PIXEL_FORMAT_YCbCr_420_P = 0x13, // YUV9 + HAL_PIXEL_FORMAT_YCbCr_420_I = 0x15, + HAL_PIXEL_FORMAT_YCbCr_420_SP = 0x21, +}; + +typedef struct +{ + native_handle_t base; + + /* These fields can be sent cross process. They are also valid + * to duplicate within the same process. + * + * A table is stored within psPrivateData on gralloc_module_t (this + * is obviously per-process) which maps stamps to a mapped + * PVRSRV_CLIENT_MEM_INFO in that process. Each map entry has a lock + * count associated with it, satisfying the requirements of the + * Android API. This also prevents us from leaking maps/allocations. + * + * This table has entries inserted either by alloc() + * (alloc_device_t) or map() (gralloc_module_t). Entries are removed + * by free() (alloc_device_t) and unmap() (gralloc_module_t). + * + * As a special case for framebuffer_device_t, framebuffer_open() + * will add and framebuffer_close() will remove from this table. + */ + +#define IMG_NATIVE_HANDLE_NUMFDS MAX_SUB_ALLOCS + /* The `fd' field is used to "export" a meminfo to another process. + * Therefore, it is allocated by alloc_device_t, and consumed by + * gralloc_module_t. The framebuffer_device_t does not need a handle, + * and the special value IMG_FRAMEBUFFER_FD is used instead. + */ + int fd[MAX_SUB_ALLOCS]; + +#define IMG_NATIVE_HANDLE_NUMINTS ((sizeof(unsigned long long) / sizeof(int)) + 6) + /* A KERNEL unique identifier for any exported kernel meminfo. Each + * exported kernel meminfo will have a unique stamp, but note that in + * userspace, several meminfos across multiple processes could have + * the same stamp. 
As the native_handle can be dup(2)'d, there could be + * multiple handles with the same stamp but different file descriptors. + */ + unsigned long long ui64Stamp; + + /* This is used for buffer usage validation when locking a buffer, + * and also in WSEGL (for the composition bypass feature). + */ + int usage; + + /* In order to do efficient cache flushes we need the buffer dimensions + * and format. These are available on the ANativeWindowBuffer, + * but the platform doesn't pass them down to the graphics HAL. + * + * These fields are also used in the composition bypass. In this + * capacity, these are the "real" values for the backing allocation. + */ + int iWidth; + int iHeight; + int iFormat; + unsigned int uiBpp; + unsigned int ui32FrameIndex; +} +__attribute__((aligned(sizeof(int)),packed)) IMG_native_handle_t; + +typedef struct +{ + framebuffer_device_t base; + + /* The HWC was loaded. post() is no longer responsible for presents */ + int bBypassPost; + + /* HWC path for present posts */ + int (*Post2)(framebuffer_device_t *fb, buffer_handle_t *buffers, + int num_buffers, void *data, int data_length); +} +IMG_framebuffer_device_public_t; + +typedef struct IMG_gralloc_module_public_t +{ + gralloc_module_t base; + + /* If the framebuffer has been opened, this will point to the + * framebuffer device data required by the allocator, WSEGL + * modules and composerhal. 
+ */ + IMG_framebuffer_device_public_t *psFrameBufferDevice; + + int (*GetPhyAddrs)(struct IMG_gralloc_module_public_t const* module, + buffer_handle_t handle, + unsigned int auiPhyAddr[MAX_SUB_ALLOCS]); + + /* Custom-blit components in lieu of overlay hardware */ + int (*Blit)(struct IMG_gralloc_module_public_t const *module, + buffer_handle_t src, + void *dest[MAX_SUB_ALLOCS], int format); + + int (*Blit2)(struct IMG_gralloc_module_public_t const *module, + buffer_handle_t src, buffer_handle_t dest, + int w, int h, int x, int y); +} +IMG_gralloc_module_public_t; + +typedef struct +{ + int l, t, w, h; +} +IMG_write_lock_rect_t; + +typedef struct IMG_buffer_format_public_t +{ + /* Buffer formats are returned as a linked list */ + struct IMG_buffer_format_public_t *psNext; + + /* HAL_PIXEL_FORMAT_... enumerant */ + int iHalPixelFormat; + + /* WSEGL_PIXELFORMAT_... enumerant */ + int iWSEGLPixelFormat; + + /* Friendly name for format */ + const char *const szName; + + /* Bits (not bytes) per pixel */ + unsigned int uiBpp; + + /* GPU output format (creates EGLConfig for format) */ + int bGPURenderable; +} +IMG_buffer_format_public_t; + +#endif /* HAL_PUBLIC_H */ |
