-rw-r--r--   camera/Android.mk                          |  2
-rwxr-xr-x   camera/EmulatedCamera.cpp                  |  2
-rwxr-xr-x   camera/EmulatedCameraDevice.cpp            |  4
-rw-r--r--   camera/EmulatedFakeCamera2.cpp             | 30
-rw-r--r--   camera/EmulatedFakeCamera3.cpp             | 26
-rw-r--r--   camera/JpegStub.cpp                        |  2
-rwxr-xr-x   camera/QemuClient.cpp                      | 14
-rw-r--r--   camera/fake-pipeline2/JpegCompressor.cpp   |  4
8 files changed, 45 insertions, 39 deletions
diff --git a/camera/Android.mk b/camera/Android.mk
index d852e7f..e297778 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -19,6 +19,7 @@ include $(CLEAR_VARS)
 
 LOCAL_MODULE_RELATIVE_PATH := hw
 LOCAL_CFLAGS += -fno-short-enums -DQEMU_HARDWARE
+LOCAL_CFLAGS += -Wno-unused-parameter -Wno-missing-field-initializers
 LOCAL_SHARED_LIBRARIES:= \
     libbinder \
     liblog \
@@ -78,6 +79,7 @@ include $(CLEAR_VARS)
 
 LOCAL_MODULE_RELATIVE_PATH := hw
 LOCAL_CFLAGS += -fno-short-enums -DQEMU_HARDWARE
+LOCAL_CFLAGS += -Wno-unused-parameter
 LOCAL_SHARED_LIBRARIES:= \
     libcutils \
     liblog \
diff --git a/camera/EmulatedCamera.cpp b/camera/EmulatedCamera.cpp
index 28aede1..096c5b2 100755
--- a/camera/EmulatedCamera.cpp
+++ b/camera/EmulatedCamera.cpp
@@ -1016,7 +1016,7 @@ static void PrintParamDiff(const CameraParameters& current,
     /* Divided with ';' */
     const char* next = strchr(wrk, ';');
     while (next != NULL) {
-        snprintf(tmp, sizeof(tmp), "%.*s", next-wrk, wrk);
+        snprintf(tmp, sizeof(tmp), "%.*s", (int)(intptr_t)(next-wrk), wrk);
         /* in the form key=value */
         char* val = strchr(tmp, '=');
         if (val != NULL) {
diff --git a/camera/EmulatedCameraDevice.cpp b/camera/EmulatedCameraDevice.cpp
index 5c52808..b76353d 100755
--- a/camera/EmulatedCameraDevice.cpp
+++ b/camera/EmulatedCameraDevice.cpp
@@ -51,7 +51,7 @@ EmulatedCameraDevice::~EmulatedCameraDevice()
     if (mCurrentFrame != NULL) {
         delete[] mCurrentFrame;
     }
-    for (int i = 0; i < mSupportedWhiteBalanceScale.size(); ++i) {
+    for (size_t i = 0; i < mSupportedWhiteBalanceScale.size(); ++i) {
         if (mSupportedWhiteBalanceScale.valueAt(i) != NULL) {
             delete[] mSupportedWhiteBalanceScale.valueAt(i);
         }
@@ -223,7 +223,7 @@ status_t EmulatedCameraDevice::commonStartDevice(int width,
         ALOGE("%s: Unable to allocate framebuffer", __FUNCTION__);
         return ENOMEM;
     }
-    ALOGV("%s: Allocated %p %d bytes for %d pixels in %.4s[%dx%d] frame",
+    ALOGV("%s: Allocated %p %zu bytes for %d pixels in %.4s[%dx%d] frame",
          __FUNCTION__, mCurrentFrame, mFrameBufferSize, mTotalPixels,
          reinterpret_cast<const char*>(&mPixelFormat), mFrameWidth, mFrameHeight);
     return NO_ERROR;
diff --git a/camera/EmulatedFakeCamera2.cpp b/camera/EmulatedFakeCamera2.cpp
index f007a86..ca6f5ed 100644
--- a/camera/EmulatedFakeCamera2.cpp
+++ b/camera/EmulatedFakeCamera2.cpp
@@ -19,6 +19,8 @@
  * functionality of an advanced fake camera.
  */
 
+#include <inttypes.h>
+
 //#define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_FakeCamera2"
 #include <utils/Log.h>
@@ -926,7 +928,7 @@ bool EmulatedFakeCamera2::ConfigureThread::setupCapture() {
         b.format = s.format;
         b.stride = s.stride;
         mNextBuffers->push_back(b);
-        ALOGV("Configure: Buffer %d: Stream %d, %d x %d, format 0x%x, "
+        ALOGV("Configure: Buffer %zu: Stream %d, %d x %d, format 0x%x, "
                 "stride %d",
                 i, b.streamId, b.width, b.height, b.format, b.stride);
         if (b.format == HAL_PIXEL_FORMAT_BLOB) {
@@ -1086,7 +1088,7 @@ bool EmulatedFakeCamera2::ConfigureThread::setupReprocess() {
         b.format = s.format;
         b.stride = s.stride;
         mNextBuffers->push_back(b);
-        ALOGV("Configure: Buffer %d: Stream %d, %d x %d, format 0x%x, "
+        ALOGV("Configure: Buffer %zu: Stream %d, %d x %d, format 0x%x, "
                 "stride %d",
                 i, b.streamId, b.width, b.height, b.format, b.stride);
     }
@@ -1327,7 +1329,7 @@ bool EmulatedFakeCamera2::ReadoutThread::threadLoop() {
             mInFlightQueue[mInFlightHead].request = NULL;
             mInFlightQueue[mInFlightHead].buffers = NULL;
             mInFlightHead = (mInFlightHead + 1) % kInFlightQueueSize;
-            ALOGV("Ready to read out request %p, %d buffers",
+            ALOGV("Ready to read out request %p, %zu buffers",
                     mRequest, mBuffers->size());
         }
     }
@@ -1455,17 +1457,17 @@ bool EmulatedFakeCamera2::ReadoutThread::threadLoop() {
     mRequest = NULL;
 
     int compressedBufferIndex = -1;
-    ALOGV("Readout: Processing %d buffers", mBuffers->size());
+    ALOGV("Readout: Processing %zu buffers", mBuffers->size());
     for (size_t i = 0; i < mBuffers->size(); i++) {
         const StreamBuffer &b = (*mBuffers)[i];
-        ALOGV("Readout: Buffer %d: Stream %d, %d x %d, format 0x%x, stride %d",
+        ALOGV("Readout: Buffer %zu: Stream %d, %d x %d, format 0x%x, stride %d",
                 i, b.streamId, b.width, b.height, b.format, b.stride);
         if (b.streamId > 0) {
             if (b.format == HAL_PIXEL_FORMAT_BLOB) {
                 // Assumes only one BLOB buffer type per capture
                 compressedBufferIndex = i;
             } else {
-                ALOGV("Readout: Sending image buffer %d (%p) to output stream %d",
+                ALOGV("Readout: Sending image buffer %zu (%p) to output stream %d",
                         i, (void*)*(b.buffer), b.streamId);
                 GraphicBufferMapper::get().unlock(*(b.buffer));
                 const Stream &s = mParent->getStreamInfo(b.streamId);
@@ -1933,7 +1935,7 @@ int EmulatedFakeCamera2::ControlThread::processAfTrigger(uint8_t afMode,
             mAfScanDuration = ((double)rand() / RAND_MAX) *
                 (kMaxAfDuration - kMinAfDuration) + kMinAfDuration;
             afState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
-            ALOGV("%s: AF scan start, duration %lld ms",
+            ALOGV("%s: AF scan start, duration %" PRId64 " ms",
                     __FUNCTION__, mAfScanDuration / 1000000);
             break;
         case ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN:
@@ -2003,7 +2005,7 @@ int EmulatedFakeCamera2::ControlThread::maybeStartAfScan(uint8_t afMode,
         mAfScanDuration = ((double)rand() / RAND_MAX) *
                 (kMaxAfDuration - kMinAfDuration) + kMinAfDuration;
         afState = ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN;
-        ALOGV("%s: AF passive scan start, duration %lld ms",
+        ALOGV("%s: AF passive scan start, duration %" PRId64 " ms",
                 __FUNCTION__, mAfScanDuration / 1000000);
     }
 }
@@ -2079,7 +2081,7 @@ int EmulatedFakeCamera2::ControlThread::processPrecaptureTrigger(uint8_t aeMode,
         mAeScanDuration = ((double)rand() / RAND_MAX) *
                 (kMaxPrecaptureAeDuration - kMinPrecaptureAeDuration) +
                 kMinPrecaptureAeDuration;
-        ALOGD("%s: AE precapture scan start, duration %lld ms",
+        ALOGD("%s: AE precapture scan start, duration %" PRId64 " ms",
                 __FUNCTION__, mAeScanDuration / 1000000);
     }
 
@@ -2105,7 +2107,7 @@ int EmulatedFakeCamera2::ControlThread::maybeStartAeScan(uint8_t aeMode,
         mAeScanDuration = ((double)rand() / RAND_MAX) *
                 (kMaxAeDuration - kMinAeDuration) + kMinAeDuration;
         aeState = ANDROID_CONTROL_AE_STATE_SEARCHING;
-        ALOGV("%s: AE scan start, duration %lld ms",
+        ALOGV("%s: AE scan start, duration %" PRId64 " ms",
                 __FUNCTION__, mAeScanDuration / 1000000);
     }
 }
@@ -2466,13 +2468,13 @@ status_t EmulatedFakeCamera2::constructStaticInfo(
 #undef ADD_OR_SIZE
     /** Allocate metadata if sizing */
     if (sizeRequest) {
-        ALOGV("Allocating %d entries, %d extra bytes for "
+        ALOGV("Allocating %zu entries, %zu extra bytes for "
                 "static camera info",
                 entryCount, dataCount);
         *info = allocate_camera_metadata(entryCount, dataCount);
         if (*info == NULL) {
             ALOGE("Unable to allocate camera static info"
-                    "(%d entries, %d bytes extra data)",
+                    "(%zu entries, %zu bytes extra data)",
                     entryCount, dataCount);
             return NO_MEMORY;
         }
@@ -2776,13 +2778,13 @@ status_t EmulatedFakeCamera2::constructDefaultRequest(
 
     /** Allocate metadata if sizing */
     if (sizeRequest) {
-        ALOGV("Allocating %d entries, %d extra bytes for "
+        ALOGV("Allocating %zu entries, %zu extra bytes for "
                 "request template type %d",
                 entryCount, dataCount, request_template);
        *request = allocate_camera_metadata(entryCount, dataCount);
        if (*request == NULL) {
            ALOGE("Unable to allocate new request template type %d "
-                   "(%d entries, %d bytes extra data)", request_template,
+                   "(%zu entries, %zu bytes extra data)", request_template,
                    entryCount, dataCount);
            return NO_MEMORY;
        }
diff --git a/camera/EmulatedFakeCamera3.cpp b/camera/EmulatedFakeCamera3.cpp
index 0274aad..91337ae 100644
--- a/camera/EmulatedFakeCamera3.cpp
+++ b/camera/EmulatedFakeCamera3.cpp
@@ -19,6 +19,8 @@
  * functionality of an advanced fake camera.
  */
 
+#include <inttypes.h>
+
 //#define LOG_NDEBUG 0
 //#define LOG_NNDEBUG 0
 #define LOG_TAG "EmulatedCamera_FakeCamera3"
@@ -284,12 +286,12 @@ status_t EmulatedFakeCamera3::configureStreams(
         camera3_stream_t *newStream = streamList->streams[i];
 
         if (newStream == NULL) {
-            ALOGE("%s: Stream index %d was NULL",
+            ALOGE("%s: Stream index %zu was NULL",
                     __FUNCTION__, i);
             return BAD_VALUE;
         }
 
-        ALOGV("%s: Stream %p (id %d), type %d, usage 0x%x, format 0x%x",
+        ALOGV("%s: Stream %p (id %zu), type %d, usage 0x%x, format 0x%x",
                 __FUNCTION__, newStream, i, newStream->stream_type,
                 newStream->usage,
                 newStream->format);
@@ -803,27 +805,27 @@ status_t EmulatedFakeCamera3::processCaptureRequest(
         PrivateStreamInfo *priv =
                 static_cast<PrivateStreamInfo*>(b->stream->priv);
         if (priv == NULL) {
-            ALOGE("%s: Request %d: Buffer %d: Unconfigured stream!",
+            ALOGE("%s: Request %d: Buffer %zu: Unconfigured stream!",
                     __FUNCTION__, frameNumber, idx);
             return BAD_VALUE;
         }
         if (!priv->alive || !priv->registered) {
-            ALOGE("%s: Request %d: Buffer %d: Unregistered or dead stream!",
+            ALOGE("%s: Request %d: Buffer %zu: Unregistered or dead stream!",
                     __FUNCTION__, frameNumber, idx);
             return BAD_VALUE;
         }
         if (b->status != CAMERA3_BUFFER_STATUS_OK) {
-            ALOGE("%s: Request %d: Buffer %d: Status not OK!",
+            ALOGE("%s: Request %d: Buffer %zu: Status not OK!",
                     __FUNCTION__, frameNumber, idx);
             return BAD_VALUE;
         }
         if (b->release_fence != -1) {
-            ALOGE("%s: Request %d: Buffer %d: Has a release fence!",
+            ALOGE("%s: Request %d: Buffer %zu: Has a release fence!",
                     __FUNCTION__, frameNumber, idx);
             return BAD_VALUE;
         }
         if (b->buffer == NULL) {
-            ALOGE("%s: Request %d: Buffer %d: NULL buffer handle!",
+            ALOGE("%s: Request %d: Buffer %zu: NULL buffer handle!",
                     __FUNCTION__, frameNumber, idx);
             return BAD_VALUE;
         }
@@ -895,7 +897,7 @@ status_t EmulatedFakeCamera3::processCaptureRequest(
         sp<Fence> bufferAcquireFence = new Fence(srcBuf.acquire_fence);
         res = bufferAcquireFence->wait(kFenceTimeoutMs);
         if (res == TIMED_OUT) {
-            ALOGE("%s: Request %d: Buffer %d: Fence timed out after %d ms",
+            ALOGE("%s: Request %d: Buffer %zu: Fence timed out after %d ms",
                     __FUNCTION__, frameNumber, i, kFenceTimeoutMs);
         }
         if (res == OK) {
@@ -922,7 +924,7 @@ status_t EmulatedFakeCamera3::processCaptureRequest(
                         (void**)&(destBuf.img));
             }
             if (res != OK) {
-                ALOGE("%s: Request %d: Buffer %d: Unable to lock buffer",
+                ALOGE("%s: Request %d: Buffer %zu: Unable to lock buffer",
                         __FUNCTION__, frameNumber, i);
             }
         }
@@ -974,7 +976,7 @@ status_t EmulatedFakeCamera3::processCaptureRequest(
             return NO_INIT;
         }
         if (syncTimeoutCount == kMaxSyncTimeoutCount) {
-            ALOGE("%s: Request %d: Sensor sync timed out after %lld ms",
+            ALOGE("%s: Request %d: Sensor sync timed out after %" PRId64 " ms",
                     __FUNCTION__, frameNumber,
                     kSyncWaitTimeout * kMaxSyncTimeoutCount / 1000000);
             return NO_INIT;
@@ -1438,7 +1440,7 @@ status_t EmulatedFakeCamera3::doFakeAE(CameraMetadata &settings) {
     if (precaptureTrigger) {
         ALOGV("%s: Pre capture trigger = %d", __FUNCTION__, precaptureTrigger);
     } else if (e.count > 0) {
-        ALOGV("%s: Pre capture trigger was present? %d",
+        ALOGV("%s: Pre capture trigger was present? %zu",
                 __FUNCTION__, e.count);
     }
 
@@ -1833,7 +1835,7 @@ void EmulatedFakeCamera3::onSensorEvent(uint32_t frameNumber, Event e,
             break;
         }
         default:
-            ALOGW("%s: Unexpected sensor event %d at %lld", __FUNCTION__,
+            ALOGW("%s: Unexpected sensor event %d at %" PRId64, __FUNCTION__,
                     e, timestamp);
             break;
     }
diff --git a/camera/JpegStub.cpp b/camera/JpegStub.cpp
index ce7ca89..084f5fc 100644
--- a/camera/JpegStub.cpp
+++ b/camera/JpegStub.cpp
@@ -46,7 +46,7 @@ extern "C" int JpegStub_compress(JpegStub* stub, const void* image,
     SkDynamicMemoryWStream* stream =
             (SkDynamicMemoryWStream*)stub->mInternalStream;
     if (encoder->encode(stream, pY, width, height, offsets, quality)) {
-        ALOGV("%s: Compressed JPEG: %d[%dx%d] -> %d bytes",
+        ALOGV("%s: Compressed JPEG: %d[%dx%d] -> %zu bytes",
                 __FUNCTION__, (width * height * 12) / 8,
                 width, height, stream->getOffset());
         return 0;
diff --git a/camera/QemuClient.cpp b/camera/QemuClient.cpp
index 17e6f98..111cbb8 100755
--- a/camera/QemuClient.cpp
+++ b/camera/QemuClient.cpp
@@ -100,7 +100,7 @@ status_t QemuQuery::createQuery(const char* name, const char* param)
         /* Preallocated buffer was too small. Allocate a bigger query buffer. */
         mQuery = new char[required];
         if (mQuery == NULL) {
-            ALOGE("%s: Unable to allocate %d bytes for query buffer",
+            ALOGE("%s: Unable to allocate %zu bytes for query buffer",
                  __FUNCTION__, required);
            mQueryDeliveryStatus = ENOMEM;
            return ENOMEM;
        }
@@ -311,7 +311,7 @@ status_t QemuClient::receiveMessage(void** data, size_t* data_size)
    /* Allocate payload data buffer, and read the payload there. */
    *data = malloc(payload_size);
    if (*data == NULL) {
-        ALOGE("%s: Unable to allocate %d bytes payload buffer",
+        ALOGE("%s: Unable to allocate %zu bytes payload buffer",
             __FUNCTION__, payload_size);
        return ENOMEM;
    }
@@ -320,7 +320,7 @@ status_t QemuClient::receiveMessage(void** data, size_t* data_size)
        *data_size = payload_size;
        return NO_ERROR;
    } else {
-        ALOGE("%s: Read size %d doesnt match expected payload size %d: %s",
+        ALOGE("%s: Read size %d doesnt match expected payload size %zu: %s",
             __FUNCTION__, rd_res, payload_size, strerror(errno));
        free(*data);
        *data = NULL;
@@ -408,7 +408,7 @@ status_t FactoryQemuClient::listCameras(char** list)
        ALOGD("Emulated camera list: %s", *list);
        return NO_ERROR;
    } else {
-        ALOGE("%s: Unable to allocate %d bytes",
+        ALOGE("%s: Unable to allocate %zu bytes",
             __FUNCTION__, query.mReplyDataSize);
        return ENOMEM;
    }
@@ -512,7 +512,7 @@ status_t CameraQemuClient::queryFrame(void* vframe,
    ALOGV("%s", __FUNCTION__);
 
    char query_str[256];
-    snprintf(query_str, sizeof(query_str), "%s video=%d preview=%d whiteb=%g,%g,%g expcomp=%g",
+    snprintf(query_str, sizeof(query_str), "%s video=%zu preview=%zu whiteb=%g,%g,%g expcomp=%g",
             mQueryFrame, (vframe && vframe_size) ? vframe_size : 0,
             (pframe && pframe_size) ? pframe_size : 0,
             r_scale, g_scale, b_scale, exposure_comp);
@@ -536,7 +536,7 @@ status_t CameraQemuClient::queryFrame(void* vframe,
            memcpy(vframe, frame, vframe_size);
            cur_offset += vframe_size;
        } else {
-            ALOGE("%s: Reply %d bytes is to small to contain %d bytes video frame",
+            ALOGE("%s: Reply %zu bytes is to small to contain %zu bytes video frame",
                 __FUNCTION__, query.mReplyDataSize - cur_offset, vframe_size);
            return EINVAL;
        }
@@ -547,7 +547,7 @@ status_t CameraQemuClient::queryFrame(void* vframe,
            memcpy(pframe, frame + cur_offset, pframe_size);
            cur_offset += pframe_size;
        } else {
-            ALOGE("%s: Reply %d bytes is to small to contain %d bytes preview frame",
+            ALOGE("%s: Reply %zu bytes is to small to contain %zu bytes preview frame",
                 __FUNCTION__, query.mReplyDataSize - cur_offset, pframe_size);
            return EINVAL;
        }
diff --git a/camera/fake-pipeline2/JpegCompressor.cpp b/camera/fake-pipeline2/JpegCompressor.cpp
index 7202ff3..d0dce26 100644
--- a/camera/fake-pipeline2/JpegCompressor.cpp
+++ b/camera/fake-pipeline2/JpegCompressor.cpp
@@ -263,7 +263,7 @@ void JpegCompressor::jpegErrorHandler(j_common_ptr cinfo) {
 
 void JpegCompressor::jpegInitDestination(j_compress_ptr cinfo) {
     JpegDestination *dest= static_cast<JpegDestination*>(cinfo->dest);
-    ALOGV("%s: Setting destination to %p, size %d",
+    ALOGV("%s: Setting destination to %p, size %zu",
            __FUNCTION__, dest->parent->mJpegBuffer.img, kMaxJpegSize);
     dest->next_output_byte = (JOCTET*)(dest->parent->mJpegBuffer.img);
     dest->free_in_buffer = kMaxJpegSize;
@@ -276,7 +276,7 @@ boolean JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr cinfo) {
 }
 
 void JpegCompressor::jpegTermDestination(j_compress_ptr cinfo) {
-    ALOGV("%s: Done writing JPEG data. %d bytes left in buffer",
+    ALOGV("%s: Done writing JPEG data. %zu bytes left in buffer",
            __FUNCTION__, cinfo->dest->free_in_buffer);
 }
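
The changes above all follow one pattern for building cleanly with format-string checking on both 32-bit and 64-bit targets: size_t values (buffer sizes, counts, Vector::size() results) are printed with %zu, 64-bit durations and timestamps (nsecs_t, an int64_t) are printed with the PRId64 macro from <inttypes.h>, and the pointer difference passed as the precision of "%.*s" is cast to int, the type printf requires there. The remaining Android.mk lines simply silence -Wunused-parameter and -Wmissing-field-initializers rather than touching code. The sketch below is a minimal, standalone illustration of the same idiom; it uses plain printf and made-up example values in place of the HAL's ALOGV/ALOGE calls and member variables.

// Minimal sketch of the format-specifier idiom applied throughout this change.
// Plain printf and example values stand in for the HAL's ALOGV/ALOGE calls.
#include <inttypes.h>  // PRId64
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main() {
    size_t frameBufferSize = 460800;       // hypothetical frame buffer size
    int64_t afScanDurationNs = 1500000000; // hypothetical AF scan duration, ns

    // size_t takes %zu; int64_t takes PRId64, so the same format string is
    // correct whether int64_t is long (LP64) or long long (32-bit builds).
    printf("Allocated %zu bytes\n", frameBufferSize);
    printf("AF scan start, duration %" PRId64 " ms\n", afScanDurationNs / 1000000);

    // The precision argument of %.*s must be an int, so a pointer difference
    // (ptrdiff_t) needs an explicit cast, as done in PrintParamDiff().
    const char* params = "preview-size=640x480;picture-size=1280x960";
    const char* next = strchr(params, ';');
    char first[64];
    snprintf(first, sizeof(first), "%.*s", (int)(next - params), params);
    printf("first parameter: %s\n", first);
    return 0;
}

Without the cast, next - params is a 64-bit ptrdiff_t on LP64 builds, and passing it where the variadic call expects an int is exactly the kind of mismatch these warnings exist to catch.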