diff options
author | Eric Luong <x0119002@ti.com> | 2015-02-06 10:54:56 -0800 |
---|---|---|
committer | Hashcode <hashcode0f@gmail.com> | 2015-02-06 10:59:59 -0800 |
commit | 391c312c8964088c512de2cfb1e29c78d245d06b (patch) | |
tree | bdef0b7ecb2ff0da3c9b5922fa358fca1bdbbe73 /pvr-source/services4/srvkm | |
parent | 882c2b4c53e1b2633700906b50c86d4b5f4ce274 (diff) | |
download | hardware_ti_omap4-391c312c8964088c512de2cfb1e29c78d245d06b.tar.gz hardware_ti_omap4-391c312c8964088c512de2cfb1e29c78d245d06b.tar.bz2 hardware_ti_omap4-391c312c8964088c512de2cfb1e29c78d245d06b.zip |
IMG DDK 1.9@2166536 for Android
IMG DDK Release 1.9@2166536 for Android.
Included in this release:
- User space and Kernel module binaries
- Kernel module source code
TI's Patches:
- 084d3db SGX-KM: sgxfreq: Enable on3demand governor as default
- c1e1f93 SGX-KM: on3demand: Added polling based on timeout
- 1baf19c Build: Compile OMAP4 kernel modules with arm-eabi toolchain
- e166956 Build: Add production build option to build script
- 9efd5d3 SGX UM: Properly update vertex shader constants when recompiled
- f6e71f1 Revert "Build: Add optional flag to disable uKernel logging"
- a49e042 SGXKM: Inherit PVRSRV_HAP_GPU_PAGEABLE flag
- f05da87 SGXUM: Creates a new PVRSRV_HAP MAPPING CTRL mask
- 0e6ac23 SGXKM: Creates a new PVRSRV_HAP MAPPING CTRL mask
- 5044cbb SGXKM: Divorce Sparse Mapping from GPU Pageable
- 4abdd37 SGX-KM: sgxfreq: Header for GPL license
- 7a1e61b gpu: thermal: adding cooling device for "case" management
- 1221aba SGX-KM: Add 'userspace' governor to sgxfreq
- 7cc1319 SGX-KM: Add on3demand governor to sgxfreq
- c3283ff SGX-KM: Allow sgxfreq active notification when prev state was active
- 7275e62 SGX-KM: Add idle and active time caluclation to sgxfreq
- e15265c SGX-KM: Add frame_done interface to sgxfreq
- a021f10 SGX-KM: Add activeidle governor to sgxfreq
- bbdceee SGX-KM: Add active/idle notification to sgxfreq
- 4e1e8d9 SGX-UM: Rework SGX idle notification
- fce3459 SGX-KM: Rework SGX idle notification
- 17cdf8c SGX-KM: Add onoff governor to sgxfreq
- 403caa1 SGX-KM: Add cooling device interface to sgxfreq
- 1d785b8 SGX-KM: Add sgxfreq subsystem for DVFS control
- 14de6d8 Build: Add optional flag to disable uKernel logging
- 374bea1 SGX UM: Set ro.product.processor before loading modules
- 91d286d SGX UM: Pvrsrvinit fix typo in remount command
- 3d08869 SGX UM: Remove BRN32044 for omap5
- 086f52b OMAP5: WA: Race condition when SGX is powered down
- 1a904c2 SGX KM: ShrinkPagePool statistics changed to PVR_DBG_MESSAGE
- fbf2890 SGX KM: Fix num_handle calculation for ion handles
- 322af97 BUILD: fix usage and help
- 50440d3 BUILD: Add install option "adb"
- ee66bfb pvr-km: gc: Add page offset for ion allocated buffers
- be4fe11 pvr-km: gc: Improve gc map/unmap logging
- 51da16d gralloc: Map NV12 buffers with the GC MMU
- 210b590 SGX-KM: Enable APM for OMAP5
- 31e2f05 SGX-UM: Enable APM for OMAP5
- a98b81b SGX-UM: Don't load omaplfb module when in-kernel driver is present
- b20f5c6 SGX-KM: Support in-kernel omaplfb
- 0955f19 SGXKM: Multi-plane support for deviceclass i-face
- 11f6682 build: remove omaplfb from install step
- 9ecd6e0 pvr-um: use arm-linux-androideabi- and fix JB debug build
- abef31d PVR-UM: Make pvrsrvinit wrapper compatible with Jellybean
- 5b4e4f0 Revert "SGXUM: Implements Gralloc late CPU mapping"
- 5f25289 SGX-UM: build - Remove target platform based configuration
- 9d5ac31 OMAP5: BUILD: Remove unused variable
- 5365b64 readme: Correct DDK version
- 8095cc6 SGX-UM: Add support for hardware specific powervr.ini files
- 7e13d26 PVR-UM: Add support to DDK for powervr.ini files
- e545f59 SGX-UM: Added 16 bit depth EGL configs
- 27da0ae SGX UM: Srvinit block until services ready
- ba35538 SGX UM: HAL block OpenPVRServices until services initialized
- 43f8c1f SGX UM: Fix calculation of chroma plane in blit_internal
- f6a6944 SGX KM: Dump dsscomp info during HW recovery
- fc6d85b SGXKM: Adds support for 1D buffer allocation
- d8d061b SGXKM: Do not perform explicit invalidate on mmap
- 3ac6e1f SGXUM: Implements Gralloc late CPU mapping
- b621744 SGXUM: Gralloc allow for late or no GPU mapping
- dde30cf SGXUM: Add allocation of images from system heap
- 552c0f5 SGXUM: Adds A8/U8/Y8 color format to WSEGL
- f1c7822 SGXKM: Increase XPROC_WORKAROUND to 500
- 65f61bf SGXKM: Fix cc-check.sh file permissions
- 0dfe392 SGXKM: Make the DMM offset optional
- 946eb30 gralloc: add support for GRALLOC_USAGE_EXCLUSIVE_DISP
- 5cf7248 gralloc: publicly define omap specific usage flags
- afcb9bd SGX-KM: Block DPLL cascading when SGX clock is enabled
- 616ff0b SGX-KM: Hold wake lock during hardware recovery
- 872b4c0 SGXKM: Fix NULL handle warning when blitting GC320
- 39de55c SGXKM: Allow for late or no GPU mapping
- d229a7b SGXKM: Allow for SW access to a tiler buffer
- 7024790 SGXUM: Adds YUV plane offsets for MM
- d202649 SGXKM: SGX Tiler non-page-aligned support
- 2b2ac18 SGXUM: Implements GPU Common Buffer Interface
- 86cd052 SGXUM: Multi-buffer manage bridge
- d272c49 SGXKM: Multi-buffer manage bridge
- 4d8facf SGXKM: Implements Heap Alloc Failure Report
- 6d4253a SGXUM: Add support for GPU unmap/remap
- 64f4805 SGXKM: Add support for GPU unmap/remap
- 5425356 SGX-KM: Use CONFIG_DRM_OMAP_DMM_TILER for kernel 3.4
- 853be19 SGX-KM: Use pud_offset to get pmd_offset
- 5ec5d70 PVR-KM: Prevent compilation of dc_omapfb3_linux
- 1bbe8a2 SGX-KM: Remove hardcoding of values in egl.cfg
- 83b8af6 pvr-km: kfree phys info at unmap instead of map
- f347fb9 pvr-km: add a struct size to the physical descriptor
- 6ccff8f gralloc: Set flag to enable GC MMU mapping in PVR services
- 0cfaa6d PVR-KM: Add function to obtain BV descriptor through 3PDC interface
- c8f4c5f PVR-KM: Map buffers to GC core MMU on allocation time with Bltsville
- 65b2b84 SGXKM: Prevent mapping of export with zero ref
- f4cc0a2 OMAP4-SGX-UM: Allow for tiler buffer SW access
- 5c97ded OMAP4-SGX-UM: Gralloc SW access and caching flags
- bbf5424 OMAP4-SGX-UM: Gralloc HAL_PIXEL_FORMAT_NV12 format
- ec6cc69 SGX-KM: Make PVRSRVExportFDToIONHandles generic and register it with ion
- 8c1255d PVR-KM: OMAP5: Use shared device for Tiler 2D Mappings
- 2391ac8 PVR-KM: OMAP5: Hardcode core version value
- 7d87962 SGX-KM: OMAP5: HACK: Set the size of the SGX registers
- 9f40224 SGX-UM: add detection of OMAP5432 in pvrsrvinit
- f75d48b SGX-UM: build: Add panda5 and omap5sevm to product list
- c23eff9 SGX-KM: BUILD: Add OMAP5 support
- 5cc4ade SGX-UM: BUILD: Consolidate build into a single Makefile
- 2c6a2f6 SGX-KM: (build) Remove Android product and version dependency
- 6f54fe8 Build: Don't install egl.cfg anymore
- a49c59c SGX-KM: egl.cfg sysfs entry
- c759928 SGX-KM: Enable DPF, TRACE and ASSERT
- 1628094 build-km: Enable blaze and blaze_tablet for ICS environment
- 05f00eb build: Enable blaze and blaze_tablet for ICS environment
- 542e279 SGX-KM: Add ability to build multiple sets of GFX kernel modules
- 69d3661 build: Set correct load directory for kernel modules.
- 2dfe14b KM: add support for sgx544sc
- 58f317a SGX-UM: Add ability to build multiple sets of GFX binaries
- 04e5deb SGX-KM: Use platform data for OPP symbols.
- 5eed373 SGX-UM: Enable building binaries for specific SGX
- 0801be2 readme: Add README
- 649d010 build: Add build_DDK.sh
- fe34640 Create gitignore file
- 519ca9a IMG DDK 1.9@2166536 for Android
Change-Id: I4a060344fa134a2484d1b7a69fc87963455e9e34
Signed-off-by: Eric Luong <x0119002@ti.com>
Diffstat (limited to 'pvr-source/services4/srvkm')
110 files changed, 85125 insertions, 0 deletions
diff --git a/pvr-source/services4/srvkm/bridged/bridged_pvr_bridge.c b/pvr-source/services4/srvkm/bridged/bridged_pvr_bridge.c new file mode 100644 index 0000000..d98a71c --- /dev/null +++ b/pvr-source/services4/srvkm/bridged/bridged_pvr_bridge.c @@ -0,0 +1,5512 @@ +/*************************************************************************/ /*! +@Title PVR Common Bridge Module (kernel side) +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Receives calls from the user portion of services and + despatches them to functions in the kernel portion. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + + + +#include <stddef.h> + +#include "img_defs.h" +#include "services.h" +#include "pvr_bridge_km.h" +#include "pvr_debug.h" +#include "ra.h" +#include "pvr_bridge.h" +#if defined(SUPPORT_SGX) +#include "sgx_bridge.h" +#endif +#if defined(SUPPORT_VGX) +#include "vgx_bridge.h" +#endif +#if defined(SUPPORT_MSVDX) +#include "msvdx_bridge.h" +#endif +#include "perproc.h" +#include "device.h" +#include "buffer_manager.h" +#include "refcount.h" + +#include "pdump_km.h" +#include "syscommon.h" + +#include "bridged_pvr_bridge.h" +#if defined(SUPPORT_SGX) +#include "bridged_sgx_bridge.h" +#endif +#if defined(SUPPORT_VGX) +#include "bridged_vgx_bridge.h" +#endif +#if defined(SUPPORT_MSVDX) +#include "bridged_msvdx_bridge.h" +#endif + +#include "env_data.h" + +#if defined (__linux__) || defined(__QNXNTO__) +#include "mmap.h" +#endif + + +#include "srvkm.h" + +/* FIXME: we should include an OS specific header here to allow configuration of + * which functions should be excluded (like the shared srvclient bridge code) + * so that ports may choose to override certain things. */ + +/* For the purpose of maintainability, it is intended that this file should not + * contain large amounts of OS specific #ifdefs. 
Headers are fine, and perhaps + * a few one liners, but for anything more, please find a way to add e.g. + * an osfunc.c abstraction or override the entire function in question within + * env,*,pvr_bridge_k.c + */ + + +PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT]; + +#if defined(DEBUG_BRIDGE_KM) +PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats; +#endif + +#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) +static IMG_BOOL abSharedDeviceMemHeap[PVRSRV_MAX_CLIENT_HEAPS]; +static IMG_BOOL *pbSharedDeviceMemHeap = abSharedDeviceMemHeap; +#else +static IMG_BOOL *pbSharedDeviceMemHeap = (IMG_BOOL*)IMG_NULL; +#endif + + +#if defined(DEBUG_BRIDGE_KM) +PVRSRV_ERROR +CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData, + IMG_UINT32 ui32BridgeID, + IMG_VOID *pvDest, + IMG_VOID *pvSrc, + IMG_UINT32 ui32Size) +{ + g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes+=ui32Size; + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size; + return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size); +} +PVRSRV_ERROR +CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData, + IMG_UINT32 ui32BridgeID, + IMG_VOID *pvDest, + IMG_VOID *pvSrc, + IMG_UINT32 ui32Size) +{ + g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes+=ui32Size; + g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size; + return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size); +} +#endif + + +static IMG_INT +PVRSRVEnumerateDevicesBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_OUT_ENUMDEVICE *psEnumDeviceOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES); + + PVR_UNREFERENCED_PARAMETER(psPerProc); + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + + psEnumDeviceOUT->eError = + PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices, + psEnumDeviceOUT->asDeviceIdentifier); + + return 0; +} + +static IMG_INT +PVRSRVAcquireDeviceDataBW(IMG_UINT32 ui32BridgeID, + 
PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO *psAcquireDevInfoIN, + PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *psAcquireDevInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO); + + psAcquireDevInfoOUT->eError = + PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex, + psAcquireDevInfoIN->eDeviceType, + &hDevCookieInt); + if(psAcquireDevInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* + * Handle is not allocated in batch mode, as there is no resource + * allocation to undo if the handle allocation fails. + */ + psAcquireDevInfoOUT->eError = + PVRSRVAllocHandle(psPerProc->psHandleBase, + &psAcquireDevInfoOUT->hDevCookie, + hDevCookieInt, + PVRSRV_HANDLE_TYPE_DEV_NODE, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + + return 0; +} + + +static IMG_INT +PVRSRVCreateDeviceMemContextBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT *psCreateDevMemContextIN, + PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *psCreateDevMemContextOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDevMemContextInt; + IMG_UINT32 i; + IMG_BOOL bCreated; +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_HEAP_INFO_KM asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS]; +#endif + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT); + + /* + * We potentially need one handle for the device memory context, + * and one handle for each client heap. 
+ */ + NEW_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS + 1) + + psCreateDevMemContextOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psCreateDevMemContextIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psCreateDevMemContextOUT->eError != PVRSRV_OK) + { + return 0; + } + + psCreateDevMemContextOUT->eError = + PVRSRVCreateDeviceMemContextKM(hDevCookieInt, + psPerProc, + &hDevMemContextInt, + &psCreateDevMemContextOUT->ui32ClientHeapCount, +#if defined (SUPPORT_SID_INTERFACE) + &asHeapInfo[0], +#else + &psCreateDevMemContextOUT->sHeapInfo[0], +#endif + &bCreated, + pbSharedDeviceMemHeap); + + if(psCreateDevMemContextOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* + * Only allocate a handle if the device memory context was created. + * If an existing context was returned, lookup the existing + * handle. + */ + if(bCreated) + { + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psCreateDevMemContextOUT->hDevMemContext, + hDevMemContextInt, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + } + else + { + psCreateDevMemContextOUT->eError = + PVRSRVFindHandle(psPerProc->psHandleBase, + &psCreateDevMemContextOUT->hDevMemContext, + hDevMemContextInt, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + if(psCreateDevMemContextOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + for(i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++) + { +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hDevMemHeapExt; +#else + IMG_HANDLE hDevMemHeapExt; +#endif + +#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) + if(abSharedDeviceMemHeap[i]) +#endif + { + /* + * Heaps shared by everybody. These heaps are not + * created as part of the device memory context + * creation, and exist for the lifetime of the + * driver, hence, we use shared handles for these + * heaps. 
+ */ +#if defined (SUPPORT_SID_INTERFACE) + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &hDevMemHeapExt, + asHeapInfo[i].hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); +#else + PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt, + psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); +#endif + } +#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) + else + { + /* + * Heaps belonging to this context. The handles for + * these are made subhandles of the memory context + * handle, so that they are automatically deallocated + * when the memory context handle is deallocated. + */ + if(bCreated) + { +#if defined (SUPPORT_SID_INTERFACE) + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &hDevMemHeapExt, + asHeapInfo[i].hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psCreateDevMemContextOUT->hDevMemContext); +#else + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt, + psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psCreateDevMemContextOUT->hDevMemContext); +#endif + } + else + { + psCreateDevMemContextOUT->eError = + PVRSRVFindHandle(psPerProc->psHandleBase, + &hDevMemHeapExt, +#if defined (SUPPORT_SID_INTERFACE) + asHeapInfo[i].hDevMemHeap, +#else + psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap, +#endif + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); + if(psCreateDevMemContextOUT->eError != PVRSRV_OK) + { + return 0; + } + } + } +#endif + psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt; +#if defined (SUPPORT_SID_INTERFACE) + psCreateDevMemContextOUT->sHeapInfo[i].ui32HeapID = asHeapInfo[i].ui32HeapID; + psCreateDevMemContextOUT->sHeapInfo[i].sDevVAddrBase = asHeapInfo[i].sDevVAddrBase; + psCreateDevMemContextOUT->sHeapInfo[i].ui32HeapByteSize = asHeapInfo[i].ui32HeapByteSize; + 
psCreateDevMemContextOUT->sHeapInfo[i].ui32Attribs = asHeapInfo[i].ui32Attribs; + psCreateDevMemContextOUT->sHeapInfo[i].ui32XTileStride = asHeapInfo[i].ui32XTileStride; +#endif + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVDestroyDeviceMemContextBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT *psDestroyDevMemContextIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDevMemContextInt; + IMG_BOOL bDestroyed; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psDestroyDevMemContextIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt, + psDestroyDevMemContextIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVDestroyDeviceMemContextKM(hDevCookieInt, hDevMemContextInt, &bDestroyed); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + if(bDestroyed) + { + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psDestroyDevMemContextIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + } + + return 0; +} + + +static IMG_INT +PVRSRVGetDeviceMemHeapInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoIN, + PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDevMemContextInt; + IMG_UINT32 i; +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_HEAP_INFO_KM asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS]; +#endif + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO); + + 
NEW_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS) + + psGetDevMemHeapInfoOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psGetDevMemHeapInfoIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetDevMemHeapInfoOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt, + psGetDevMemHeapInfoIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetDevMemHeapInfoOUT->eError = + PVRSRVGetDeviceMemHeapInfoKM(hDevCookieInt, + hDevMemContextInt, + &psGetDevMemHeapInfoOUT->ui32ClientHeapCount, +#if defined (SUPPORT_SID_INTERFACE) + &asHeapInfo[0], +#else + &psGetDevMemHeapInfoOUT->sHeapInfo[0], +#endif + pbSharedDeviceMemHeap); + + if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + for(i = 0; i < psGetDevMemHeapInfoOUT->ui32ClientHeapCount; i++) + { +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hDevMemHeapExt; +#else + IMG_HANDLE hDevMemHeapExt; +#endif + +#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) + if(abSharedDeviceMemHeap[i]) +#endif + { + /* + * Heaps shared by everybody. These heaps are not + * created as part of the device memory context + * creation, and exist for the lifetime of the + * driver, hence, we use shared handles for these + * heaps. + */ +#if defined (SUPPORT_SID_INTERFACE) + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &hDevMemHeapExt, + asHeapInfo[i].hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); +#else + PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt, + psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); +#endif + } +#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) + else + { + /* + * Heaps belonging to this context. 
The handles for + * these are made subhandles of the memory context + * handle, so that they are automatically deallocated + * when the memory context handle is deallocated. + */ + psGetDevMemHeapInfoOUT->eError = + PVRSRVFindHandle(psPerProc->psHandleBase, + &hDevMemHeapExt, +#if defined (SUPPORT_SID_INTERFACE) + asHeapInfo[i].hDevMemHeap, +#else + psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap, +#endif + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); + if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + } +#endif + psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt; +#if defined (SUPPORT_SID_INTERFACE) + psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32HeapID = asHeapInfo[i].ui32HeapID; + psGetDevMemHeapInfoOUT->sHeapInfo[i].sDevVAddrBase = asHeapInfo[i].sDevVAddrBase; + psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32HeapByteSize = asHeapInfo[i].ui32HeapByteSize; + psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32Attribs = asHeapInfo[i].ui32Attribs; + psGetDevMemHeapInfoOUT->sHeapInfo[i].ui32XTileStride = asHeapInfo[i].ui32XTileStride; +#endif + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc) + + return 0; +} + + +#if defined(OS_PVRSRV_ALLOC_DEVICE_MEM_BW) +/* customised version */ +IMG_INT +PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN, + PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc); +#else +static IMG_INT +PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN, + PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDevMemHeapInt; + IMG_UINT32 ui32ShareIndex; + IMG_BOOL bUseShareMemWorkaround; + IMG_BOOL *pabMapChunk = IMG_NULL; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM); + + NEW_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, 
psPerProc, 2) + + /* Do same sanity checking */ + if (psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_SPARSE) + { + if (psAllocDeviceMemIN->ui32NumPhysChunks > psAllocDeviceMemIN->ui32NumVirtChunks) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocDeviceMemBW: more physical chunks then virtual space")); + psAllocDeviceMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + + if (psAllocDeviceMemIN->pabMapChunk == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocDeviceMemBW: Called in sparse mapping mode but without MapChunk array")); + psAllocDeviceMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + } + + psAllocDeviceMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psAllocDeviceMemIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + psAllocDeviceMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt, + psAllocDeviceMemIN->hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); + + if(psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* Memory sharing workaround, version 2 */ + + bUseShareMemWorkaround = ((psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_XPROC) != 0) ? 
IMG_TRUE : IMG_FALSE; + ui32ShareIndex = 7654321; /* stops MSVC compiler warning */ + + if (bUseShareMemWorkaround) + { + /* allocate a shared-surface ID, prior to the call to AllocDeviceMem */ + /* We could plumb in an extra argument, but for now, we'll keep the + shared-surface ID as a piece of global state, and rely upon the + bridge mutex to make it safe for us */ + + psAllocDeviceMemOUT->eError = + BM_XProcWorkaroundFindNewBufferAndSetShareIndex(&ui32ShareIndex); + if(psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + /* Check access to private data, if provided */ + if(psAllocDeviceMemIN->pvPrivData) + { + if(!OSAccessOK(PVR_VERIFY_READ, + psAllocDeviceMemIN->pvPrivData, + psAllocDeviceMemIN->ui32PrivDataLength)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocDeviceMemBW: Access check failed for pvPrivData")); + return -EFAULT; + } + } + + if (psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_SPARSE) + { + /* Check access to the sparse mapping table, if provided */ + if(!OSAccessOK(PVR_VERIFY_READ, + psAllocDeviceMemIN->pabMapChunk, + psAllocDeviceMemIN->ui32NumVirtChunks)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocDeviceMemBW: Access check failed for pabMapChunk")); + return -EFAULT; + } + + psAllocDeviceMemOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(IMG_BOOL) * psAllocDeviceMemIN->ui32NumVirtChunks, + (IMG_VOID **) &pabMapChunk, + 0, + "MapChunk kernel copy"); + if (psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + psAllocDeviceMemOUT->eError = OSCopyFromUser(psPerProc, + pabMapChunk, + psAllocDeviceMemIN->pabMapChunk, + sizeof(IMG_BOOL) * psAllocDeviceMemIN->ui32NumVirtChunks); + if (psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(IMG_BOOL) * psAllocDeviceMemIN->ui32NumVirtChunks, + pabMapChunk, + 0); + return 0; + } + } + + + psAllocDeviceMemOUT->eError = + PVRSRVAllocDeviceMemKM(hDevCookieInt, + psPerProc, + hDevMemHeapInt, + psAllocDeviceMemIN->ui32Attribs, + 
psAllocDeviceMemIN->ui32Size, + psAllocDeviceMemIN->ui32Alignment, + psAllocDeviceMemIN->pvPrivData, + psAllocDeviceMemIN->ui32PrivDataLength, + psAllocDeviceMemIN->ui32ChunkSize, + psAllocDeviceMemIN->ui32NumVirtChunks, + psAllocDeviceMemIN->ui32NumPhysChunks, + pabMapChunk, + &psMemInfo, + "" /*FIXME: add something meaningful*/); + + /* Allow mapping this buffer to the GC MMU only on allocation time, if + * this buffer is mapped into another process context we don't want the + * GC MMU mapping to happen. + */ + psAllocDeviceMemIN->ui32Attribs &= ~PVRSRV_MAP_GC_MMU; + + if (bUseShareMemWorkaround) + { + PVR_ASSERT(ui32ShareIndex != 7654321); + BM_XProcWorkaroundUnsetShareIndex(ui32ShareIndex); + } + + if(psAllocDeviceMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + psMemInfo->sShareMemWorkaround.bInUse = bUseShareMemWorkaround; + if (bUseShareMemWorkaround) + { + PVR_ASSERT(ui32ShareIndex != 7654321); + psMemInfo->sShareMemWorkaround.ui32ShareIndex = ui32ShareIndex; + psMemInfo->sShareMemWorkaround.hDevCookieInt = hDevCookieInt; + psMemInfo->sShareMemWorkaround.ui32OrigReqAttribs = psAllocDeviceMemIN->ui32Attribs; + psMemInfo->sShareMemWorkaround.ui32OrigReqSize = (IMG_UINT32)psAllocDeviceMemIN->ui32Size; + psMemInfo->sShareMemWorkaround.ui32OrigReqAlignment = (IMG_UINT32)psAllocDeviceMemIN->ui32Alignment; + } + + OSMemSet(&psAllocDeviceMemOUT->sClientMemInfo, + 0, + sizeof(psAllocDeviceMemOUT->sClientMemInfo)); + + psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM = + psMemInfo->pvLinAddrKM; + +#if defined (__linux__) + psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = 0; +#else + psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = psMemInfo->pvLinAddrKM; +#endif + psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr; + psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags; + psAllocDeviceMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize; + OSMemCopy(psAllocDeviceMemOUT->sClientMemInfo.planeOffsets, psMemInfo->planeOffsets, + 
sizeof(psMemInfo->planeOffsets)); +#if defined (SUPPORT_SID_INTERFACE) + /* see below */ +#else + psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle; +#endif + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo, + psMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + +#if defined (SUPPORT_SID_INTERFACE) + PVR_ASSERT(psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo != 0); + + if (psMemInfo->sMemBlk.hOSMemHandle != IMG_NULL) + { + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo, + psMemInfo->sMemBlk.hOSMemHandle, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo); + } + else + { + psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = 0; + } +#endif + + if(psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ) + { + /* signal no syncinfo */ + OSMemSet(&psAllocDeviceMemOUT->sClientSyncInfo, + 0, + sizeof (PVRSRV_CLIENT_SYNC_INFO)); + psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL; + } + else + { + /* and setup the sync info */ + +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psAllocDeviceMemOUT->sClientSyncInfo.psSyncData = + psMemInfo->psKernelSyncInfo->psSyncData; + psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + psAllocDeviceMemOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + +#if defined (SUPPORT_SID_INTERFACE) + if (psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL) + { + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo, + 
psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo); + } + else + { + psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo = 0; + } +#else + psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo = + psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif +#endif + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psAllocDeviceMemOUT->sClientSyncInfo.hKernelSyncInfo, + psMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo); + + psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = + &psAllocDeviceMemOUT->sClientSyncInfo; + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc) + + return 0; +} + +#endif /* OS_PVRSRV_ALLOC_DEVICE_MEM_BW */ + +static IMG_INT +PVRSRVFreeDeviceMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_FREEDEVICEMEM *psFreeDeviceMemIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_VOID *pvKernelMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psFreeDeviceMemIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvKernelMemInfo, +#if defined (SUPPORT_SID_INTERFACE) + psFreeDeviceMemIN->hKernelMemInfo, +#else + psFreeDeviceMemIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = PVRSRVFreeDeviceMemKM(hDevCookieInt, pvKernelMemInfo); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, +#if defined 
(SUPPORT_SID_INTERFACE) + psFreeDeviceMemIN->hKernelMemInfo, +#else + psFreeDeviceMemIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + + return 0; +} + + +static IMG_INT +PVRSRVMultiManageDevMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MULTI_MANAGE_DEV_MEM *psMultiMemDevRequestIN, + PVRSRV_BRIDGE_OUT_MULTI_MANAGE_DEV_MEM *psMultiMemDevRequestOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + PVRSRV_KERNEL_MEM_INFO *psSharedBuffKernelMemInfo = NULL; + PVRSRV_MANAGE_DEV_MEM_REQUEST* pRequestsArray; + PVRSRV_MANAGE_DEV_MEM_RESPONSE* pResponseArray; + IMG_UINT32 reqNum; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MULTI_MANAGE_DEV_MEM); + + psMultiMemDevRequestOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psMultiMemDevRequestIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psMultiMemDevRequestOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"%s: invalid hDevCookie", __FUNCTION__)); + return 0; + } + + if(psMultiMemDevRequestIN->hKernelMemInfo) + { + PVRSRV_MULTI_MANAGE_DEV_MEM_REQUESTS* psMultiMemDevRequest; + psMultiMemDevRequestOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psSharedBuffKernelMemInfo, + #if defined (SUPPORT_SID_INTERFACE) + psMultiMemDevRequestIN->hKernelMemInfo, + #else + psMultiMemDevRequestIN->hKernelMemInfo, + #endif + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + + if(psMultiMemDevRequestOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"%s: invalid shared memory hKernelMemInfo", __FUNCTION__)); + return 0; + } + + psMultiMemDevRequest = (PVRSRV_MULTI_MANAGE_DEV_MEM_REQUESTS*)psSharedBuffKernelMemInfo->pvLinAddrKM; + if( (psMultiMemDevRequest->psSharedMemClientMemInfo != psMultiMemDevRequestIN->psSharedMemClientMemInfo ) || + (psMultiMemDevRequest->ui32MaxNumberOfRequests != psMultiMemDevRequestIN->ui32MaxNumberOfRequests) || + psMultiMemDevRequest->ui32NumberOfValidRequests != 
psMultiMemDevRequestIN->ui32NumberOfValidRequests || + psMultiMemDevRequest->ui32CtrlFlags != psMultiMemDevRequestIN->ui32CtrlFlags) + { + psMultiMemDevRequestOUT->eError = PVRSRV_ERROR_BAD_MAPPING; + return 0; + } + pRequestsArray = psMultiMemDevRequest->sMemRequests; + pResponseArray = psMultiMemDevRequest->sMemRequests; + } + else + { + pRequestsArray = psMultiMemDevRequestIN->sMemRequests; + pResponseArray = psMultiMemDevRequestOUT->sMemResponse; + } + + PVR_DPF((PVR_DBG_MESSAGE, "\n%s: %s %d Number of request/s, Control flag = 0x%08x\n", + __FUNCTION__, + (psMultiMemDevRequestIN->hKernelMemInfo ? "Shared" : "Direct"), + psMultiMemDevRequestIN->ui32NumberOfValidRequests, + psMultiMemDevRequestIN->ui32CtrlFlags)); + + for(reqNum = 0; reqNum < psMultiMemDevRequestIN->ui32NumberOfValidRequests; reqNum++) + { + PVRSRV_MANAGE_DEV_MEM_REQUEST *pRequest = &pRequestsArray[reqNum]; + PVRSRV_MANAGE_DEV_MEM_REQUEST *pResponse = &pResponseArray[reqNum]; + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = NULL; + + /* At the kernel size, psClientMemInfo only works as a verification token */ + if(psMultiMemDevRequestIN->hKernelMemInfo == NULL) + { + pResponse->psClientMemInfo = pRequest->psClientMemInfo; + pResponse->eReqType = pRequest->eReqType; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Request %d for ClientMemInfo %p\n" + "DevVirtAddr 0x%08x, GpuRefCount %d " + "CpuVirtAddr %p, CpuRefCount %d, Kernel Handle %p, sync %p\n" + "Size %d, Attrib 0x%08x, Align %d, Subsystem 0x%llx, Hints 0x%08x " + "transfer slot %d\n", + __FUNCTION__, pResponse->eReqType, + pRequest->psClientMemInfo, + pRequest->sDevVAddr.uiAddr, + pRequest->ui32GpuMapRefCount, + pRequest->pvLinAddr, + pRequest->ui32CpuMapRefCount, + pRequest->hKernelMemInfo, + pRequest->hKernelSyncInfo, + pRequest->uSize, + pRequest->ui32Attribs, + pRequest->uAlignment, + pRequest->uiSubSystem, + pRequest->ui32Hints, + pRequest->ui32TransferFromToReqSlotIndx)); + + pResponse->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + 
(IMG_PVOID *)&psKernelMemInfo, + #if defined (SUPPORT_SID_INTERFACE) + pRequest->hKernelMemInfo, + #else + pRequest->hKernelMemInfo, + #endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(pResponse->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"%s: invalid hKernelMemInfo for slot %d", + __FUNCTION__, reqNum)); + continue; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: KernelMemInfo %p -%s SHARED\n" + "DevVirtAddr 0x%08x, RefCount %d " + "Size %d, Flags 0x%08x, OrigAlign %d, Subsystem 0x%llx, Hints 0x%08x\n", + __FUNCTION__, psKernelMemInfo, + (psKernelMemInfo->sShareMemWorkaround.bInUse ? "" : "NOT"), + psKernelMemInfo->sDevVAddr.uiAddr, + psKernelMemInfo->ui32RefCount, + psKernelMemInfo->uAllocSize, + psKernelMemInfo->ui32Flags, + psKernelMemInfo->sShareMemWorkaround.ui32OrigReqAlignment, + (IMG_UINT64)0, 0)); + + if(psKernelMemInfo->sDevVAddr.uiAddr != pRequest->sDevVAddr.uiAddr) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Kernel and Client MemInfo's " + "virtual addresses are not equal\n" + "Kernel DevVirtAddr 0x%08x != Client DevVirtAddr 0x%08x", + __FUNCTION__, + psKernelMemInfo->sDevVAddr.uiAddr, pRequest->sDevVAddr.uiAddr)); + } + + switch(pResponse->eReqType) + { + case PVRSRV_MULTI_MANAGE_DEV_MEM_RQST_MAP: + case PVRSRV_MULTI_MANAGE_DEV_MEM_RQST_LOCK_MAP: + { + IMG_INT32 result = PVRSRVRemapToDevKM(hDevCookieInt, + psKernelMemInfo, &pResponse->sDevVAddr); + + if(result < 0) + { + pResponse->eError = -result; + PVR_DPF((PVR_DBG_ERROR, "Request for GPU Virtual " + "memory mapping had failed " + "with error %d", + pResponse->eError)); + } + else + { + pResponse->ui32GpuMapRefCount = result; + pResponse->eError = PVRSRV_OK; + } + } + break; + case PVRSRV_MULTI_MANAGE_DEV_MEM_RQST_SWAP_MAP_TO_NEXT: + pResponse->eError = PVRSRV_OK; + pResponse->ui32GpuMapRefCount = 1; + pResponse->sDevVAddr = psKernelMemInfo->sDevVAddr; + break; + case PVRSRV_MULTI_MANAGE_DEV_MEM_RQST_UNMAP: + case PVRSRV_MULTI_MANAGE_DEV_MEM_RQST_UNLOCK_MAP: + { + IMG_INT32 result = 
PVRSRVUnmapFromDevKM(hDevCookieInt, psKernelMemInfo); + if(result < 0) + { + pResponse->eError = -result; + PVR_DPF((PVR_DBG_ERROR, "Request for GPU Virtual memory " + "un-mapping had failed " + "with error %d", + pResponse->eError)); + } + else + { + pResponse->ui32GpuMapRefCount = result; + pResponse->eError = PVRSRV_OK; + } + pResponse->sDevVAddr = psKernelMemInfo->sDevVAddr; + } + break; + case PVRSRV_MULTI_MANAGE_DEV_MEM_RQST_SWAP_MAP_FROM_PREV: + pResponse->eError = PVRSRV_OK; + pResponse->ui32GpuMapRefCount = 1; + pResponse->sDevVAddr = psKernelMemInfo->sDevVAddr; + break; + default: + pResponse->eError = PVRSRV_ERROR_INVALID_PARAMS; + break; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: RETURN: ClientMemInfo %p " + "DevVirtAddr 0x%08x, GpuMapRefCount %d, err %d\n", + __FUNCTION__, pRequest->psClientMemInfo, + pResponse->sDevVAddr.uiAddr, + pResponse->ui32GpuMapRefCount, + pResponse->eError)); + } + + if(psMultiMemDevRequestIN->hKernelMemInfo == NULL) + psMultiMemDevRequestOUT->ui32CtrlFlags = psMultiMemDevRequestIN->ui32CtrlFlags; + /* No status implemented yet */ + psMultiMemDevRequestOUT->ui32StatusFlags = 0; + + return 0; +} + +static IMG_INT +PVRSRVExportDeviceMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM *psExportDeviceMemIN, + PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL; +#else + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; +#endif + + PVR_ASSERT(ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_EXPORT_DEVICEMEM) || + ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2)); + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + + /* find the device cookie */ + psExportDeviceMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psExportDeviceMemIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psExportDeviceMemOUT->eError != 
PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find devcookie")); + return 0; + } + + /* find the kernel meminfo from the process handle list */ + psExportDeviceMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_PVOID *)&psKernelMemInfo, +#if defined (SUPPORT_SID_INTERFACE) + psExportDeviceMemIN->hKernelMemInfo, +#else + psExportDeviceMemIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(psExportDeviceMemOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find kernel meminfo")); + return 0; + } + + /* see if it's already exported */ + psExportDeviceMemOUT->eError = + PVRSRVFindHandle(KERNEL_HANDLE_BASE, + &psExportDeviceMemOUT->hMemInfo, + psKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psExportDeviceMemOUT->eError == PVRSRV_OK) + { + /* it's already exported */ + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVExportDeviceMemBW: allocation is already exported")); + return 0; + } + + /* export the allocation */ + psExportDeviceMemOUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE, + &psExportDeviceMemOUT->hMemInfo, + psKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + if (psExportDeviceMemOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: failed to allocate handle from global handle list")); + return 0; + } + + /* mark the meminfo as 'exported' */ + psKernelMemInfo->ui32Flags |= PVRSRV_MEM_EXPORTED; + + return 0; +} + + +static IMG_INT +PVRSRVMapDeviceMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN, + PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDevMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psSrcKernelMemInfo = IMG_NULL; + PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo = IMG_NULL; + IMG_HANDLE hDstDevMemHeap = IMG_NULL; + + PVR_ASSERT(ui32BridgeID == PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY) || + ui32BridgeID == 
PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_MAP_DEV_MEMORY_2)); + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + + NEW_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc, 2) + + /* lookup srcmeminfo handle */ + psMapDevMemOUT->eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE, + (IMG_VOID**)&psSrcKernelMemInfo, + psMapDevMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psMapDevMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* lookup dev mem heap handle */ + psMapDevMemOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDstDevMemHeap, + psMapDevMemIN->hDstDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP); + if(psMapDevMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* check for workaround */ + if (psSrcKernelMemInfo->sShareMemWorkaround.bInUse) + { + PVR_DPF((PVR_DBG_MESSAGE, "using the mem wrap workaround.")); + + /* Check the XPROC mapping count -if it is "0", + * then the object is about to go away - do not allow mapping */ + if(BM_XProcGetShareDataRefCount(psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex) < 1) + { + psMapDevMemOUT->eError = PVRSRV_ERROR_MAPPING_NOT_FOUND; + PVR_DPF((PVR_DBG_WARNING, "%s: Can't map buffer with slot %d, size %d " + "and refcount %d\n\t Invalid XPROC refcount of %d", + __FUNCTION__, psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex, + psSrcKernelMemInfo->uAllocSize, psSrcKernelMemInfo->ui32RefCount, + BM_XProcGetShareDataRefCount(psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex))); + return 0; + } + + /* Ensure we get the same ID for this allocation, such that it + inherits the same physical block. Rather than add a lot of + plumbing to several APIs, we call into buffer manager directly + to set "global" state. This works only if we make + this allocation while holding the bridge mutex and don't + make any other allocations (because the state persists and + would affect other device memory allocations too). 
It is + important that we bracket the PVRSRVAllocDeviceMemKM() call + with this Set/Unset pair. */ + psMapDevMemOUT->eError = BM_XProcWorkaroundSetShareIndex(psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex); + if(psMapDevMemOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryBW(): failed to recycle shared buffer")); + return 0; + } + + psMapDevMemOUT->eError = + PVRSRVAllocDeviceMemKM(psSrcKernelMemInfo->sShareMemWorkaround.hDevCookieInt, + psPerProc, + hDstDevMemHeap, + psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqAttribs | PVRSRV_MEM_NO_SYNCOBJ, + psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqSize, + psSrcKernelMemInfo->sShareMemWorkaround.ui32OrigReqAlignment, + IMG_NULL, + 0, + /* FIXME: Do we need to be able to export sparse memory? */ + 0,0,0,IMG_NULL, /* No sparse mapping data */ + &psDstKernelMemInfo, + "" /*FIXME: add something meaningful*/); + /* counterpart of the above "SetShareIndex". NB: this must be + done in both the success and failure paths of the + AllocDeviceMemKM() call */ + BM_XProcWorkaroundUnsetShareIndex(psSrcKernelMemInfo->sShareMemWorkaround.ui32ShareIndex); + if(psMapDevMemOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryBW: Failed to create allocation for cross-process memory map")); + return 0; + } + + if(psSrcKernelMemInfo->psKernelSyncInfo) + { + PVRSRVKernelSyncInfoIncRef(psSrcKernelMemInfo->psKernelSyncInfo, psSrcKernelMemInfo); + } + + psDstKernelMemInfo->psKernelSyncInfo = psSrcKernelMemInfo->psKernelSyncInfo; + } + else + { + /* map the meminfo to the target heap and memory context */ + psMapDevMemOUT->eError = PVRSRVMapDeviceMemoryKM(psPerProc, + psSrcKernelMemInfo, + hDstDevMemHeap, + &psDstKernelMemInfo); + if(psMapDevMemOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + /* copy the workaround info */ + psDstKernelMemInfo->sShareMemWorkaround = psSrcKernelMemInfo->sShareMemWorkaround; + + OSMemSet(&psMapDevMemOUT->sDstClientMemInfo, + 0, + 
sizeof(psMapDevMemOUT->sDstClientMemInfo)); + OSMemSet(&psMapDevMemOUT->sDstClientSyncInfo, + 0, + sizeof(psMapDevMemOUT->sDstClientSyncInfo)); + + psMapDevMemOUT->sDstClientMemInfo.pvLinAddrKM = + psDstKernelMemInfo->pvLinAddrKM; + + psMapDevMemOUT->sDstClientMemInfo.pvLinAddr = 0; + psMapDevMemOUT->sDstClientMemInfo.sDevVAddr = psDstKernelMemInfo->sDevVAddr; + psMapDevMemOUT->sDstClientMemInfo.ui32Flags = psDstKernelMemInfo->ui32Flags; + psMapDevMemOUT->sDstClientMemInfo.uAllocSize = psDstKernelMemInfo->uAllocSize; + OSMemCopy(psMapDevMemOUT->sDstClientMemInfo.planeOffsets, psDstKernelMemInfo->planeOffsets, + sizeof(psDstKernelMemInfo->planeOffsets)); +#if defined (SUPPORT_SID_INTERFACE) + /* see below */ +#else + psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = psDstKernelMemInfo->sMemBlk.hOSMemHandle; +#endif + + /* allocate handle to the DST kernel meminfo */ + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo, + psDstKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo = IMG_NULL; + +#if defined (SUPPORT_SID_INTERFACE) + /* alloc subhandle for the mapping info */ + if (psDstKernelMemInfo->sMemBlk.hOSMemHandle != IMG_NULL) + { + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapDevMemOUT->sDstClientMemInfo.hMappingInfo, + psDstKernelMemInfo->sMemBlk.hOSMemHandle, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo); + } + else + { + psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = 0; + } +#endif + + /* and setup the sync info */ + if(psDstKernelMemInfo->psKernelSyncInfo) + { +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psMapDevMemOUT->sDstClientSyncInfo.psSyncData = + psDstKernelMemInfo->psKernelSyncInfo->psSyncData; + psMapDevMemOUT->sDstClientSyncInfo.sWriteOpsCompleteDevVAddr = + psDstKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + 
psMapDevMemOUT->sDstClientSyncInfo.sReadOpsCompleteDevVAddr = + psDstKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + psMapDevMemOUT->sDstClientSyncInfo.sReadOps2CompleteDevVAddr = + psDstKernelMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + +#if defined (SUPPORT_SID_INTERFACE) + /* alloc subhandle for the mapping info */ + if (psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL) + { + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo, + psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo); + } + else + { + psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo = 0; + } +#else + psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo = + psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif +#endif + + psMapDevMemOUT->sDstClientMemInfo.psClientSyncInfo = &psMapDevMemOUT->sDstClientSyncInfo; + /* + * The sync info is associated with the device buffer, + * and not allocated here. It isn't exported when created, + * hence the handle allocation rather than a lookup. 
+ */ + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo, + psDstKernelMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo); + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc) + + return 0; +} + + +static IMG_INT +PVRSRVUnmapDeviceMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY *psUnmapDevMemIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEV_MEMORY); + + psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psKernelMemInfo, +#if defined (SUPPORT_SID_INTERFACE) + psUnmapDevMemIN->hKernelMemInfo, +#else + psUnmapDevMemIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + if (psKernelMemInfo->sShareMemWorkaround.bInUse) + { + psRetOUT->eError = PVRSRVFreeDeviceMemKM(psKernelMemInfo->sShareMemWorkaround.hDevCookieInt, psKernelMemInfo); + if(psRetOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVUnmapDeviceMemoryBW: internal error, should expect FreeDeviceMem to fail")); + return 0; + } + } + else + { + psRetOUT->eError = PVRSRVUnmapDeviceMemoryKM(psKernelMemInfo); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + psUnmapDevMemIN->hKernelMemInfo, +#else + psUnmapDevMemIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + + return 0; +} + + + +static IMG_INT +PVRSRVMapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *psMapDevClassMemIN, + PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psMapDevClassMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + 
IMG_HANDLE hOSMapInfo; + IMG_HANDLE hDeviceClassBufferInt; + IMG_HANDLE hDevMemContextInt; + PVRSRV_HANDLE_TYPE eHandleType; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY); + + NEW_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc, 2) + + /* + * The buffer to be mapped can belong to a 3rd party display or + * buffer driver, and we don't know which type we have at this + * point. + */ + psMapDevClassMemOUT->eError = + PVRSRVLookupHandleAnyType(psPerProc->psHandleBase, + &hDeviceClassBufferInt, + &eHandleType, + psMapDevClassMemIN->hDeviceClassBuffer); + + if(psMapDevClassMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* get the device memory context */ + psMapDevClassMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevMemContextInt, + psMapDevClassMemIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psMapDevClassMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* Having looked up the handle, now check its type */ + switch(eHandleType) + { +#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) + case PVRSRV_HANDLE_TYPE_DISP_BUFFER: + case PVRSRV_HANDLE_TYPE_BUF_BUFFER: +#else + case PVRSRV_HANDLE_TYPE_NONE: +#endif + break; + default: + psMapDevClassMemOUT->eError = PVRSRV_ERROR_INVALID_HANDLE_TYPE; + return 0; + } + + psMapDevClassMemOUT->eError = + PVRSRVMapDeviceClassMemoryKM(psPerProc, + hDevMemContextInt, + hDeviceClassBufferInt, + &psMemInfo, + &hOSMapInfo); + if(psMapDevClassMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + OSMemSet(&psMapDevClassMemOUT->sClientMemInfo, + 0, + sizeof(psMapDevClassMemOUT->sClientMemInfo)); + OSMemSet(&psMapDevClassMemOUT->sClientSyncInfo, + 0, + sizeof(psMapDevClassMemOUT->sClientSyncInfo)); + + psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM = + psMemInfo->pvLinAddrKM; + + psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = 0; + psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr; + 
psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags; + psMapDevClassMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize; +#if defined (SUPPORT_SID_INTERFACE) + if (psMemInfo->sMemBlk.hOSMemHandle != 0) + { + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapDevClassMemOUT->sClientMemInfo.hMappingInfo, + psMemInfo->sMemBlk.hOSMemHandle, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psMapDevClassMemIN->hDeviceClassBuffer); + } + else + { + psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = 0; + } +#else + psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle; +#endif + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo, + psMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psMapDevClassMemIN->hDeviceClassBuffer); + + psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = IMG_NULL; + + /* and setup the sync info */ + if(psMemInfo->psKernelSyncInfo) + { +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psMapDevClassMemOUT->sClientSyncInfo.psSyncData = + psMemInfo->psKernelSyncInfo->psSyncData; + psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + psMapDevClassMemOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + +#if defined (SUPPORT_SID_INTERFACE) + if (psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != 0) + { + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo, + psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo); + } + else + { + 
psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo = 0; + } +#else + psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo = + psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif +#endif + + psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo = &psMapDevClassMemOUT->sClientSyncInfo; + /* + * The sync info is associated with the device buffer, + * and not allocated here. It isn't exported when + * created, hence the handle allocation rather than a + * lookup. + */ + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo, + psMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo); + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVUnmapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY *psUnmapDevClassMemIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvKernelMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo, +#if defined (SUPPORT_SID_INTERFACE) + psUnmapDevClassMemIN->hKernelMemInfo, +#else + psUnmapDevClassMemIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + psUnmapDevClassMemIN->hKernelMemInfo, +#else + psUnmapDevClassMemIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + + return 0; +} + + +#if defined(OS_PVRSRV_WRAP_EXT_MEM_BW) +IMG_INT +PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID, + 
PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN, + PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc); +#else /* OS_PVRSRV_WRAP_EXT_MEM_BW */ +static IMG_INT +PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN, + PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDevMemContextInt; + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL; + IMG_UINT32 ui32PageTableSize = 0; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY); + + NEW_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc, 2) + + /* + * FIXME: This needs reworking - don't use the user supplied page + * table list, get the list from the OS. + */ + psWrapExtMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psWrapExtMemIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psWrapExtMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* get the device memory context */ + psWrapExtMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt, + psWrapExtMemIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psWrapExtMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + if(psWrapExtMemIN->ui32NumPageTableEntries) + { + ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries + * sizeof(IMG_SYS_PHYADDR); + + ASSIGN_AND_EXIT_ON_ERROR(psWrapExtMemOUT->eError, + OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32PageTableSize, + (IMG_VOID **)&psSysPAddr, 0, + "Page Table")); + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + psSysPAddr, + psWrapExtMemIN->psSysPAddr, + ui32PageTableSize) != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageTableSize, (IMG_VOID *)psSysPAddr, 0); + /*not nulling pointer, out of scope*/ + return -EFAULT; + } + } + + psWrapExtMemOUT->eError = + PVRSRVWrapExtMemoryKM(hDevCookieInt, + psPerProc, + 
hDevMemContextInt, + psWrapExtMemIN->ui32ByteSize, + psWrapExtMemIN->ui32PageOffset, + psWrapExtMemIN->bPhysContig, + psSysPAddr, + psWrapExtMemIN->pvLinAddr, + psWrapExtMemIN->ui32Flags, + &psMemInfo); + + if(psWrapExtMemIN->ui32NumPageTableEntries) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32PageTableSize, + (IMG_VOID *)psSysPAddr, 0); + /*not nulling pointer, out of scope*/ + } + + if(psWrapExtMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM = + psMemInfo->pvLinAddrKM; + + /* setup the mem info */ + psWrapExtMemOUT->sClientMemInfo.pvLinAddr = 0; + psWrapExtMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr; + psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags; + psWrapExtMemOUT->sClientMemInfo.uAllocSize = psMemInfo->uAllocSize; +#if defined (SUPPORT_SID_INTERFACE) +/* see below */ +#else + psWrapExtMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle; +#endif + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo, + psMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + +#if defined (SUPPORT_SID_INTERFACE) + /* alloc subhandle for the mapping info */ + if (psMemInfo->sMemBlk.hOSMemHandle != IMG_NULL) + { + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psWrapExtMemOUT->sClientMemInfo.hMappingInfo, + psMemInfo->sMemBlk.hOSMemHandle, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo); + } + else + { + psWrapExtMemOUT->sClientMemInfo.hMappingInfo = 0; + } +#endif + + /* setup the sync info */ +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psWrapExtMemOUT->sClientSyncInfo.psSyncData = + psMemInfo->psKernelSyncInfo->psSyncData; + psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = + 
psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + psWrapExtMemOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr = + psMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + +#if defined (SUPPORT_SID_INTERFACE) + /* alloc subhandle for the mapping info */ + if (psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL) + { + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psWrapExtMemOUT->sClientSyncInfo.hMappingInfo, + psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo); + } + else + { + psWrapExtMemOUT->sClientSyncInfo.hMappingInfo = 0; + } +#else + psWrapExtMemOUT->sClientSyncInfo.hMappingInfo = + psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif +#endif + + psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo = &psWrapExtMemOUT->sClientSyncInfo; + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psWrapExtMemOUT->sClientSyncInfo.hKernelSyncInfo, + (IMG_HANDLE)psMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo); + + COMMIT_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc) + + return 0; +} +#endif /* OS_PVRSRV_WRAP_EXT_MEM_BW */ + +static IMG_INT +PVRSRVUnwrapExtMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY *psUnwrapExtMemIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvMemInfo, + psUnwrapExtMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVUnwrapExtMemoryKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + 
psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psUnwrapExtMemIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + return 0; +} + +#if defined(SUPPORT_ION) +static IMG_INT +PVRSRVMapIonHandleBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MAP_ION_HANDLE *psMapIonIN, + PVRSRV_BRIDGE_OUT_MAP_ION_HANDLE *psMapIonOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + psMapIonOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &psMapIonIN->hDevCookie, + psMapIonIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if (psMapIonOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup device node handle", __FUNCTION__)); + return 0; + } + + psMapIonOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &psMapIonIN->hDevMemContext, + psMapIonIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + if (psMapIonOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to lookup memory context handle", __FUNCTION__)); + return 0; + } + + psMapIonOUT->eError = PVRSRVMapIonHandleKM(psPerProc, + psMapIonIN->hDevCookie, + psMapIonIN->hDevMemContext, + psMapIonIN->handle, + psMapIonIN->ui32Attribs, + psMapIonIN->ui32Size, + &psKernelMemInfo); + if (psMapIonOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map ion handle", __FUNCTION__)); + return 0; + } + + OSMemSet(&psMapIonOUT->sClientMemInfo, + 0, + sizeof(psMapIonOUT->sClientMemInfo)); + + psMapIonOUT->sClientMemInfo.pvLinAddrKM = + psKernelMemInfo->pvLinAddrKM; + + psMapIonOUT->sClientMemInfo.pvLinAddr = 0; + psMapIonOUT->sClientMemInfo.sDevVAddr = psKernelMemInfo->sDevVAddr; + psMapIonOUT->sClientMemInfo.ui32Flags = psKernelMemInfo->ui32Flags; + psMapIonOUT->sClientMemInfo.uAllocSize = psKernelMemInfo->uAllocSize; + + /* No mapping info, we map through ion */ + psMapIonOUT->sClientMemInfo.hMappingInfo = IMG_NULL; + + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + 
&psMapIonOUT->sClientMemInfo.hKernelMemInfo, + psKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + if(psMapIonIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ) + { + /* signal no syncinfo */ + OSMemSet(&psMapIonOUT->sClientSyncInfo, + 0, + sizeof (PVRSRV_CLIENT_SYNC_INFO)); + psMapIonOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL; + } + else + { + /* and setup the sync info */ +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psMapIonOUT->sClientSyncInfo.psSyncData = + psKernelMemInfo->psKernelSyncInfo->psSyncData; + psMapIonOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + psMapIonOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + psMapIonOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + +#if defined (SUPPORT_SID_INTERFACE) + if (psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL) + { + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapIonOUT->sClientSyncInfo.hMappingInfo, + psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psMapIonOUT->sClientMemInfo.hKernelMemInfo); + } + else + { + psMapIonOUT->sClientSyncInfo.hMappingInfo = 0; + } +#else + psMapIonOUT->sClientSyncInfo.hMappingInfo = + psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif +#endif + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapIonOUT->sClientSyncInfo.hKernelSyncInfo, + psKernelMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psMapIonOUT->sClientMemInfo.hKernelMemInfo); + + psMapIonOUT->sClientMemInfo.psClientSyncInfo = + &psMapIonOUT->sClientSyncInfo; + } + return 0; +} + +static IMG_INT +PVRSRVUnmapIonHandleBW(IMG_UINT32 ui32BridgeID, + 
PVRSRV_BRIDGE_IN_UNMAP_ION_HANDLE *psUnmapIonIN, + PVRSRV_BRIDGE_RETURN *psUnmapIonOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvKernelMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_ION_HANDLE); + + psUnmapIonOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvKernelMemInfo, +#if defined (SUPPORT_SID_INTERFACE) + psUnmapIonIN->hKernelMemInfo, +#else + psUnmapIonIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(psUnmapIonOUT->eError != PVRSRV_OK) + { + return 0; + } + + psUnmapIonOUT->eError = PVRSRVUnmapIonHandleKM(pvKernelMemInfo); + + if(psUnmapIonOUT->eError != PVRSRV_OK) + { + return 0; + } + + psUnmapIonOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + psUnmapIonIN->hKernelMemInfo, +#else + psUnmapIonIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + + return 0; +} +#endif /* SUPPORT_ION */ + +static IMG_INT +PVRSRVGetFreeDeviceMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM *psGetFreeDeviceMemIN, + PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM *psGetFreeDeviceMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM); + + PVR_UNREFERENCED_PARAMETER(psPerProc); + + psGetFreeDeviceMemOUT->eError = + PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags, + &psGetFreeDeviceMemOUT->ui32Total, + &psGetFreeDeviceMemOUT->ui32Free, + &psGetFreeDeviceMemOUT->ui32LargestBlock); + + return 0; +} + +static IMG_INT +PVRMMapOSMemHandleToMMapDataBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA *psMMapDataIN, + PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA *psMMapDataOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA); + +#if defined (__linux__) || defined (__QNXNTO__) + psMMapDataOUT->eError = + PVRMMapOSMemHandleToMMapData(psPerProc, + psMMapDataIN->hMHandle, + 
&psMMapDataOUT->ui32MMapOffset, + &psMMapDataOUT->ui32ByteOffset, + &psMMapDataOUT->ui32RealByteSize, + &psMMapDataOUT->ui32UserVAddr); +#else + PVR_UNREFERENCED_PARAMETER(psPerProc); + PVR_UNREFERENCED_PARAMETER(psMMapDataIN); + + psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; +#endif + return 0; +} + + +static IMG_INT +PVRMMapReleaseMMapDataBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA *psMMapDataIN, + PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA *psMMapDataOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_RELEASE_MMAP_DATA); + +#if defined (__linux__) || defined (__QNXNTO__) + psMMapDataOUT->eError = + PVRMMapReleaseMMapData(psPerProc, + psMMapDataIN->hMHandle, + &psMMapDataOUT->bMUnmap, + &psMMapDataOUT->ui32RealByteSize, + &psMMapDataOUT->ui32UserVAddr); +#else + + PVR_UNREFERENCED_PARAMETER(psPerProc); + PVR_UNREFERENCED_PARAMETER(psMMapDataIN); + + psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; +#endif + return 0; +} + + +#if defined (SUPPORT_SID_INTERFACE) +static IMG_INT +PVRSRVChangeDeviceMemoryAttributesBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CHG_DEV_MEM_ATTRIBS *psChgMemAttribIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hKernelMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CHG_DEV_MEM_ATTRIBS); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hKernelMemInfo, + psChgMemAttribIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVChangeDeviceMemoryAttributesKM(hKernelMemInfo, psChgMemAttribIN->ui32Attribs); + + return 0; +} +#else +static IMG_INT +PVRSRVChangeDeviceMemoryAttributesBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CHG_DEV_MEM_ATTRIBS *psChgMemAttribIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + 
PVR_UNREFERENCED_PARAMETER(psChgMemAttribIN); + PVR_UNREFERENCED_PARAMETER(psRetOUT); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + return 0; +} +#endif + +#ifdef PDUMP +static IMG_INT +PDumpIsCaptureFrameBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING *psPDumpIsCapturingOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_ISCAPTURING); + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM(); + psPDumpIsCapturingOUT->eError = PVRSRV_OK; + + return 0; +} + +static IMG_INT +PDumpCommentBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_COMMENT *psPDumpCommentIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0], + psPDumpCommentIN->ui32Flags); + return 0; +} + +static IMG_INT +PDumpSetFrameBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_SETFRAME *psPDumpSetFrameIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SETFRAME); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame); + + return 0; +} + +static IMG_INT +PDumpRegWithFlagsBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_DUMPREG *psPDumpRegDumpIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psDeviceNode, + psPDumpRegDumpIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = 
PDumpRegWithFlagsKM (psPDumpRegDumpIN->szRegRegion, + psPDumpRegDumpIN->sHWReg.ui32RegAddr, + psPDumpRegDumpIN->sHWReg.ui32RegVal, + psPDumpRegDumpIN->ui32Flags); + + return 0; +} + +static IMG_INT +PDumpRegPolBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_REGPOL *psPDumpRegPolIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psDeviceNode, + psPDumpRegPolIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + + psRetOUT->eError = + PDumpRegPolWithFlagsKM(psPDumpRegPolIN->szRegRegion, + psPDumpRegPolIN->sHWReg.ui32RegAddr, + psPDumpRegPolIN->sHWReg.ui32RegVal, + psPDumpRegPolIN->ui32Mask, + psPDumpRegPolIN->ui32Flags, + PDUMP_POLL_OPERATOR_EQUAL); + + return 0; +} + +static IMG_INT +PDumpMemPolBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_MEMPOL *psPDumpMemPolIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvMemInfo, +#if defined (SUPPORT_SID_INTERFACE) + psPDumpMemPolIN->hKernelMemInfo, +#else + psPDumpMemPolIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PDumpMemPolKM(((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo), + psPDumpMemPolIN->ui32Offset, + psPDumpMemPolIN->ui32Value, + psPDumpMemPolIN->ui32Mask, + psPDumpMemPolIN->eOperator, + psPDumpMemPolIN->ui32Flags, + MAKEUNIQUETAG(pvMemInfo)); + + return 0; +} + +static IMG_INT +PDumpMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM *psPDumpMemDumpIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID 
*pvMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvMemInfo, +#if defined (SUPPORT_SID_INTERFACE) + psPDumpMemDumpIN->hKernelMemInfo, +#else + psPDumpMemDumpIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PDumpMemUM(psPerProc, + psPDumpMemDumpIN->pvAltLinAddr, + psPDumpMemDumpIN->pvLinAddr, + pvMemInfo, + psPDumpMemDumpIN->ui32Offset, + psPDumpMemDumpIN->ui32Bytes, + psPDumpMemDumpIN->ui32Flags, + MAKEUNIQUETAG(pvMemInfo)); + + return 0; +} + +static IMG_INT +PDumpBitmapBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_BITMAP *psPDumpBitmapIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_HANDLE hDevMemContextInt; + + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID **)&psDeviceNode, + psPDumpBitmapIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + psRetOUT->eError = + PVRSRVLookupHandle( psPerProc->psHandleBase, + &hDevMemContextInt, + psPDumpBitmapIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PDumpBitmapKM(psDeviceNode, + &psPDumpBitmapIN->szFileName[0], + psPDumpBitmapIN->ui32FileOffset, + psPDumpBitmapIN->ui32Width, + psPDumpBitmapIN->ui32Height, + psPDumpBitmapIN->ui32StrideInBytes, + psPDumpBitmapIN->sDevBaseAddr, + hDevMemContextInt, + psPDumpBitmapIN->ui32Size, + psPDumpBitmapIN->ePixelFormat, + psPDumpBitmapIN->eMemFormat, + psPDumpBitmapIN->ui32Flags); + + return 0; +} + +static IMG_INT +PDumpReadRegBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_READREG *psPDumpReadRegIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + 
PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID **)&psDeviceNode, + psPDumpReadRegIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + psRetOUT->eError = + PDumpReadRegKM(&psPDumpReadRegIN->szRegRegion[0], + &psPDumpReadRegIN->szFileName[0], + psPDumpReadRegIN->ui32FileOffset, + psPDumpReadRegIN->ui32Address, + psPDumpReadRegIN->ui32Size, + psPDumpReadRegIN->ui32Flags); + + return 0; +} + +static IMG_INT +PDumpMemPagesBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES *psPDumpMemPagesIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPAGES); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psDeviceNode, + psPDumpMemPagesIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + + return 0; +} + +static IMG_INT +PDumpDriverInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO *psPDumpDriverInfoIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 ui32PDumpFlags; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + ui32PDumpFlags = 0; + if(psPDumpDriverInfoIN->bContinuous) + { + ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS; + } + psRetOUT->eError = + PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0], + ui32PDumpFlags); + + return 0; +} + +static IMG_INT +PDumpSyncDumpBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC *psPDumpSyncDumpIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes; + IMG_VOID *pvSyncInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC); + + psRetOUT->eError = + 
PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo, +#if defined (SUPPORT_SID_INTERFACE) + psPDumpSyncDumpIN->hKernelSyncInfo, +#else + psPDumpSyncDumpIN->psKernelSyncInfo, +#endif + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PDumpMemUM(psPerProc, + psPDumpSyncDumpIN->pvAltLinAddr, + IMG_NULL, + ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM, + psPDumpSyncDumpIN->ui32Offset, + ui32Bytes, + 0, + MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM)); + + return 0; +} + +static IMG_INT +PDumpSyncPolBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL *psPDumpSyncPolIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 ui32Offset; + IMG_VOID *pvSyncInfo; + IMG_UINT32 ui32Value; + IMG_UINT32 ui32Mask; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSyncInfo, +#if defined (SUPPORT_SID_INTERFACE) + psPDumpSyncPolIN->hKernelSyncInfo, +#else + psPDumpSyncPolIN->psKernelSyncInfo, +#endif + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + if(psPDumpSyncPolIN->bIsRead) + { + ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete); + } + else + { + ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete); + } + + /* FIXME: Move this code to somewhere outside of the bridge */ + if (psPDumpSyncPolIN->bUseLastOpDumpVal) + { + if(psPDumpSyncPolIN->bIsRead) + { + ui32Value = ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncData->ui32LastReadOpDumpVal; + } + else + { + ui32Value = ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncData->ui32LastOpDumpVal; + } + ui32Mask = 0xffffffff; + } + else + { + ui32Value = psPDumpSyncPolIN->ui32Value; + ui32Mask = psPDumpSyncPolIN->ui32Mask; + } + + psRetOUT->eError = + PDumpMemPolKM(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM, + 
ui32Offset, + ui32Value, + ui32Mask, + PDUMP_POLL_OPERATOR_EQUAL, + 0, + MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM)); + + return 0; +} + + +static IMG_INT +PDumpCycleCountRegReadBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ *psPDumpCycleCountRegReadIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psDeviceNode, + psPDumpCycleCountRegReadIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + PDumpCycleCountRegRead(&psDeviceNode->sDevId, + psPDumpCycleCountRegReadIN->ui32RegOffset, + psPDumpCycleCountRegReadIN->bLastFrame); + + psRetOUT->eError = PVRSRV_OK; + + return 0; +} + +static IMG_INT +PDumpPDDevPAddrBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR *psPDumpPDDevPAddrIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo, + psPDumpPDDevPAddrIN->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PDumpPDDevPAddrKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo, + psPDumpPDDevPAddrIN->ui32Offset, + psPDumpPDDevPAddrIN->sPDDevPAddr, + MAKEUNIQUETAG(pvMemInfo), + PDUMP_PD_UNIQUETAG); + return 0; +} + +static IMG_INT +PDumpStartInitPhaseBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STARTINITPHASE); + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + 
psRetOUT->eError = PDumpStartInitPhaseKM(); + + return 0; +} + +static IMG_INT +PDumpStopInitPhaseBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STOPINITPHASE); + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + PVR_UNREFERENCED_PARAMETER(psPerProc); + + psRetOUT->eError = PDumpStopInitPhaseKM(); + + return 0; +} + +#endif /* PDUMP */ + + +static IMG_INT +PVRSRVGetMiscInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_MISC_INFO *psGetMiscInfoIN, + PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_MISC_INFO_KM sMiscInfo = {0}; +#endif + PVRSRV_ERROR eError; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO); + +#if defined (SUPPORT_SID_INTERFACE) + sMiscInfo.ui32StateRequest = psGetMiscInfoIN->sMiscInfo.ui32StateRequest; + sMiscInfo.ui32StatePresent = psGetMiscInfoIN->sMiscInfo.ui32StatePresent; + sMiscInfo.ui32MemoryStrLen = psGetMiscInfoIN->sMiscInfo.ui32MemoryStrLen; + sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr; + + OSMemCopy(&sMiscInfo.sCacheOpCtl, + &psGetMiscInfoIN->sMiscInfo.sCacheOpCtl, + sizeof(sMiscInfo.sCacheOpCtl)); + OSMemCopy(&sMiscInfo.sGetRefCountCtl, + &psGetMiscInfoIN->sMiscInfo.sGetRefCountCtl, + sizeof(sMiscInfo.sGetRefCountCtl)); +#else + OSMemCopy(&psGetMiscInfoOUT->sMiscInfo, + &psGetMiscInfoIN->sMiscInfo, + sizeof(PVRSRV_MISC_INFO)); +#endif + + if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) && + ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0) && + ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0)) + { + /* Client must choose which of memstats and DDK version will be written to + * kernel side buffer */ + psGetMiscInfoOUT->eError = 
PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + + if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) || + ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0) || + ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0)) + { + /* Alloc kernel side buffer to write into */ +#if defined (SUPPORT_SID_INTERFACE) + ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError, + OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen, + (IMG_VOID **)&sMiscInfo.pszMemoryStr, 0, + "Output string buffer")); + psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&sMiscInfo); + + /* Copy result to user */ + eError = CopyToUserWrapper(psPerProc, ui32BridgeID, + psGetMiscInfoIN->sMiscInfo.pszMemoryStr, + sMiscInfo.pszMemoryStr, + sMiscInfo.ui32MemoryStrLen); +#else + ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError, + OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen, + (IMG_VOID **)&psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0, + "Output string buffer")); + + psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo); + + /* Copy result to user */ + eError = CopyToUserWrapper(psPerProc, ui32BridgeID, + psGetMiscInfoIN->sMiscInfo.pszMemoryStr, + psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, + psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen); +#endif + + /* Free kernel side buffer again */ +#if defined (SUPPORT_SID_INTERFACE) + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sMiscInfo.ui32MemoryStrLen, + (IMG_VOID *)sMiscInfo.pszMemoryStr, 0); +#else + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen, + (IMG_VOID *)psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0); +#endif + + /* Replace output buffer pointer with input pointer, as both are expected + * to point to the same userspace memory. 
+ */ + psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr; + + if(eError != PVRSRV_OK) + { + /* Do error check at the end as we always have to free and reset the + * pointer. + */ + PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoBW Error copy to user")); + return -EFAULT; + } + } + else + { +#if defined (SUPPORT_SID_INTERFACE) + psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&sMiscInfo); +#else + psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo); +#endif + } + + /* Return on error so exit status of PVRSRVGetMiscInfoKM is propagated to client. + * Don't alloc handles for event object or timer; if error exit status is returned + * the handles should not be used (even if not null) */ + if (psGetMiscInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* + * The handles are not allocated in batch mode as they are shared + * (a shared handle is allocated at most once), and there is no + * resource allocation to undo if the handle allocation fails. 
+ */ +#if defined (SUPPORT_SID_INTERFACE) + if (sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) +#else + if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) +#endif + { + psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase, + &psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM, +#if defined (SUPPORT_SID_INTERFACE) + sMiscInfo.sGlobalEventObject.hOSEventKM, +#else + psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM, +#endif + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + + if (psGetMiscInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.szName, + sMiscInfo.sGlobalEventObject.szName, + EVENTOBJNAME_MAXLENGTH); +#endif + } + +#if defined (SUPPORT_SID_INTERFACE) + if (sMiscInfo.hSOCTimerRegisterOSMemHandle) +#else + if (psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle) +#endif + { + /* Allocate handle for SOC OSMemHandle */ + psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase, + &psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle, +#if defined (SUPPORT_SID_INTERFACE) + sMiscInfo.hSOCTimerRegisterOSMemHandle, +#else + psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle, +#endif + PVRSRV_HANDLE_TYPE_SOC_TIMER, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + + if (psGetMiscInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + } +#if defined (SUPPORT_SID_INTERFACE) + else + { + psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle = 0; + } + + /* copy data from local sMiscInfo to OUT */ + psGetMiscInfoOUT->sMiscInfo.ui32StateRequest = sMiscInfo.ui32StateRequest; + psGetMiscInfoOUT->sMiscInfo.ui32StatePresent = sMiscInfo.ui32StatePresent; + + psGetMiscInfoOUT->sMiscInfo.pvSOCTimerRegisterKM = sMiscInfo.pvSOCTimerRegisterKM; + psGetMiscInfoOUT->sMiscInfo.pvSOCTimerRegisterUM = sMiscInfo.pvSOCTimerRegisterUM; + 
psGetMiscInfoOUT->sMiscInfo.pvSOCClockGateRegs = sMiscInfo.pvSOCClockGateRegs; + + psGetMiscInfoOUT->sMiscInfo.ui32SOCClockGateRegsSize = sMiscInfo.ui32SOCClockGateRegsSize; + + OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.aui32DDKVersion, + &sMiscInfo.aui32DDKVersion, + sizeof(psGetMiscInfoOUT->sMiscInfo.aui32DDKVersion)); + OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.sCacheOpCtl, + &sMiscInfo.sCacheOpCtl, + sizeof(psGetMiscInfoOUT->sMiscInfo.sCacheOpCtl)); + OSMemCopy(&psGetMiscInfoOUT->sMiscInfo.sGetRefCountCtl, + &sMiscInfo.sGetRefCountCtl, + sizeof(psGetMiscInfoOUT->sMiscInfo.sGetRefCountCtl)); +#endif + + return 0; +} + +static IMG_INT +PVRSRVConnectBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CONNECT_SERVICES *psConnectServicesIN, + PVRSRV_BRIDGE_OUT_CONNECT_SERVICES *psConnectServicesOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES); + +#if defined(PDUMP) + /* Store the per process connection info. + * The Xserver initially connects via PVR2D which sets the persistent flag. + * But, later the Xserver may connect via SGL which doesn't carry the flag (in + * general SGL clients aren't persistent). So we OR in the flag so if it was set + * before it remains set. + */ + if ((psConnectServicesIN->ui32Flags & SRV_FLAGS_PERSIST) != 0) + { + psPerProc->bPDumpPersistent = IMG_TRUE; + } + +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* Select whether this client is our 'active' target for pdumping in a + * multi-process environment. + * NOTE: only 1 active target is supported at present. 
+ */ + if ((psConnectServicesIN->ui32Flags & SRV_FLAGS_PDUMP_ACTIVE) != 0) + { + psPerProc->bPDumpActive = IMG_TRUE; + } +#endif /* SUPPORT_PDUMP_MULTI_PROCESS */ +#else + PVR_UNREFERENCED_PARAMETER(psConnectServicesIN); +#endif + psConnectServicesOUT->hKernelServices = psPerProc->hPerProcData; + psConnectServicesOUT->eError = PVRSRV_OK; + + return 0; +} + +static IMG_INT +PVRSRVDisconnectBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVR_UNREFERENCED_PARAMETER(psPerProc); + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DISCONNECT_SERVICES); + + /* just return OK, per-process data is cleaned up by resmgr */ + psRetOUT->eError = PVRSRV_OK; + + return 0; +} + +static IMG_INT +PVRSRVEnumerateDCBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ENUMCLASS *psEnumDispClassIN, + PVRSRV_BRIDGE_OUT_ENUMCLASS *psEnumDispClassOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVR_UNREFERENCED_PARAMETER(psPerProc); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS); + + psEnumDispClassOUT->eError = + PVRSRVEnumerateDCKM(psEnumDispClassIN->sDeviceClass, + &psEnumDispClassOUT->ui32NumDevices, + &psEnumDispClassOUT->ui32DevID[0]); + + return 0; +} + +static IMG_INT +PVRSRVOpenDCDeviceBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceIN, + PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDispClassInfoInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE); + + NEW_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc, 1) + + psOpenDispClassDeviceOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psOpenDispClassDeviceIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK) + { + return 0; + 
} + + psOpenDispClassDeviceOUT->eError = + PVRSRVOpenDCDeviceKM(psPerProc, + psOpenDispClassDeviceIN->ui32DeviceID, + hDevCookieInt, + &hDispClassInfoInt); + + if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK) + { + return 0; + } + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psOpenDispClassDeviceOUT->hDeviceKM, + hDispClassInfoInt, + PVRSRV_HANDLE_TYPE_DISP_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + COMMIT_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVCloseDCDeviceBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE *psCloseDispClassDeviceIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfoInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfoInt, + psCloseDispClassDeviceIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psCloseDispClassDeviceIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + return 0; +} + +static IMG_INT +PVRSRVEnumDCFormatsBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsIN, + PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfoInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS); + + psEnumDispClassFormatsOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfoInt, + psEnumDispClassFormatsIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psEnumDispClassFormatsOUT->eError != PVRSRV_OK) + { + return 0; + } + + psEnumDispClassFormatsOUT->eError = + 
PVRSRVEnumDCFormatsKM(pvDispClassInfoInt, + &psEnumDispClassFormatsOUT->ui32Count, + psEnumDispClassFormatsOUT->asFormat); + + return 0; +} + +static IMG_INT +PVRSRVEnumDCDimsBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsIN, + PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfoInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS); + + psEnumDispClassDimsOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfoInt, + psEnumDispClassDimsIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + + if(psEnumDispClassDimsOUT->eError != PVRSRV_OK) + { + return 0; + } + + psEnumDispClassDimsOUT->eError = + PVRSRVEnumDCDimsKM(pvDispClassInfoInt, + &psEnumDispClassDimsIN->sFormat, + &psEnumDispClassDimsOUT->ui32Count, + psEnumDispClassDimsOUT->asDim); + + return 0; +} + +#if defined(SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER) +static IMG_INT +PVRSRVGetDCSystemBufferBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferIN, //IMG_HANDLE *phGetDispClassSysBufferIN, + PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hBufferInt; + IMG_VOID *pvDispClassInfoInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER); + + NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc, 1) + + psGetDispClassSysBufferOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfoInt, + psGetDispClassSysBufferIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetDispClassSysBufferOUT->eError = + PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt, + &hBufferInt); + + if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* PRQA S 1461 6 */ /* ignore warning about enum type being 
converted */ + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psGetDispClassSysBufferOUT->hBuffer, + hBufferInt, + PVRSRV_HANDLE_TYPE_DISP_BUFFER, + (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED), + psGetDispClassSysBufferIN->hDeviceKM); + + COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc) + + return 0; +} +#endif + +static IMG_INT +PVRSRVGetDCInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO *psGetDispClassInfoIN, + PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO *psGetDispClassInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_INFO); + + psGetDispClassInfoOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psGetDispClassInfoIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psGetDispClassInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetDispClassInfoOUT->eError = + PVRSRVGetDCInfoKM(pvDispClassInfo, + &psGetDispClassInfoOUT->sDisplayInfo); + + return 0; +} + +static IMG_INT +PVRSRVCreateDCSwapChainBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainIN, + PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_HANDLE hSwapChainInt; + IMG_UINT32 ui32SwapChainID; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN); + + NEW_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc, 1) + + psCreateDispClassSwapChainOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psCreateDispClassSwapChainIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + + if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* Get ui32SwapChainID from input */ + ui32SwapChainID = psCreateDispClassSwapChainIN->ui32SwapChainID; + + 
psCreateDispClassSwapChainOUT->eError = + PVRSRVCreateDCSwapChainKM(psPerProc, pvDispClassInfo, + psCreateDispClassSwapChainIN->ui32Flags, + &psCreateDispClassSwapChainIN->sDstSurfAttrib, + &psCreateDispClassSwapChainIN->sSrcSurfAttrib, + psCreateDispClassSwapChainIN->ui32BufferCount, + psCreateDispClassSwapChainIN->ui32OEMFlags, + &hSwapChainInt, + &ui32SwapChainID); + + if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* Pass ui32SwapChainID to output */ + psCreateDispClassSwapChainOUT->ui32SwapChainID = ui32SwapChainID; + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psCreateDispClassSwapChainOUT->hSwapChain, + hSwapChainInt, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN, + PVRSRV_HANDLE_ALLOC_FLAG_NONE, + psCreateDispClassSwapChainIN->hDeviceKM); + + COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVDestroyDCSwapChainBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN *psDestroyDispClassSwapChainIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvSwapChain; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain, + psDestroyDispClassSwapChainIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVDestroyDCSwapChainKM(pvSwapChain); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psDestroyDispClassSwapChainIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + + return 0; +} + +static IMG_INT +PVRSRVSetDCDstRectBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassDstRectIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID 
*pvSwapChain; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psSetDispClassDstRectIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSwapChain, + psSetDispClassDstRectIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVSetDCDstRectKM(pvDispClassInfo, + pvSwapChain, + &psSetDispClassDstRectIN->sRect); + + return 0; +} + +static IMG_INT +PVRSRVSetDCSrcRectBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassSrcRectIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChain; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psSetDispClassSrcRectIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSwapChain, + psSetDispClassSrcRectIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVSetDCSrcRectKM(pvDispClassInfo, + pvSwapChain, + &psSetDispClassSrcRectIN->sRect); + + return 0; +} + +static IMG_INT +PVRSRVSetDCDstColourKeyBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChain; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + 
&pvDispClassInfo, + psSetDispClassColKeyIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSwapChain, + psSetDispClassColKeyIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVSetDCDstColourKeyKM(pvDispClassInfo, + pvSwapChain, + psSetDispClassColKeyIN->ui32CKColour); + + return 0; +} + +static IMG_INT +PVRSRVSetDCSrcColourKeyBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChain; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psSetDispClassColKeyIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSwapChain, + psSetDispClassColKeyIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo, + pvSwapChain, + psSetDispClassColKeyIN->ui32CKColour); + + return 0; +} + +static IMG_INT +PVRSRVGetDCBuffersBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersIN, + PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChain; + IMG_UINT32 i; +#if defined (SUPPORT_SID_INTERFACE) + IMG_HANDLE *pahBuffer; +#endif + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS); + + NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc, 
PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) + + psGetDispClassBuffersOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psGetDispClassBuffersIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psGetDispClassBuffersOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetDispClassBuffersOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSwapChain, + psGetDispClassBuffersIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN); + if(psGetDispClassBuffersOUT->eError != PVRSRV_OK) + { + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + psGetDispClassBuffersOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(IMG_HANDLE) * PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS, + (IMG_PVOID *)&pahBuffer, 0, + "Temp Swapchain Buffers"); + + if (psGetDispClassBuffersOUT->eError != PVRSRV_OK) + { + return 0; + } +#endif + + psGetDispClassBuffersOUT->eError = + PVRSRVGetDCBuffersKM(pvDispClassInfo, + pvSwapChain, + &psGetDispClassBuffersOUT->ui32BufferCount, +#if defined (SUPPORT_SID_INTERFACE) + pahBuffer, +#else + psGetDispClassBuffersOUT->ahBuffer, +#endif + psGetDispClassBuffersOUT->asPhyAddr); + if (psGetDispClassBuffersOUT->eError != PVRSRV_OK) + { + return 0; + } + + PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS); + + for(i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++) + { +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hBufferExt; +#else + IMG_HANDLE hBufferExt; +#endif + + /* PRQA S 1461 15 */ /* ignore warning about enum type being converted */ +#if defined (SUPPORT_SID_INTERFACE) + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &hBufferExt, + pahBuffer[i], + PVRSRV_HANDLE_TYPE_DISP_BUFFER, + (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED), + psGetDispClassBuffersIN->hSwapChain); +#else + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &hBufferExt, + psGetDispClassBuffersOUT->ahBuffer[i], + PVRSRV_HANDLE_TYPE_DISP_BUFFER, + 
(PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED), + psGetDispClassBuffersIN->hSwapChain); +#endif + + psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt; + } + +#if defined (SUPPORT_SID_INTERFACE) + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(IMG_HANDLE) * PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS, + (IMG_PVOID)pahBuffer, 0); +#endif + + COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVSwapToDCBufferBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER *psSwapDispClassBufferIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChainBuf; +#if defined (SUPPORT_SID_INTERFACE) + IMG_HANDLE hPrivateTag; +#endif + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psSwapDispClassBufferIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupSubHandle(psPerProc->psHandleBase, + &pvSwapChainBuf, + psSwapDispClassBufferIN->hBuffer, + PVRSRV_HANDLE_TYPE_DISP_BUFFER, + psSwapDispClassBufferIN->hDeviceKM); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + if (psSwapDispClassBufferIN->hPrivateTag != 0) + { + psRetOUT->eError = + PVRSRVLookupSubHandle(psPerProc->psHandleBase, + &hPrivateTag, + psSwapDispClassBufferIN->hPrivateTag, + PVRSRV_HANDLE_TYPE_DISP_BUFFER, + psSwapDispClassBufferIN->hDeviceKM); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + else + { + hPrivateTag = IMG_NULL; + } +#endif + + psRetOUT->eError = + PVRSRVSwapToDCBufferKM(pvDispClassInfo, + pvSwapChainBuf, + psSwapDispClassBufferIN->ui32SwapInterval, +#if defined (SUPPORT_SID_INTERFACE) + hPrivateTag, +#else + psSwapDispClassBufferIN->hPrivateTag, +#endif + 
psSwapDispClassBufferIN->ui32ClipRectCount, + psSwapDispClassBufferIN->sClipRect); + + return 0; +} + +static IMG_INT +PVRSRVSwapToDCBuffer2BW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER2 *psSwapDispClassBufferIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvPrivData = IMG_NULL; + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChain; + IMG_UINT32 i; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER2); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psSwapDispClassBufferIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to look up DISP_INFO handle")); + return 0; + } + + psRetOUT->eError = + PVRSRVLookupSubHandle(psPerProc->psHandleBase, + &pvSwapChain, + psSwapDispClassBufferIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN, + psSwapDispClassBufferIN->hDeviceKM); + if(psRetOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to look up DISP_BUFFER handle")); + return 0; + } + + if(!OSAccessOK(PVR_VERIFY_WRITE, + psSwapDispClassBufferIN->ppsKernelMemInfos, + sizeof(IMG_HANDLE) * psSwapDispClassBufferIN->ui32NumMemInfos)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Access check failed for ppsKernelMemInfos")); + return -EFAULT; + } + + if(!OSAccessOK(PVR_VERIFY_WRITE, + psSwapDispClassBufferIN->ppsKernelSyncInfos, + sizeof(IMG_HANDLE) * psSwapDispClassBufferIN->ui32NumMemInfos)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Access check failed for ppsKernelSyncInfos")); + return -EFAULT; + } + + for (i = 0; i < psSwapDispClassBufferIN->ui32NumMemInfos; i++) + { + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_PVOID *)&psKernelMemInfo, + 
psSwapDispClassBufferIN->ppsKernelMemInfos[i], + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to look up MEM_INFO handle")); + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_PVOID *)&psKernelSyncInfo, + psSwapDispClassBufferIN->ppsKernelSyncInfos[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to look up SYNC_INFO handle")); + return 0; + } + + psSwapDispClassBufferIN->ppsKernelMemInfos[i] = psKernelMemInfo; + psSwapDispClassBufferIN->ppsKernelSyncInfos[i] = psKernelSyncInfo; + } + + if(psSwapDispClassBufferIN->ui32PrivDataLength > 0) + { + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + psSwapDispClassBufferIN->ui32PrivDataLength, + (IMG_VOID **)&pvPrivData, IMG_NULL, + "Swap Command Private Data") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2BW: Failed to allocate private data space")); + return -ENOMEM; + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + pvPrivData, + psSwapDispClassBufferIN->pvPrivData, + psSwapDispClassBufferIN->ui32PrivDataLength) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSwapToDCBuffer2BW: Failed to copy private data")); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + psSwapDispClassBufferIN->ui32PrivDataLength, + pvPrivData, IMG_NULL); + return -EFAULT; + } + } + + psRetOUT->eError = + PVRSRVSwapToDCBuffer2KM(pvDispClassInfo, + pvSwapChain, + psSwapDispClassBufferIN->ui32SwapInterval, + psSwapDispClassBufferIN->ppsKernelMemInfos, + psSwapDispClassBufferIN->ppsKernelSyncInfos, + psSwapDispClassBufferIN->ui32NumMemInfos, + pvPrivData, + psSwapDispClassBufferIN->ui32PrivDataLength); + + if(psRetOUT->eError != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + psSwapDispClassBufferIN->ui32PrivDataLength, + pvPrivData, IMG_NULL); + } + + return 0; +} + + + +static IMG_INT +PVRSRVSwapToDCSystemBW(IMG_UINT32 
ui32BridgeID, + PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM *psSwapDispClassSystemIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvDispClassInfo; + IMG_VOID *pvSwapChain; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvDispClassInfo, + psSwapDispClassSystemIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_DISP_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupSubHandle(psPerProc->psHandleBase, + &pvSwapChain, + psSwapDispClassSystemIN->hSwapChain, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN, + psSwapDispClassSystemIN->hDeviceKM); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + psRetOUT->eError = + PVRSRVSwapToDCSystemKM(pvDispClassInfo, + pvSwapChain); + + return 0; +} + +static IMG_INT +PVRSRVOpenBCDeviceBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceIN, + PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hBufClassInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE); + + NEW_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc, 1) + + psOpenBufferClassDeviceOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psOpenBufferClassDeviceIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK) + { + return 0; + } + + psOpenBufferClassDeviceOUT->eError = + PVRSRVOpenBCDeviceKM(psPerProc, + psOpenBufferClassDeviceIN->ui32DeviceID, + hDevCookieInt, + &hBufClassInfo); + if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK) + { + return 0; + } + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psOpenBufferClassDeviceOUT->hDeviceKM, + hBufClassInfo, + PVRSRV_HANDLE_TYPE_BUF_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + 
COMMIT_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVCloseBCDeviceBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE *psCloseBufferClassDeviceIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvBufClassInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvBufClassInfo, + psCloseBufferClassDeviceIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_BUF_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVCloseBCDeviceKM(pvBufClassInfo); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psCloseBufferClassDeviceIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_BUF_INFO); + + return 0; +} + +static IMG_INT +PVRSRVGetBCInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO *psGetBufferClassInfoIN, + PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO *psGetBufferClassInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvBufClassInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO); + + psGetBufferClassInfoOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvBufClassInfo, + psGetBufferClassInfoIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_BUF_INFO); + if(psGetBufferClassInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetBufferClassInfoOUT->eError = + PVRSRVGetBCInfoKM(pvBufClassInfo, + &psGetBufferClassInfoOUT->sBufferInfo); + return 0; +} + +static IMG_INT +PVRSRVGetBCBufferBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferIN, + PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_VOID *pvBufClassInfo; + IMG_HANDLE hBufferInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, 
PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER); + + NEW_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc, 1) + + psGetBufferClassBufferOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvBufClassInfo, + psGetBufferClassBufferIN->hDeviceKM, + PVRSRV_HANDLE_TYPE_BUF_INFO); + if(psGetBufferClassBufferOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetBufferClassBufferOUT->eError = + PVRSRVGetBCBufferKM(pvBufClassInfo, + psGetBufferClassBufferIN->ui32BufferIndex, + &hBufferInt); + + if(psGetBufferClassBufferOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* PRQA S 1461 6 */ /* ignore warning about enum type being converted */ + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psGetBufferClassBufferOUT->hBuffer, + hBufferInt, + PVRSRV_HANDLE_TYPE_BUF_BUFFER, + (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED), + psGetBufferClassBufferIN->hDeviceKM); + + COMMIT_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc) + + return 0; +} + + +static IMG_INT +PVRSRVAllocSharedSysMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemIN, + PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM); + + NEW_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc, 1) + + psAllocSharedSysMemOUT->eError = + PVRSRVAllocSharedSysMemoryKM(psPerProc, + psAllocSharedSysMemIN->ui32Flags, + psAllocSharedSysMemIN->ui32Size, + &psKernelMemInfo); + if(psAllocSharedSysMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + OSMemSet(&psAllocSharedSysMemOUT->sClientMemInfo, + 0, + sizeof(psAllocSharedSysMemOUT->sClientMemInfo)); + + psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM = + psKernelMemInfo->pvLinAddrKM; + + psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = 0; + 
psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags = + psKernelMemInfo->ui32Flags; + psAllocSharedSysMemOUT->sClientMemInfo.uAllocSize = + psKernelMemInfo->uAllocSize; +#if defined (SUPPORT_SID_INTERFACE) + if (psKernelMemInfo->sMemBlk.hOSMemHandle != IMG_NULL) + { + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo, + psKernelMemInfo->sMemBlk.hOSMemHandle, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + } + else + { + psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = 0; + } +#else + psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle; +#endif + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo, + psKernelMemInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + COMMIT_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc) + + return 0; +} + +static IMG_INT +PVRSRVFreeSharedSysMemoryBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM *psFreeSharedSysMemIN, + PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM *psFreeSharedSysMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM); + + psFreeSharedSysMemOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psKernelMemInfo, +#if defined (SUPPORT_SID_INTERFACE) + psFreeSharedSysMemIN->hKernelMemInfo, +#else + psFreeSharedSysMemIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + + if(psFreeSharedSysMemOUT->eError != PVRSRV_OK) + return 0; + + psFreeSharedSysMemOUT->eError = + PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo); + if(psFreeSharedSysMemOUT->eError != PVRSRV_OK) + return 0; +#if defined (SUPPORT_SID_INTERFACE) + if (psFreeSharedSysMemIN->hMappingInfo != 0) + { + psFreeSharedSysMemOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + 
psFreeSharedSysMemIN->hMappingInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + if(psFreeSharedSysMemOUT->eError != PVRSRV_OK) + { + return 0; + } + } +#endif + + psFreeSharedSysMemOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + psFreeSharedSysMemIN->hKernelMemInfo, +#else + psFreeSharedSysMemIN->psKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + return 0; +} + +static IMG_INT +PVRSRVMapMemInfoMemBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM *psMapMemInfoMemIN, + PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM *psMapMemInfoMemOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + PVRSRV_HANDLE_TYPE eHandleType; +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hParent; +#else + IMG_HANDLE hParent; +#endif + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM); + + NEW_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc, 2) + + psMapMemInfoMemOUT->eError = + PVRSRVLookupHandleAnyType(psPerProc->psHandleBase, + (IMG_VOID **)&psKernelMemInfo, + &eHandleType, + psMapMemInfoMemIN->hKernelMemInfo); + if(psMapMemInfoMemOUT->eError != PVRSRV_OK) + { + return 0; + } + + switch (eHandleType) + { +#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) + case PVRSRV_HANDLE_TYPE_MEM_INFO: + case PVRSRV_HANDLE_TYPE_MEM_INFO_REF: + case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO: +#else + case PVRSRV_HANDLE_TYPE_NONE: +#endif + break; + default: + psMapMemInfoMemOUT->eError = PVRSRV_ERROR_INVALID_HANDLE_TYPE; + return 0; + } + + /* + * To prevent the building up of deep chains of subhandles, parent + * the new meminfo off the parent of the input meminfo, if it has + * a parent. 
+ */ + psMapMemInfoMemOUT->eError = + PVRSRVGetParentHandle(psPerProc->psHandleBase, + &hParent, + psMapMemInfoMemIN->hKernelMemInfo, + eHandleType); + if (psMapMemInfoMemOUT->eError != PVRSRV_OK) + { + return 0; + } +#if defined (SUPPORT_SID_INTERFACE) + if (hParent == 0) +#else + if (hParent == IMG_NULL) +#endif + { + hParent = psMapMemInfoMemIN->hKernelMemInfo; + } + + OSMemSet(&psMapMemInfoMemOUT->sClientMemInfo, + 0, + sizeof(psMapMemInfoMemOUT->sClientMemInfo)); + + psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM = + psKernelMemInfo->pvLinAddrKM; + + psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = 0; + psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr = + psKernelMemInfo->sDevVAddr; + psMapMemInfoMemOUT->sClientMemInfo.ui32Flags = + psKernelMemInfo->ui32Flags; + psMapMemInfoMemOUT->sClientMemInfo.uAllocSize = + psKernelMemInfo->uAllocSize; +#if defined (SUPPORT_SID_INTERFACE) + if (psKernelMemInfo->sMemBlk.hOSMemHandle != IMG_NULL) + { + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo, + psKernelMemInfo->sMemBlk.hOSMemHandle, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + hParent); + } + else + { + psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = 0; + } +#else + psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle; +#endif + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo, + psKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + hParent); + + if(psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ) + { + /* signal no syncinfo */ + OSMemSet(&psMapMemInfoMemOUT->sClientSyncInfo, + 0, + sizeof (PVRSRV_CLIENT_SYNC_INFO)); + } + else + { + /* and setup the sync info */ +#if !defined(PVRSRV_DISABLE_UM_SYNCOBJ_MAPPINGS) + psMapMemInfoMemOUT->sClientSyncInfo.psSyncData = + psKernelMemInfo->psKernelSyncInfo->psSyncData; + 
psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr; + psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr; + psMapMemInfoMemOUT->sClientSyncInfo.sReadOps2CompleteDevVAddr = + psKernelMemInfo->psKernelSyncInfo->sReadOps2CompleteDevVAddr; + +#if defined (SUPPORT_SID_INTERFACE) + if (psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle != IMG_NULL) + { + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo, + psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo); + } + else + { + psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo = 0; + } +#else + psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo = + psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle; +#endif +#endif + + psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo = &psMapMemInfoMemOUT->sClientSyncInfo; + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psMapMemInfoMemOUT->sClientSyncInfo.hKernelSyncInfo, + psKernelMemInfo->psKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo); + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc) + + return 0; +} + + + +IMG_INT +DummyBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + IMG_VOID *psBridgeOut, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ +#if !defined(DEBUG) + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); +#endif + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + PVR_UNREFERENCED_PARAMETER(psBridgeOut); + PVR_UNREFERENCED_PARAMETER(psPerProc); + +#if defined(DEBUG_BRIDGE_KM) + PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %u (%s) mapped to " + "Dummy Wrapper (probably not what you 
want!)", + __FUNCTION__, ui32BridgeID, g_BridgeDispatchTable[ui32BridgeID].pszIOCName)); +#else + PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %u mapped to " + "Dummy Wrapper (probably not what you want!)", + __FUNCTION__, ui32BridgeID)); +#endif + return -ENOTTY; +} + + +/*! + * ***************************************************************************** + * @brief A wrapper for filling in the g_BridgeDispatchTable array that does + * error checking. + * + * @param ui32Index + * @param pszIOCName + * @param pfFunction + * @param pszFunctionName + * + * @return + ********************************************************************************/ +IMG_VOID +_SetDispatchTableEntry(IMG_UINT32 ui32Index, + const IMG_CHAR *pszIOCName, + BridgeWrapperFunction pfFunction, + const IMG_CHAR *pszFunctionName) +{ + static IMG_UINT32 ui32PrevIndex = ~0UL; /* -1 */ +#if !defined(DEBUG) + PVR_UNREFERENCED_PARAMETER(pszIOCName); +#endif +#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM) + PVR_UNREFERENCED_PARAMETER(pszFunctionName); +#endif + +#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) + /* INTEGRATION_POINT: Enable this to dump out the dispatch table entries */ + PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName)); +#endif + + /* We should never be over-writing a previous entry. + * If we are, tell the world about it. + * NOTE: This shouldn't be debug only since switching from debug->release + * etc is likly to modify the available ioctls and thus be a point where + * mistakes are exposed. This isn't run at at a performance critical time. 
+ */ + if(g_BridgeDispatchTable[ui32Index].pfFunction) + { +#if defined(DEBUG_BRIDGE_KM) + PVR_DPF((PVR_DBG_ERROR, + "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s", + __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName)); +#else + PVR_DPF((PVR_DBG_ERROR, + "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%u)", + __FUNCTION__, pszIOCName, ui32Index)); +#endif + PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.")); + } + + /* Any gaps are sub-optimal in-terms of memory usage, but we are mainly + * interested in spotting any large gap of wasted memory that could be + * accidentally introduced. + * + * This will currently flag up any gaps > 5 entries. + * + * NOTE: This shouldn't be debug only since switching from debug->release + * etc is likly to modify the available ioctls and thus be a point where + * mistakes are exposed. This isn't run at at a performance critical time. 
+ */ +// if((ui32PrevIndex != (IMG_UINT32)-1) && + if((ui32PrevIndex != ~0UL) && + ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) || + (ui32Index <= ui32PrevIndex))) + { +#if defined(DEBUG_BRIDGE_KM) + PVR_DPF((PVR_DBG_WARNING, + "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)", + __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName, + ui32Index, pszIOCName)); +#else + PVR_DPF((PVR_DBG_WARNING, + "%s: There is a gap in the dispatch table between indices %u and %u (%s)", + __FUNCTION__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName)); +#endif + PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.")); + } + + g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction; +#if defined(DEBUG_BRIDGE_KM) + g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName; + g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName; + g_BridgeDispatchTable[ui32Index].ui32CallCount = 0; + g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0; +#endif + + ui32PrevIndex = ui32Index; +} + +static IMG_INT +PVRSRVInitSrvConnectBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT); + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + + /* PRQA S 3415 1 */ /* side effects needed - if any step fails */ + if((OSProcHasPrivSrvInit() == IMG_FALSE) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN)) + { + psRetOUT->eError = PVRSRV_ERROR_SRV_CONNECT_FAILED; + return 0; + } + +#if defined (__linux__) || defined (__QNXNTO__) + PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_TRUE); +#endif + psPerProc->bInitProcess = IMG_TRUE; + + psRetOUT->eError = PVRSRV_OK; + + return 0; +} + + +static IMG_INT 
+PVRSRVInitSrvDisconnectBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT *psInitSrvDisconnectIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_DISCONNECT); + + if(!psPerProc->bInitProcess) + { + psRetOUT->eError = PVRSRV_ERROR_SRV_DISCONNECT_FAILED; + return 0; + } + + psPerProc->bInitProcess = IMG_FALSE; + +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + psPerProc->bPDumpActive = IMG_FALSE; +#endif + + PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_FALSE); + PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, IMG_TRUE); + + psRetOUT->eError = PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful); + + PVRSRVSetInitServerState( PVRSRV_INIT_SERVER_SUCCESSFUL , + ((psRetOUT->eError == PVRSRV_OK) && (psInitSrvDisconnectIN->bInitSuccesful)) + ? IMG_TRUE : IMG_FALSE); + + return 0; +} + + +static IMG_INT +PVRSRVEventObjectWaitBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT *psEventObjectWaitIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hOSEventKM; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT); + + psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hOSEventKM, + psEventObjectWaitIN->hOSEventKM, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = OSEventObjectWaitKM(hOSEventKM); + + return 0; +} + + +static IMG_INT +PVRSRVEventObjectOpenBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN *psEventObjectOpenIN, + PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN *psEventObjectOpenOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_EVENTOBJECT_KM sEventObject; + IMG_HANDLE hOSEvent; +#endif + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN); + + NEW_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc, 1) + + 
psEventObjectOpenOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sEventObject.hOSEventKM, +#else + &psEventObjectOpenIN->sEventObject.hOSEventKM, +#endif + psEventObjectOpenIN->sEventObject.hOSEventKM, + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT); + + if(psEventObjectOpenOUT->eError != PVRSRV_OK) + { + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + OSMemCopy(&sEventObject.szName, + &psEventObjectOpenIN->sEventObject.szName, + EVENTOBJNAME_MAXLENGTH); + + psEventObjectOpenOUT->eError = OSEventObjectOpenKM(&sEventObject, &hOSEvent); +#else + psEventObjectOpenOUT->eError = OSEventObjectOpenKM(&psEventObjectOpenIN->sEventObject, &psEventObjectOpenOUT->hOSEvent); +#endif + + if(psEventObjectOpenOUT->eError != PVRSRV_OK) + { + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) +/* Windows7, WinXP and Vista already use an Index type handle which the client glue uses directly */ +/* Linux requires a SID handle */ +#if !defined (WINXP) && !defined(SUPPORT_VISTA) + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psEventObjectOpenOUT->hOSEvent, + hOSEvent, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI); +#endif +#else + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psEventObjectOpenOUT->hOSEvent, + psEventObjectOpenOUT->hOSEvent, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI); +#endif + + COMMIT_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc) + + return 0; +} + + +static IMG_INT +PVRSRVEventObjectCloseBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE *psEventObjectCloseIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hOSEventKM; +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_EVENTOBJECT_KM sEventObject; +#endif + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined 
(SUPPORT_SID_INTERFACE) + &sEventObject.hOSEventKM, +#else + &psEventObjectCloseIN->sEventObject.hOSEventKM, +#endif + psEventObjectCloseIN->sEventObject.hOSEventKM, + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &hOSEventKM, + psEventObjectCloseIN->hOSEventKM, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + if(CopyFromUserWrapper(psPerProc, ui32BridgeID, + &sEventObject.szName, + &psEventObjectCloseIN->sEventObject.szName, + EVENTOBJNAME_MAXLENGTH) != PVRSRV_OK) + { + /*not nulling pointer, out of scope*/ + return -EFAULT; + } + + psRetOUT->eError = OSEventObjectCloseKM(&sEventObject, hOSEventKM); +#else + psRetOUT->eError = OSEventObjectCloseKM(&psEventObjectCloseIN->sEventObject, hOSEventKM); +#endif + + return 0; +} + + +typedef struct _MODIFY_SYNC_OP_INFO +{ + IMG_HANDLE hResItem; + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; + IMG_UINT32 ui32ModifyFlags; + IMG_UINT32 ui32ReadOpsPendingSnapShot; + IMG_UINT32 ui32WriteOpsPendingSnapShot; + IMG_UINT32 ui32ReadOps2PendingSnapShot; +} MODIFY_SYNC_OP_INFO; + + +static PVRSRV_ERROR DoQuerySyncOpsSatisfied(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + IMG_UINT32 ui32ReadOpsPendingSnapShot, + IMG_UINT32 ui32WriteOpsPendingSnapShot, + IMG_UINT32 ui32ReadOps2PendingSnapShot) +{ + IMG_UINT32 ui32WriteOpsPending; + IMG_UINT32 ui32ReadOpsPending; + IMG_UINT32 ui32ReadOps2Pending; + + /* + * + * We wait until the complete count reaches _or_moves_past_ the + * snapshot value. + * + */ + + if (!psKernelSyncInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* + let p be the pending ops count + let c be the complete ops count + let p' be the previously taken snapshot + + if p exceeds c by an amount greater than that by which + p exceeds p', then the condition is not yet satisfied. 
+ + Note that (p - c) can never be negative, and neither can (p - p') + so we can do the comparison using unsigned arithmetic + */ + ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending; + ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending; + ui32ReadOps2Pending = psKernelSyncInfo->psSyncData->ui32ReadOps2Pending; + + if((ui32WriteOpsPending - ui32WriteOpsPendingSnapShot >= + ui32WriteOpsPending - psKernelSyncInfo->psSyncData->ui32WriteOpsComplete) && + (ui32ReadOpsPending - ui32ReadOpsPendingSnapShot >= + ui32ReadOpsPending - psKernelSyncInfo->psSyncData->ui32ReadOpsComplete) && + (ui32ReadOps2Pending - ui32ReadOps2PendingSnapShot >= + ui32ReadOps2Pending - psKernelSyncInfo->psSyncData->ui32ReadOps2Complete)) + { +#if defined(PDUMP) && !defined(SUPPORT_VGX) + /* pdump the sync pol: reads */ + PDumpComment("Poll for read ops complete to reach value (pdump: %u, actual snapshot: %u)", + psKernelSyncInfo->psSyncData->ui32LastReadOpDumpVal, + ui32ReadOpsPendingSnapShot); + PDumpMemPolKM(psKernelSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete), + psKernelSyncInfo->psSyncData->ui32LastReadOpDumpVal, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_EQUAL, /* * see "NB" below */ + 0, + MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM)); + + /* pdump the sync pol: writes */ + PDumpComment("Poll for write ops complete to reach value (pdump: %u, actual snapshot: %u)", + psKernelSyncInfo->psSyncData->ui32LastOpDumpVal, + ui32WriteOpsPendingSnapShot); + PDumpMemPolKM(psKernelSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete), + psKernelSyncInfo->psSyncData->ui32LastOpDumpVal, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_EQUAL, /* * see "NB" below */ + 0, + MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM)); + /* NB: FIXME -- really need to POL on an expression to + accurately reflect the condition we need to check. How to + do this in PDUMP? 
*/ +#endif + return PVRSRV_OK; + } + else + { + return PVRSRV_ERROR_RETRY; + } +} + + +static PVRSRV_ERROR DoModifyCompleteSyncOps(MODIFY_SYNC_OP_INFO *psModSyncOpInfo) +{ + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; + + psKernelSyncInfo = psModSyncOpInfo->psKernelSyncInfo; + + if (!psKernelSyncInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* If user has used the API correctly, we will always have reached the pending snapshot. + We should catch this error on the client side of the bridge and report it in an obvious way */ + if((psModSyncOpInfo->ui32WriteOpsPendingSnapShot != psKernelSyncInfo->psSyncData->ui32WriteOpsComplete) + || (psModSyncOpInfo->ui32ReadOpsPendingSnapShot != psKernelSyncInfo->psSyncData->ui32ReadOpsComplete)) + { + return PVRSRV_ERROR_BAD_SYNC_STATE; + } + + /* update the WOpComplete */ + if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC) + { + psKernelSyncInfo->psSyncData->ui32WriteOpsComplete++; + } + + /* update the ROpComplete */ + if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC) + { + psKernelSyncInfo->psSyncData->ui32ReadOpsComplete++; + } + + return PVRSRV_OK; +} + + +static PVRSRV_ERROR ModifyCompleteSyncOpsCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + MODIFY_SYNC_OP_INFO *psModSyncOpInfo; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + if (!pvParam) + { + PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: invalid parameter")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psModSyncOpInfo = (MODIFY_SYNC_OP_INFO*)pvParam; + + if (psModSyncOpInfo->psKernelSyncInfo) + { + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if (DoQuerySyncOpsSatisfied(psModSyncOpInfo->psKernelSyncInfo, + psModSyncOpInfo->ui32ReadOpsPendingSnapShot, + psModSyncOpInfo->ui32WriteOpsPendingSnapShot, + psModSyncOpInfo->ui32ReadOps2PendingSnapShot) == PVRSRV_OK) + { + goto OpFlushedComplete; + } + PVR_DPF((PVR_DBG_WARNING, 
"ModifyCompleteSyncOpsCallBack: waiting for current Ops to flush")); + OSSleepms(1); + } END_LOOP_UNTIL_TIMEOUT(); + + PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: timeout whilst waiting for current Ops to flush.")); + PVR_DPF((PVR_DBG_ERROR, " Write ops pending snapshot = %d, write ops complete = %d", + psModSyncOpInfo->ui32WriteOpsPendingSnapShot, + psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32WriteOpsComplete)); + PVR_DPF((PVR_DBG_ERROR, " Read ops pending snapshot = %d, read ops complete = %d", + psModSyncOpInfo->ui32ReadOpsPendingSnapShot, + psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32ReadOpsComplete)); + PVR_DPF((PVR_DBG_ERROR, " Read ops pending snapshot = %d, read ops2 complete = %d", + psModSyncOpInfo->ui32ReadOps2PendingSnapShot, + psModSyncOpInfo->psKernelSyncInfo->psSyncData->ui32ReadOps2Complete)); + return PVRSRV_ERROR_TIMEOUT; + +OpFlushedComplete: + DoModifyCompleteSyncOps(psModSyncOpInfo); + PVRSRVKernelSyncInfoDecRef(psModSyncOpInfo->psKernelSyncInfo, IMG_NULL); + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MODIFY_SYNC_OP_INFO), (IMG_VOID *)psModSyncOpInfo, 0); + + /* re-kick all services managed devices */ + PVRSRVScheduleDeviceCallbacks(); + + return PVRSRV_OK; +} + + +static IMG_INT +PVRSRVCreateSyncInfoModObjBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + PVRSRV_BRIDGE_OUT_CREATE_SYNC_INFO_MOD_OBJ *psCreateSyncInfoModObjOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + MODIFY_SYNC_OP_INFO *psModSyncOpInfo; + + PVR_UNREFERENCED_PARAMETER(psBridgeIn); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ); + + NEW_HANDLE_BATCH_OR_ERROR(psCreateSyncInfoModObjOUT->eError, psPerProc, 1) + + ASSIGN_AND_EXIT_ON_ERROR(psCreateSyncInfoModObjOUT->eError, + OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(MODIFY_SYNC_OP_INFO), + (IMG_VOID **)&psModSyncOpInfo, 0, + "ModSyncOpInfo (MODIFY_SYNC_OP_INFO)")); + + psModSyncOpInfo->psKernelSyncInfo = IMG_NULL; /* mark it as empty */ + + 
psCreateSyncInfoModObjOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase, + &psCreateSyncInfoModObjOUT->hKernelSyncInfoModObj, + psModSyncOpInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ, + PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE); + + if (psCreateSyncInfoModObjOUT->eError != PVRSRV_OK) + { + return 0; + } + + psModSyncOpInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_MODIFY_SYNC_OPS, + psModSyncOpInfo, + 0, + &ModifyCompleteSyncOpsCallBack); + + COMMIT_HANDLE_BATCH_OR_ERROR(psCreateSyncInfoModObjOUT->eError, psPerProc) + + return 0; +} + + +static IMG_INT +PVRSRVDestroySyncInfoModObjBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_DESTROY_SYNC_INFO_MOD_OBJ *psDestroySyncInfoModObjIN, + PVRSRV_BRIDGE_RETURN *psDestroySyncInfoModObjOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + MODIFY_SYNC_OP_INFO *psModSyncOpInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ); + + psDestroySyncInfoModObjOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psModSyncOpInfo, + psDestroySyncInfoModObjIN->hKernelSyncInfoModObj, + PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ); + if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: PVRSRVLookupHandle failed")); + return 0; + } + + if(psModSyncOpInfo->psKernelSyncInfo != IMG_NULL) + { + /* Not empty */ + psDestroySyncInfoModObjOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + + PVRSRVKernelSyncInfoDecRef(psModSyncOpInfo->psKernelSyncInfo, IMG_NULL); + + psDestroySyncInfoModObjOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psDestroySyncInfoModObjIN->hKernelSyncInfoModObj, + PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ); + + if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: PVRSRVReleaseHandle failed")); + return 0; + } + + psDestroySyncInfoModObjOUT->eError = ResManFreeResByPtr(psModSyncOpInfo->hResItem, CLEANUP_WITH_POLL); + 
if (psDestroySyncInfoModObjOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVDestroySyncInfoModObjBW: ResManFreeResByPtr failed")); + return 0; + } + + return 0; +} + + +static IMG_INT +PVRSRVModifyPendingSyncOpsBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsIN, + PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; + MODIFY_SYNC_OP_INFO *psModSyncOpInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS); + + psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psModSyncOpInfo, + psModifySyncOpsIN->hKernelSyncInfoModObj, + PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ); + if (psModifySyncOpsOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed")); + return 0; + } + + psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psKernelSyncInfo, + psModifySyncOpsIN->hKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if (psModifySyncOpsOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed")); + return 0; + } + + if(psModSyncOpInfo->psKernelSyncInfo) + { + /* SyncInfoModification is not empty */ + psModifySyncOpsOUT->eError = PVRSRV_ERROR_RETRY; + PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVModifyPendingSyncOpsBW: SyncInfo Modification object is not empty")); + return 0; + } + + /* Should never happen, but check to be sure */ + if (psKernelSyncInfo == IMG_NULL) + { + psModifySyncOpsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVModifyPendingSyncOpsBW: SyncInfo bad handle")); + return 0; + } + + PVRSRVKernelSyncInfoIncRef(psKernelSyncInfo, IMG_NULL); + /* setup info to store in resman */ + psModSyncOpInfo->psKernelSyncInfo = psKernelSyncInfo; + psModSyncOpInfo->ui32ModifyFlags = 
psModifySyncOpsIN->ui32ModifyFlags; + psModSyncOpInfo->ui32ReadOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32ReadOpsPending; + psModSyncOpInfo->ui32WriteOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32WriteOpsPending; + psModSyncOpInfo->ui32ReadOps2PendingSnapShot = psKernelSyncInfo->psSyncData->ui32ReadOps2Pending; + + /* We return PRE-INCREMENTED versions of all sync Op Values */ + + psModifySyncOpsOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending; + psModifySyncOpsOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending; + psModifySyncOpsOUT->ui32ReadOps2Pending = psKernelSyncInfo->psSyncData->ui32ReadOps2Pending; + + if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC) + { + psKernelSyncInfo->psSyncData->ui32WriteOpsPending++; + } + + if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC) + { + psKernelSyncInfo->psSyncData->ui32ReadOpsPending++; + } + + /* pull the resman item to the front of the list */ + psModifySyncOpsOUT->eError = ResManDissociateRes(psModSyncOpInfo->hResItem, + psPerProc->hResManContext); + + if (psModifySyncOpsOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed")); + return 0; + } + + return 0; +} + + +static IMG_INT +PVRSRVModifyCompleteSyncOpsBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS *psModifySyncOpsIN, + PVRSRV_BRIDGE_RETURN *psModifySyncOpsOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + MODIFY_SYNC_OP_INFO *psModSyncOpInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS); + + psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psModSyncOpInfo, + psModifySyncOpsIN->hKernelSyncInfoModObj, + PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ); + if (psModifySyncOpsOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: PVRSRVLookupHandle failed")); + return 0; 
+ } + + if(psModSyncOpInfo->psKernelSyncInfo == IMG_NULL) + { + /* Empty */ + psModifySyncOpsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + + psModifySyncOpsOUT->eError = DoModifyCompleteSyncOps(psModSyncOpInfo); + + if (psModifySyncOpsOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: DoModifyCompleteSyncOps failed")); + return 0; + } + + PVRSRVKernelSyncInfoDecRef(psModSyncOpInfo->psKernelSyncInfo, IMG_NULL); + psModSyncOpInfo->psKernelSyncInfo = IMG_NULL; + + /* re-kick all services managed devices */ + PVRSRVScheduleDeviceCallbacks(); + + return 0; +} + + +static IMG_INT +PVRSRVSyncOpsTakeTokenBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SYNC_OPS_TAKE_TOKEN *psSyncOpsTakeTokenIN, + PVRSRV_BRIDGE_OUT_SYNC_OPS_TAKE_TOKEN *psSyncOpsTakeTokenOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN); + + psSyncOpsTakeTokenOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psKernelSyncInfo, + psSyncOpsTakeTokenIN->hKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if (psSyncOpsTakeTokenOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsTakeTokenBW: PVRSRVLookupHandle failed")); + return 0; + } + + /* We return PRE-INCREMENTED versions of all sync Op Values */ + + psSyncOpsTakeTokenOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending; + psSyncOpsTakeTokenOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending; + psSyncOpsTakeTokenOUT->ui32ReadOps2Pending = psKernelSyncInfo->psSyncData->ui32ReadOps2Pending; + + return 0; +} + + +static IMG_INT +PVRSRVSyncOpsFlushToTokenBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_TOKEN *psSyncOpsFlushToTokenIN, + PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToTokenOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; + IMG_UINT32 
ui32ReadOpsPendingSnapshot; + IMG_UINT32 ui32WriteOpsPendingSnapshot; + IMG_UINT32 ui32ReadOps2PendingSnapshot; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN); + + psSyncOpsFlushToTokenOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psKernelSyncInfo, + psSyncOpsFlushToTokenIN->hKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if (psSyncOpsFlushToTokenOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToTokenBW: PVRSRVLookupHandle failed")); + return 0; + } + + ui32ReadOpsPendingSnapshot = psSyncOpsFlushToTokenIN->ui32ReadOpsPendingSnapshot; + ui32WriteOpsPendingSnapshot = psSyncOpsFlushToTokenIN->ui32WriteOpsPendingSnapshot; + ui32ReadOps2PendingSnapshot = psSyncOpsFlushToTokenIN->ui32ReadOps2PendingSnapshot; + + psSyncOpsFlushToTokenOUT->eError = DoQuerySyncOpsSatisfied(psKernelSyncInfo, + ui32ReadOpsPendingSnapshot, + ui32WriteOpsPendingSnapshot, + ui32ReadOps2PendingSnapshot); + + if (psSyncOpsFlushToTokenOUT->eError != PVRSRV_OK && psSyncOpsFlushToTokenOUT->eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToTokenBW: DoQuerySyncOpsSatisfied failed")); + return 0; + } + + return 0; +} + + +static IMG_INT +PVRSRVSyncOpsFlushToModObjBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_MOD_OBJ *psSyncOpsFlushToModObjIN, + PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToModObjOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + MODIFY_SYNC_OP_INFO *psModSyncOpInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ); + + psSyncOpsFlushToModObjOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psModSyncOpInfo, + psSyncOpsFlushToModObjIN->hKernelSyncInfoModObj, + PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ); + if (psSyncOpsFlushToModObjOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToModObjBW: PVRSRVLookupHandle failed")); + return 0; + } + + if(psModSyncOpInfo->psKernelSyncInfo == 
IMG_NULL) + { + /* Empty */ + psSyncOpsFlushToModObjOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + + psSyncOpsFlushToModObjOUT->eError = DoQuerySyncOpsSatisfied(psModSyncOpInfo->psKernelSyncInfo, + psModSyncOpInfo->ui32ReadOpsPendingSnapShot, + psModSyncOpInfo->ui32WriteOpsPendingSnapShot, + psModSyncOpInfo->ui32ReadOps2PendingSnapShot); + + if (psSyncOpsFlushToModObjOUT->eError != PVRSRV_OK && psSyncOpsFlushToModObjOUT->eError != PVRSRV_ERROR_RETRY) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToModObjBW: DoQuerySyncOpsSatisfied failed")); + return 0; + } + + return 0; +} + + +static IMG_INT +PVRSRVSyncOpsFlushToDeltaBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SYNC_OPS_FLUSH_TO_DELTA *psSyncOpsFlushToDeltaIN, + PVRSRV_BRIDGE_RETURN *psSyncOpsFlushToDeltaOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + IMG_UINT32 ui32DeltaRead; + IMG_UINT32 ui32DeltaWrite; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA); + + psSyncOpsFlushToDeltaOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psSyncInfo, + psSyncOpsFlushToDeltaIN->hKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if (psSyncOpsFlushToDeltaOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncOpsFlushToDeltaBW: PVRSRVLookupHandle failed")); + return 0; + } + + /* FIXME: there's logic here in the bridge-wrapper - this needs to be moved to + a better place */ + + ui32DeltaRead = psSyncInfo->psSyncData->ui32ReadOpsPending - psSyncInfo->psSyncData->ui32ReadOpsComplete; + ui32DeltaWrite = psSyncInfo->psSyncData->ui32WriteOpsPending - psSyncInfo->psSyncData->ui32WriteOpsComplete; + + if (ui32DeltaRead <= psSyncOpsFlushToDeltaIN->ui32Delta && ui32DeltaWrite <= psSyncOpsFlushToDeltaIN->ui32Delta) + { +#if defined(PDUMP) && !defined(SUPPORT_VGX) + /* pdump the sync pol: reads */ + PDumpComment("Poll for read ops complete to delta (%u)", + psSyncOpsFlushToDeltaIN->ui32Delta); + 
psSyncOpsFlushToDeltaOUT->eError = + PDumpMemPolKM(psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete), + psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_GREATEREQUAL, + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); + + /* pdump the sync pol: writes */ + PDumpComment("Poll for write ops complete to delta (%u)", + psSyncOpsFlushToDeltaIN->ui32Delta); + psSyncOpsFlushToDeltaOUT->eError = + PDumpMemPolKM(psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete), + psSyncInfo->psSyncData->ui32LastOpDumpVal, + 0xFFFFFFFF, + PDUMP_POLL_OPERATOR_GREATEREQUAL, + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); +#endif + + psSyncOpsFlushToDeltaOUT->eError = PVRSRV_OK; + } + else + { + psSyncOpsFlushToDeltaOUT->eError = PVRSRV_ERROR_RETRY; + } + + return 0; +} + + +static PVRSRV_ERROR +FreeSyncInfoCallback(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)pvParam; + + PVRSRVKernelSyncInfoDecRef(psSyncInfo, IMG_NULL); + + return PVRSRV_OK; +} + + +static IMG_INT +PVRSRVAllocSyncInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_ALLOC_SYNC_INFO *psAllocSyncInfoIN, + PVRSRV_BRIDGE_OUT_ALLOC_SYNC_INFO *psAllocSyncInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_HANDLE hDevMemContext; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SYNC_INFO); + + NEW_HANDLE_BATCH_OR_ERROR(psAllocSyncInfoOUT->eError, psPerProc, 1) + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_HANDLE *)&psDeviceNode, + psAllocSyncInfoIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(eError != PVRSRV_OK) + { + goto allocsyncinfo_errorexit; + } + + hDevMemContext = 
psDeviceNode->sDevMemoryInfo.pBMKernelContext; + + eError = PVRSRVAllocSyncInfoKM(psDeviceNode, + hDevMemContext, + &psSyncInfo); + + if (eError != PVRSRV_OK) + { + goto allocsyncinfo_errorexit; + } + + eError = PVRSRVAllocHandle(psPerProc->psHandleBase, + &psAllocSyncInfoOUT->hKernelSyncInfo, + psSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE); + + if(eError != PVRSRV_OK) + { + goto allocsyncinfo_errorexit_freesyncinfo; + } + + psSyncInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_SYNC_INFO, + psSyncInfo, + 0, + &FreeSyncInfoCallback); + + /* Success */ + goto allocsyncinfo_commit; + + /* Error handling */ + allocsyncinfo_errorexit_freesyncinfo: + PVRSRVKernelSyncInfoDecRef(psSyncInfo, IMG_NULL); + + allocsyncinfo_errorexit: + + /* Common exit */ + allocsyncinfo_commit: + psAllocSyncInfoOUT->eError = eError; + COMMIT_HANDLE_BATCH_OR_ERROR(eError, psPerProc); + + return 0; +} + + +static IMG_INT +PVRSRVFreeSyncInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_FREE_SYNC_INFO *psFreeSyncInfoIN, + PVRSRV_BRIDGE_RETURN *psFreeSyncInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + PVRSRV_ERROR eError; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SYNC_INFO); + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psSyncInfo, + psFreeSyncInfoIN->hKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: PVRSRVLookupHandle failed")); + psFreeSyncInfoOUT->eError = eError; + return 0; + } + + eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psFreeSyncInfoIN->hKernelSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: PVRSRVReleaseHandle failed")); + psFreeSyncInfoOUT->eError = eError; + return 0; + } + + eError = ResManFreeResByPtr(psSyncInfo->hResItem, CLEANUP_WITH_POLL); + if (eError != 
PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeSyncInfoBW: ResManFreeResByPtr failed")); + psFreeSyncInfoOUT->eError = eError; + return 0; + } + + return 0; +} + + +PVRSRV_ERROR +CommonBridgeInit(IMG_VOID) +{ + IMG_UINT32 i; + + SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES, PVRSRVEnumerateDevicesBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO, PVRSRVAcquireDeviceDataBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT, PVRSRVCreateDeviceMemContextBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT, PVRSRVDestroyDeviceMemContextBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO, PVRSRVGetDeviceMemHeapInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM, PVRSRVAllocDeviceMemBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM, PVRSRVFreeDeviceMemBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM, PVRSRVGetFreeDeviceMemBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA, PVRMMapOSMemHandleToMMapDataBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES, PVRSRVDisconnectBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM , DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY, PVRSRVMapDeviceMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY, PVRSRVUnmapDeviceMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, 
PVRSRVMapDeviceClassMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, PVRSRVUnmapDeviceClassMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM, PVRSRVExportDeviceMemBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MMAP_DATA, PVRMMapReleaseMMapDataBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CHG_DEV_MEM_ATTRIBS, PVRSRVChangeDeviceMemoryAttributesBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY_2, PVRSRVMapDeviceMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2, PVRSRVExportDeviceMemBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MULTI_MANAGE_DEV_MEM, PVRSRVMultiManageDevMemBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CORE_CMD_RESERVED_1, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CORE_CMD_RESERVED_2, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CORE_CMD_RESERVED_3, DummyBW); +#if defined(SUPPORT_ION) + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_ION_HANDLE, PVRSRVMapIonHandleBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_ION_HANDLE, PVRSRVUnmapIonHandleBW); +#endif + + /* SIM */ + SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW); + + /* User Mapping */ + SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW); + + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW); + + /* API to retrieve misc. info. 
from services */ + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MISC_INFO, DummyBW); + + /* Overlay ioctls */ +#if defined (SUPPORT_OVERLAY_ROTATE_BLIT) + SetDispatchTableEntry(PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES, DummyBW); +#endif + + + /* PDUMP */ +#if defined(PDUMP) + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING, PDumpIsCaptureFrameBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPAGES, PDumpMemPagesBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO, PDumpDriverInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR, PDumpPDDevPAddrBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ, PDumpCycleCountRegReadBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STARTINITPHASE, PDumpStartInitPhaseBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STOPINITPHASE, PDumpStopInitPhaseBW); +#endif /* defined(PDUMP) */ + + /* DisplayClass APIs */ + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW); + + /* device class enum */ + SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW); + + /* display class API */ + 
SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, PVRSRVOpenDCDeviceBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, PVRSRVCloseDCDeviceBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, PVRSRVEnumDCFormatsBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, PVRSRVEnumDCDimsBW); +#if defined(SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER) + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, PVRSRVGetDCSystemBufferBW); +#else + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, DummyBW); +#endif + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, PVRSRVGetDCInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, PVRSRVCreateDCSwapChainBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, PVRSRVDestroyDCSwapChainBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, PVRSRVSetDCDstRectBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, PVRSRVSetDCSrcRectBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, PVRSRVSetDCDstColourKeyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, PVRSRVSetDCSrcColourKeyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, PVRSRVGetDCBuffersBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, PVRSRVSwapToDCBufferBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER2, PVRSRVSwapToDCBuffer2BW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, PVRSRVSwapToDCSystemBW); + + /* buffer class API */ + SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, PVRSRVOpenBCDeviceBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, PVRSRVCloseBCDeviceBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, PVRSRVGetBCInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, PVRSRVGetBCBufferBW); + + /* Wrap/Unwrap external memory */ + SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY, 
PVRSRVWrapExtMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY, PVRSRVUnwrapExtMemoryBW); + + /* Shared memory */ + SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM, PVRSRVAllocSharedSysMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM, PVRSRVFreeSharedSysMemoryBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM, PVRSRVMapMemInfoMemBW); + + /* Intialisation Service support */ + SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT, &PVRSRVInitSrvConnectBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT, &PVRSRVInitSrvDisconnectBW); + + /* Event Object */ + SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, &PVRSRVEventObjectWaitBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN, &PVRSRVEventObjectOpenBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE, &PVRSRVEventObjectCloseBW); + + SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_SYNC_INFO_MOD_OBJ, PVRSRVCreateSyncInfoModObjBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_SYNC_INFO_MOD_OBJ, PVRSRVDestroySyncInfoModObjBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS, PVRSRVModifyPendingSyncOpsBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS, PVRSRVModifyCompleteSyncOpsBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_TAKE_TOKEN, PVRSRVSyncOpsTakeTokenBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_TOKEN, PVRSRVSyncOpsFlushToTokenBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_MOD_OBJ, PVRSRVSyncOpsFlushToModObjBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC_OPS_FLUSH_TO_DELTA, PVRSRVSyncOpsFlushToDeltaBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SYNC_INFO, PVRSRVAllocSyncInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SYNC_INFO, PVRSRVFreeSyncInfoBW); + +#if defined (SUPPORT_SGX) + SetSGXDispatchTableEntry(); +#endif +#if defined (SUPPORT_VGX) + SetVGXDispatchTableEntry(); +#endif +#if defined (SUPPORT_MSVDX) + SetMSVDXDispatchTableEntry(); +#endif + + /* A safety 
net to help ensure there won't be any un-initialised dispatch + * table entries... */ + /* Note: This is specifically done _after_ setting all the dispatch entries + * so that SetDispatchTableEntry can detect mistakes where entries + * overlap */ + for(i=0;i<BRIDGE_DISPATCH_TABLE_ENTRY_COUNT;i++) + { + if(!g_BridgeDispatchTable[i].pfFunction) + { + g_BridgeDispatchTable[i].pfFunction = &DummyBW; +#if defined(DEBUG_BRIDGE_KM) + g_BridgeDispatchTable[i].pszIOCName = "_PVRSRV_BRIDGE_DUMMY"; + g_BridgeDispatchTable[i].pszFunctionName = "DummyBW"; + g_BridgeDispatchTable[i].ui32CallCount = 0; + g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0; + g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0; +#endif + } + } + + return PVRSRV_OK; +} + +IMG_INT BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc, + PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM) +{ + IMG_VOID * psBridgeIn; + IMG_VOID * psBridgeOut; + BridgeWrapperFunction pfBridgeHandler; + IMG_UINT32 ui32BridgeID = psBridgePackageKM->ui32BridgeID; + IMG_INT err = -EFAULT; + +#if defined(DEBUG_TRACE_BRIDGE_KM) + PVR_DPF((PVR_DBG_ERROR, "%s: %s", + __FUNCTION__, + g_BridgeDispatchTable[ui32BridgeID].pszIOCName)); +#endif + +#if defined(DEBUG_BRIDGE_KM) + g_BridgeDispatchTable[ui32BridgeID].ui32CallCount++; + g_BridgeGlobalStats.ui32IOCTLCount++; +#endif + + if(!psPerProc->bInitProcess) + { + if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN)) + { + if(!PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed. 
Driver unusable.", + __FUNCTION__)); + goto return_fault; + } + } + else + { + if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation is in progress", + __FUNCTION__)); + goto return_fault; + } + else + { + /* Only certain operations are allowed */ + switch(ui32BridgeID) + { + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES): + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_DISCONNECT_SERVICES): + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_CONNECT): + case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_DISCONNECT): + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Driver initialisation not completed yet.", + __FUNCTION__)); + goto return_fault; + } + } + } + } + +#if defined(__linux__) + { + /* This should be moved into the linux specific code */ + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + /* We have already set up some static buffers to store our ioctl data... */ + psBridgeIn = ((ENV_DATA *)psSysData->pvEnvSpecificData)->pvBridgeData; + psBridgeOut = (IMG_PVOID)((IMG_PBYTE)psBridgeIn + PVRSRV_MAX_BRIDGE_IN_SIZE); + + /* check we are not using a bigger bridge than allocated */ + if((psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE) || + (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE)) + { + goto return_fault; + } + + + if(psBridgePackageKM->ui32InBufferSize > 0) + { + if(!OSAccessOK(PVR_VERIFY_READ, + psBridgePackageKM->pvParamIn, + psBridgePackageKM->ui32InBufferSize)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pvParamIn pointer", __FUNCTION__)); + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + psBridgeIn, + psBridgePackageKM->pvParamIn, + psBridgePackageKM->ui32InBufferSize) + != PVRSRV_OK) + { + goto return_fault; + } + } + } +#else + psBridgeIn = psBridgePackageKM->pvParamIn; + psBridgeOut = psBridgePackageKM->pvParamOut; +#endif + + if(ui32BridgeID >= (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: ui32BridgeID = %d is 
out if range!", + __FUNCTION__, ui32BridgeID)); + goto return_fault; + } + pfBridgeHandler = + (BridgeWrapperFunction)g_BridgeDispatchTable[ui32BridgeID].pfFunction; + err = pfBridgeHandler(ui32BridgeID, + psBridgeIn, + psBridgeOut, + psPerProc); + if(err < 0) + { + goto return_fault; + } + +#if defined(__linux__) + /* This should be moved into the linux specific code */ + if(CopyToUserWrapper(psPerProc, + ui32BridgeID, + psBridgePackageKM->pvParamOut, + psBridgeOut, + psBridgePackageKM->ui32OutBufferSize) + != PVRSRV_OK) + { + goto return_fault; + } +#endif + + err = 0; +return_fault: + + ReleaseHandleBatch(psPerProc); + return err; +} + +/****************************************************************************** + End of file (bridged_pvr_bridge.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/bridged/bridged_pvr_bridge.h b/pvr-source/services4/srvkm/bridged/bridged_pvr_bridge.h new file mode 100644 index 0000000..b0145f7 --- /dev/null +++ b/pvr-source/services4/srvkm/bridged/bridged_pvr_bridge.h @@ -0,0 +1,257 @@ +/*************************************************************************/ /*! +@Title PVR Bridge Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the PVR Bridge code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __BRIDGED_PVR_BRIDGE_H__ +#define __BRIDGED_PVR_BRIDGE_H__ + +#include "pvr_bridge.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__linux__) +#define PVRSRV_GET_BRIDGE_ID(X) _IOC_NR(X) +#else +#define PVRSRV_GET_BRIDGE_ID(X) ((X) - PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST)) +#endif + +#ifndef ENOMEM +#define ENOMEM 12 +#endif +#ifndef EFAULT +#define EFAULT 14 +#endif +#ifndef ENOTTY +#define ENOTTY 25 +#endif + +#if defined(DEBUG_BRIDGE_KM) +PVRSRV_ERROR +CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData, + IMG_UINT32 ui32BridgeID, + IMG_VOID *pvDest, + IMG_VOID *pvSrc, + IMG_UINT32 ui32Size); +PVRSRV_ERROR +CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData, + IMG_UINT32 ui32BridgeID, + IMG_VOID *pvDest, + IMG_VOID *pvSrc, + IMG_UINT32 ui32Size); +#else +#define CopyFromUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \ + OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size) +#define CopyToUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \ + OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size) +#endif + + +#define ASSIGN_AND_RETURN_ON_ERROR(error, src, res) \ + do \ + { \ + (error) = (src); \ + if ((error) != PVRSRV_OK) \ + { \ + return (res); \ + } \ + } while ((error) != PVRSRV_OK); + +#define ASSIGN_AND_EXIT_ON_ERROR(error, src) \ + ASSIGN_AND_RETURN_ON_ERROR(error, src, 0) + +#if defined (PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) +#ifdef INLINE_IS_PRAGMA +#pragma inline(NewHandleBatch) +#endif +static INLINE PVRSRV_ERROR +NewHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_UINT32 ui32BatchSize) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(!psPerProc->bHandlesBatched); + + eError = PVRSRVNewHandleBatch(psPerProc->psHandleBase, ui32BatchSize); + + if (eError == PVRSRV_OK) + { + psPerProc->bHandlesBatched = IMG_TRUE; + } + + return eError; +} + +#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize) 
\ + ASSIGN_AND_EXIT_ON_ERROR(error, NewHandleBatch(psPerProc, ui32BatchSize)) + +#ifdef INLINE_IS_PRAGMA +#pragma inline(CommitHandleBatch) +#endif +static INLINE PVRSRV_ERROR +CommitHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVR_ASSERT(psPerProc->bHandlesBatched); + + psPerProc->bHandlesBatched = IMG_FALSE; + + return PVRSRVCommitHandleBatch(psPerProc->psHandleBase); +} + + +#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc) \ + ASSIGN_AND_EXIT_ON_ERROR(error, CommitHandleBatch(psPerProc)) + +#ifdef INLINE_IS_PRAGMA +#pragma inline(ReleaseHandleBatch) +#endif +static INLINE IMG_VOID +ReleaseHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + if (psPerProc->bHandlesBatched) + { + psPerProc->bHandlesBatched = IMG_FALSE; + + PVRSRVReleaseHandleBatch(psPerProc->psHandleBase); + } +} +#else /* defined(PVR_SECURE_HANDLES) */ +#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize) +#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc) +#define ReleaseHandleBatch(psPerProc) +#endif /* defined(PVR_SECURE_HANDLES) */ + +IMG_INT +DummyBW(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + IMG_VOID *psBridgeOut, + PVRSRV_PER_PROCESS_DATA *psPerProc); + +typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32BridgeID, + IMG_VOID *psBridgeIn, + IMG_VOID *psBridgeOut, + PVRSRV_PER_PROCESS_DATA *psPerProc); + +typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY +{ + BridgeWrapperFunction pfFunction; /*!< The wrapper function that validates the ioctl + arguments before calling into srvkm proper */ +#if defined(DEBUG_BRIDGE_KM) + const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */ + const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. 
"PVRSRVConnectBW" */ + IMG_UINT32 ui32CallCount; /*!< The total number of times the ioctl has been called */ + IMG_UINT32 ui32CopyFromUserTotalBytes; /*!< The total number of bytes copied from + userspace within this ioctl */ + IMG_UINT32 ui32CopyToUserTotalBytes; /*!< The total number of bytes copied from + userspace within this ioctl */ +#endif +}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY; + +#if defined(SUPPORT_VGX) || defined(SUPPORT_MSVDX) + #if defined(SUPPORT_VGX) + #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_VGX_CMD+1) + #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_VGX_CMD + #else + #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_MSVDX_CMD+1) + #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_MSVDX_CMD + #endif +#else + #if defined(SUPPORT_SGX) + #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_SGX_CMD+1) + #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_SGX_CMD + #else + #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1) + #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD + #endif +#endif + +extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT]; + +IMG_VOID +_SetDispatchTableEntry(IMG_UINT32 ui32Index, + const IMG_CHAR *pszIOCName, + BridgeWrapperFunction pfFunction, + const IMG_CHAR *pszFunctionName); + + +/* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */ +#define SetDispatchTableEntry(ui32Index, pfFunction) \ + _SetDispatchTableEntry(PVRSRV_GET_BRIDGE_ID(ui32Index), #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction) + +#define DISPATCH_TABLE_GAP_THRESHOLD 5 + +#if defined(DEBUG) +#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_ASSERT(X == PVRSRV_GET_BRIDGE_ID(Y)) +#else +#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_UNREFERENCED_PARAMETER(X) +#endif + + +#if defined(DEBUG_BRIDGE_KM) +typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS +{ + IMG_UINT32 ui32IOCTLCount; + IMG_UINT32 
ui32TotalCopyFromUserBytes; + IMG_UINT32 ui32TotalCopyToUserBytes; +}PVRSRV_BRIDGE_GLOBAL_STATS; + +/* OS specific code way want to report the stats held here and within the + * BRIDGE_DISPATCH_TABLE_ENTRYs (E.g. on Linux we report these via a + * proc entry /proc/pvr/bridge_stats. Ref printLinuxBridgeStats()) */ +extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats; +#endif + + +PVRSRV_ERROR CommonBridgeInit(IMG_VOID); + +IMG_INT BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc, + PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM); + +#if defined (__cplusplus) +} +#endif + +#endif /* __BRIDGED_PVR_BRIDGE_H__ */ + +/****************************************************************************** + End of file (bridged_pvr_bridge.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/bridged/bridged_support.c b/pvr-source/services4/srvkm/bridged/bridged_support.c new file mode 100644 index 0000000..25baf29 --- /dev/null +++ b/pvr-source/services4/srvkm/bridged/bridged_support.c @@ -0,0 +1,117 @@ +/*************************************************************************/ /*! +@Title PVR Bridge Support Functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description User/kernel mode bridge support. The functions in here + may be used beyond the bridge code proper (e.g. Linux + mmap interface). +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "img_defs.h" +#include "servicesint.h" +#include "bridged_support.h" + + +/* + * Derive the internal OS specific memory handle from a secure + * handle. + */ +PVRSRV_ERROR +#if defined (SUPPORT_SID_INTERFACE) +PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psHandleBase, IMG_HANDLE *phOSMemHandle, IMG_SID hMHandle) +#else +PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psHandleBase, IMG_HANDLE *phOSMemHandle, IMG_HANDLE hMHandle) +#endif +{ + IMG_HANDLE hMHandleInt; + PVRSRV_HANDLE_TYPE eHandleType; + PVRSRV_ERROR eError; + + /* + * We don't know the type of the handle at this point, so we use + * PVRSRVLookupHandleAnyType to look it up. + */ + eError = PVRSRVLookupHandleAnyType(psHandleBase, &hMHandleInt, + &eHandleType, + hMHandle); + if(eError != PVRSRV_OK) + { + return eError; + } + + switch(eHandleType) + { +#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) + case PVRSRV_HANDLE_TYPE_MEM_INFO: + case PVRSRV_HANDLE_TYPE_MEM_INFO_REF: + case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO: + { + PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)hMHandleInt; + + *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle; + + break; + } + case PVRSRV_HANDLE_TYPE_SYNC_INFO: + { + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)hMHandleInt; + PVRSRV_KERNEL_MEM_INFO *psMemInfo = psSyncInfo->psSyncDataMemInfoKM; + + *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle; + + break; + } + case PVRSRV_HANDLE_TYPE_SOC_TIMER: + { + *phOSMemHandle = (IMG_VOID *)hMHandleInt; + break; + } +#else + case PVRSRV_HANDLE_TYPE_NONE: + *phOSMemHandle = (IMG_VOID *)hMHandleInt; + break; +#endif + default: + return PVRSRV_ERROR_BAD_MAPPING; + } + + return PVRSRV_OK; +} +/****************************************************************************** + End of file (bridged_support.c) +******************************************************************************/ diff --git 
a/pvr-source/services4/srvkm/bridged/bridged_support.h b/pvr-source/services4/srvkm/bridged/bridged_support.h new file mode 100644 index 0000000..04d5168 --- /dev/null +++ b/pvr-source/services4/srvkm/bridged/bridged_support.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@Title PVR Bridge Support +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description User/kernel mode bridge support. The functions in here + may be used beyond the bridge code proper (e.g. Linux + mmap interface). +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __BRIDGED_SUPPORT_H__ +#define __BRIDGED_SUPPORT_H__ + +#include "handle.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/* + * Derive the internal OS specific memory handle from a secure + * handle. + */ +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phOSMemHandle, IMG_SID hMHandle); +#else +PVRSRV_ERROR PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phOSMemHandle, IMG_HANDLE hMHandle); +#endif + +#if defined (__cplusplus) +} +#endif + +#endif /* __BRIDGED_SUPPORT_H__ */ + +/****************************************************************************** + End of file (bridged_support.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c b/pvr-source/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c new file mode 100644 index 0000000..bee5dc6 --- /dev/null +++ b/pvr-source/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c @@ -0,0 +1,3864 @@ +/*************************************************************************/ /*! +@Title SGX Common Bridge Module (kernel side) +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Receives calls from the user portion of services and + despatches them to functions in the kernel portion. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + + + +#include <stddef.h> + +#include "img_defs.h" + +#if defined(SUPPORT_SGX) + +#include "services.h" +#include "pvr_debug.h" +#include "pvr_bridge.h" +#include "sgx_bridge.h" +#include "perproc.h" +#include "power.h" +#include "pvr_bridge_km.h" +#include "sgx_bridge_km.h" +#include "sgx_options.h" + +#if defined(SUPPORT_MSVDX) + #include "msvdx_bridge.h" +#endif + +#include "bridged_pvr_bridge.h" +#include "bridged_sgx_bridge.h" +#include "sgxutils.h" +#include "buffer_manager.h" +#include "pdump_km.h" + +static IMG_INT +SGXGetClientInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GETCLIENTINFO *psGetClientInfoIN, + PVRSRV_BRIDGE_OUT_GETCLIENTINFO *psGetClientInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETCLIENTINFO); + + psGetClientInfoOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psGetClientInfoIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psGetClientInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + psGetClientInfoOUT->eError = + SGXGetClientInfoKM(hDevCookieInt, + &psGetClientInfoOUT->sClientInfo); + return 0; +} + +static IMG_INT +SGXReleaseClientInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_RELEASECLIENTINFO *psReleaseClientInfoIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + 
PVRSRV_SGXDEV_INFO *psDevInfo; + IMG_HANDLE hDevCookieInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psReleaseClientInfoIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice; + + PVR_ASSERT(psDevInfo->ui32ClientRefCount > 0); + + /* + * psDevInfo->ui32ClientRefCount can be zero if an error occurred before SGXGetClientInfo is called + */ + if (psDevInfo->ui32ClientRefCount > 0) + { + psDevInfo->ui32ClientRefCount--; + } + + psRetOUT->eError = PVRSRV_OK; + + return 0; +} + + +static IMG_INT +SGXGetInternalDevInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO *psSGXGetInternalDevInfoIN, + PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO *psSGXGetInternalDevInfoOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; +#if defined (SUPPORT_SID_INTERFACE) + SGX_INTERNAL_DEVINFO_KM sSGXInternalDevInfo; +#endif + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO); + + psSGXGetInternalDevInfoOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXGetInternalDevInfoIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psSGXGetInternalDevInfoOUT->eError != PVRSRV_OK) + { + return 0; + } + + psSGXGetInternalDevInfoOUT->eError = + SGXGetInternalDevInfoKM(hDevCookieInt, +#if defined (SUPPORT_SID_INTERFACE) + &sSGXInternalDevInfo); +#else + &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo); +#endif + + /* + * Handle is not allocated in batch mode, as there is no resource + * allocation to undo if the handle allocation fails. 
+ */ + psSGXGetInternalDevInfoOUT->eError = + PVRSRVAllocHandle(psPerProc->psHandleBase, + &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle, +#if defined (SUPPORT_SID_INTERFACE) + sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle, +#else + psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + + return 0; +} + + +static IMG_INT +SGXDoKickBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_DOKICK *psDoKickIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_UINT32 i; + IMG_INT ret = 0; + IMG_UINT32 ui32NumDstSyncs; +#if defined (SUPPORT_SID_INTERFACE) + SGX_CCB_KICK_KM sCCBKickKM = {{0}}; + IMG_HANDLE ahSyncInfoHandles[16]; +#else + IMG_HANDLE *phKernelSyncInfoHandles = IMG_NULL; +#endif + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DOKICK); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psDoKickIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.hCCBKernelMemInfo, +#else + &psDoKickIN->sCCBKick.hCCBKernelMemInfo, +#endif + psDoKickIN->sCCBKick.hCCBKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + if (psDoKickIN->sCCBKick.ui32NumDstSyncObjects > 16) + { + return 0; + } + + if(psDoKickIN->sCCBKick.hTA3DSyncInfo != 0) +#else + if(psDoKickIN->sCCBKick.hTA3DSyncInfo != IMG_NULL) +#endif + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.hTA3DSyncInfo, +#else + &psDoKickIN->sCCBKick.hTA3DSyncInfo, +#endif + psDoKickIN->sCCBKick.hTA3DSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != 
PVRSRV_OK) + { + return 0; + } + } + +#if defined (SUPPORT_SID_INTERFACE) + if(psDoKickIN->sCCBKick.hTASyncInfo != 0) +#else + if(psDoKickIN->sCCBKick.hTASyncInfo != IMG_NULL) +#endif + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.hTASyncInfo, +#else + &psDoKickIN->sCCBKick.hTASyncInfo, +#endif + psDoKickIN->sCCBKick.hTASyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + +#if defined(FIX_HW_BRN_31620) + /* We need to lookup the mem context and pass it through */ + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psDoKickIN->sCCBKick.hDevMemContext, + psDoKickIN->sCCBKick.hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } +#endif + +#if defined (SUPPORT_SID_INTERFACE) + if(psDoKickIN->sCCBKick.h3DSyncInfo != 0) +#else + if(psDoKickIN->sCCBKick.h3DSyncInfo != IMG_NULL) +#endif + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.h3DSyncInfo, +#else + &psDoKickIN->sCCBKick.h3DSyncInfo, +#endif + psDoKickIN->sCCBKick.h3DSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + +#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS) + /* SRC and DST sync details */ + if (psDoKickIN->sCCBKick.ui32NumTASrcSyncs > SGX_MAX_TA_SRC_SYNCS) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + sCCBKickKM.ui32NumTASrcSyncs = psDoKickIN->sCCBKick.ui32NumTASrcSyncs; +#endif + for(i=0; i<psDoKickIN->sCCBKick.ui32NumTASrcSyncs; i++) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.ahTASrcKernelSyncInfo[i], +#else + &psDoKickIN->sCCBKick.ahTASrcKernelSyncInfo[i], +#endif + psDoKickIN->sCCBKick.ahTASrcKernelSyncInfo[i], + 
PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + if (psDoKickIN->sCCBKick.ui32NumTADstSyncs > SGX_MAX_TA_DST_SYNCS) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + sCCBKickKM.ui32NumTADstSyncs = psDoKickIN->sCCBKick.ui32NumTADstSyncs; +#endif + for(i=0; i<psDoKickIN->sCCBKick.ui32NumTADstSyncs; i++) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.ahTADstKernelSyncInfo[i], +#else + &psDoKickIN->sCCBKick.ahTADstKernelSyncInfo[i], +#endif + psDoKickIN->sCCBKick.ahTADstKernelSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + if (psDoKickIN->sCCBKick.ui32Num3DSrcSyncs > SGX_MAX_3D_SRC_SYNCS) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + sCCBKickKM.ui32Num3DSrcSyncs = psDoKickIN->sCCBKick.ui32Num3DSrcSyncs; +#endif + for(i=0; i<psDoKickIN->sCCBKick.ui32Num3DSrcSyncs; i++) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.ah3DSrcKernelSyncInfo[i], +#else + &psDoKickIN->sCCBKick.ah3DSrcKernelSyncInfo[i], +#endif + psDoKickIN->sCCBKick.ah3DSrcKernelSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } +#else/* #if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS) */ + /* texture dependency details */ + if (psDoKickIN->sCCBKick.ui32NumSrcSyncs > SGX_MAX_SRC_SYNCS_TA) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + sCCBKickKM.ui32NumSrcSyncs = psDoKickIN->sCCBKick.ui32NumSrcSyncs; +#endif + for(i=0; i<psDoKickIN->sCCBKick.ui32NumSrcSyncs; i++) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.ahSrcKernelSyncInfo[i], 
+#else + &psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i], +#endif + psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } +#endif/* #if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS) */ + + if (psDoKickIN->sCCBKick.ui32NumTAStatusVals > SGX_MAX_TA_STATUS_VALS) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++) + { + psRetOUT->eError = +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.asTAStatusUpdate[i].hKernelMemInfo, +#else + &psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo, +#endif + psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + +#if defined (SUPPORT_SID_INTERFACE) + sCCBKickKM.asTAStatusUpdate[i].sCtlStatus = psDoKickIN->sCCBKick.asTAStatusUpdate[i].sCtlStatus; +#endif + +#else + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.ahTAStatusSyncInfo[i], +#else + &psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i], +#endif + psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); +#endif + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + if (psDoKickIN->sCCBKick.ui32Num3DStatusVals > SGX_MAX_3D_STATUS_VALS) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + for(i = 0; i < psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++) + { + psRetOUT->eError = +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.as3DStatusUpdate[i].hKernelMemInfo, +#else + &psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo, +#endif + psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + +#if defined (SUPPORT_SID_INTERFACE) + sCCBKickKM.as3DStatusUpdate[i].sCtlStatus = 
psDoKickIN->sCCBKick.as3DStatusUpdate[i].sCtlStatus; +#endif +#else + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.ah3DStatusSyncInfo[i], +#else + &psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i], +#endif + psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); +#endif + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + ui32NumDstSyncs = psDoKickIN->sCCBKick.ui32NumDstSyncObjects; + + if(ui32NumDstSyncs > 0) + { + if(!OSAccessOK(PVR_VERIFY_READ, + psDoKickIN->sCCBKick.pahDstSyncHandles, + ui32NumDstSyncs * sizeof(IMG_HANDLE))) + { + PVR_DPF((PVR_DBG_ERROR, "%s: SGXDoKickBW:" + " Invalid pasDstSyncHandles pointer", __FUNCTION__)); + return -EFAULT; + } + + psRetOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32NumDstSyncs * sizeof(IMG_HANDLE), + (IMG_VOID **)&phKernelSyncInfoHandles, + 0, + "Array of Synchronization Info Handles"); + if (psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + sCCBKickKM.pahDstSyncHandles = phKernelSyncInfoHandles; +#else + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + phKernelSyncInfoHandles, + psDoKickIN->sCCBKick.pahDstSyncHandles, + ui32NumDstSyncs * sizeof(IMG_HANDLE)) != PVRSRV_OK) + { + ret = -EFAULT; + goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT; + } + + /* Set sCCBKick.pahDstSyncHandles to point to the local memory */ + psDoKickIN->sCCBKick.pahDstSyncHandles = phKernelSyncInfoHandles; +#endif + + for( i = 0; i < ui32NumDstSyncs; i++) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM.pahDstSyncHandles[i], +#else + &psDoKickIN->sCCBKick.pahDstSyncHandles[i], +#endif + psDoKickIN->sCCBKick.pahDstSyncHandles[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT; + } + + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined 
(SUPPORT_SID_INTERFACE) + &sCCBKickKM.hKernelHWSyncListMemInfo, +#else + &psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo, +#endif + psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(psRetOUT->eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT; + } + } + +#if defined (SUPPORT_SID_INTERFACE) + OSMemCopy(&sCCBKickKM.sCommand, &psDoKickIN->sCCBKick.sCommand, sizeof(sCCBKickKM.sCommand)); + + sCCBKickKM.ui32NumDstSyncObjects = psDoKickIN->sCCBKick.ui32NumDstSyncObjects; + sCCBKickKM.ui32NumTAStatusVals = psDoKickIN->sCCBKick.ui32NumTAStatusVals; + sCCBKickKM.ui32Num3DStatusVals = psDoKickIN->sCCBKick.ui32Num3DStatusVals; + sCCBKickKM.bFirstKickOrResume = psDoKickIN->sCCBKick.bFirstKickOrResume; + sCCBKickKM.ui32CCBOffset = psDoKickIN->sCCBKick.ui32CCBOffset; + sCCBKickKM.bTADependency = psDoKickIN->sCCBKick.bTADependency; + +#if defined(NO_HARDWARE) || defined(PDUMP) + sCCBKickKM.bTerminateOrAbort = psDoKickIN->sCCBKick.bTerminateOrAbort; +#endif +#if defined(PDUMP) + sCCBKickKM.ui32CCBDumpWOff = psDoKickIN->sCCBKick.ui32CCBDumpWOff; +#endif + +#if defined(NO_HARDWARE) + sCCBKickKM.ui32WriteOpsPendingVal = psDoKickIN->sCCBKick.ui32WriteOpsPendingVal; +#endif +#endif /* #if defined (SUPPORT_SID_INTERFACE) */ + psRetOUT->eError = + SGXDoKickKM(hDevCookieInt, +#if defined (SUPPORT_SID_INTERFACE) + &sCCBKickKM); +#else + &psDoKickIN->sCCBKick); +#endif + +PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT: + + if(phKernelSyncInfoHandles) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32NumDstSyncs * sizeof(IMG_HANDLE), + (IMG_VOID *)phKernelSyncInfoHandles, + 0); + /*not nulling pointer, out of scope*/ + } + return ret; +} + + +static IMG_INT +SGXScheduleProcessQueuesBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES *psScheduleProcQIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, 
PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psScheduleProcQIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = SGXScheduleProcessQueuesKM(hDevCookieInt); + + return 0; +} + + +#if defined(TRANSFER_QUEUE) +static IMG_INT +SGXSubmitTransferBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SUBMITTRANSFER *psSubmitTransferIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + PVRSRV_TRANSFER_SGX_KICK *psKick; +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_TRANSFER_SGX_KICK_KM sKickKM = {0}; +#endif + IMG_UINT32 i; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMITTRANSFER); + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + + psKick = &psSubmitTransferIN->sKick; + +#if defined(FIX_HW_BRN_31620) + /* We need to lookup the mem context and pass it through */ + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psKick->hDevMemContext, + psKick->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } +#endif + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSubmitTransferIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sKickKM.hCCBMemInfo, +#else + &psKick->hCCBMemInfo, +#endif + psKick->hCCBMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + if (psKick->hTASyncInfo != IMG_NULL) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sKickKM.hTASyncInfo, +#else + &psKick->hTASyncInfo, +#endif + psKick->hTASyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != 
PVRSRV_OK) + { + return 0; + } + } + + if (psKick->h3DSyncInfo != IMG_NULL) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sKickKM.h3DSyncInfo, +#else + &psKick->h3DSyncInfo, +#endif + psKick->h3DSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + if (psKick->ui32NumSrcSync > SGX_MAX_TRANSFER_SYNC_OPS) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sKickKM.ahSrcSyncInfo[i], +#else + &psKick->ahSrcSyncInfo[i], +#endif + psKick->ahSrcSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + if (psKick->ui32NumDstSync > SGX_MAX_TRANSFER_SYNC_OPS) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } + for (i = 0; i < psKick->ui32NumDstSync; i++) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sKickKM.ahDstSyncInfo[i], +#else + &psKick->ahDstSyncInfo[i], +#endif + psKick->ahDstSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + +#if defined (SUPPORT_SID_INTERFACE) + sKickKM.sHWTransferContextDevVAddr = psKick->sHWTransferContextDevVAddr; + sKickKM.ui32SharedCmdCCBOffset = psKick->ui32SharedCmdCCBOffset; + sKickKM.ui32NumSrcSync = psKick->ui32NumSrcSync; + sKickKM.ui32NumDstSync = psKick->ui32NumDstSync; + sKickKM.ui32Flags = psKick->ui32Flags; + sKickKM.ui32PDumpFlags = psKick->ui32PDumpFlags; +#if defined(PDUMP) + sKickKM.ui32CCBDumpWOff = psKick->ui32CCBDumpWOff; +#endif + + psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, &sKickKM); +#else + psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, psKick); +#endif + + return 0; +} + +static IMG_INT 
SGXSetTransferContextPriorityBW(IMG_UINT32 ui32BridgeID,
								PVRSRV_BRIDGE_IN_SGX_SET_TRANSFER_CONTEXT_PRIORITY *psSGXSetTransferContextPriorityIN,
								PVRSRV_BRIDGE_RETURN *psRetOUT,
								PVRSRV_PER_PROCESS_DATA *psPerProc)
{
	/*
	 * Bridge wrapper for SGXSetTransferContextPriorityKM: resolves the
	 * device cookie and HW transfer-context handles, then forwards the
	 * caller-supplied priority and priority-field offset.  Failures are
	 * reported through psRetOUT->eError; the return value is the bridge
	 * transport status (always 0 here).
	 */
	IMG_HANDLE hDevCookieInt;
	IMG_HANDLE hTransferContextInt;

	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY);

	/* Translate the userspace device cookie into the kernel dev node. */
	psRetOUT->eError =
		PVRSRVLookupHandle(psPerProc->psHandleBase,
						   &hDevCookieInt,
						   psSGXSetTransferContextPriorityIN->hDevCookie,
						   PVRSRV_HANDLE_TYPE_DEV_NODE);

	if(psRetOUT->eError != PVRSRV_OK)
	{
		return 0;
	}

	/* Validate the HW transfer-context handle belongs to this process. */
	psRetOUT->eError =
		PVRSRVLookupHandle(psPerProc->psHandleBase,
						   &hTransferContextInt,
						   psSGXSetTransferContextPriorityIN->hHWTransferContext,
						   PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);

	if(psRetOUT->eError != PVRSRV_OK)
	{
		return 0;
	}

	psRetOUT->eError = SGXSetTransferContextPriorityKM(
			hDevCookieInt,
			hTransferContextInt,
			psSGXSetTransferContextPriorityIN->ui32Priority,
			psSGXSetTransferContextPriorityIN->ui32OffsetOfPriorityField);

	return 0;
}

/*
 * Bridge wrapper for SGXSetRenderContextPriorityKM — the render-context
 * counterpart of SGXSetTransferContextPriorityBW above, with the same
 * lookup-then-forward structure.
 */
static IMG_INT
SGXSetRenderContextPriorityBW(IMG_UINT32 ui32BridgeID,
							  PVRSRV_BRIDGE_IN_SGX_SET_RENDER_CONTEXT_PRIORITY *psSGXSetRenderContextPriorityIN,
							  PVRSRV_BRIDGE_RETURN *psRetOUT,
							  PVRSRV_PER_PROCESS_DATA *psPerProc)
{
	IMG_HANDLE hDevCookieInt;
	IMG_HANDLE hRenderContextInt;

	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY);

	psRetOUT->eError =
		PVRSRVLookupHandle(psPerProc->psHandleBase,
						   &hDevCookieInt,
						   psSGXSetRenderContextPriorityIN->hDevCookie,
						   PVRSRV_HANDLE_TYPE_DEV_NODE);

	if(psRetOUT->eError != PVRSRV_OK)
	{
		return 0;
	}

	psRetOUT->eError =
		PVRSRVLookupHandle(psPerProc->psHandleBase,
						   &hRenderContextInt,
						   psSGXSetRenderContextPriorityIN->hHWRenderContext,
						   PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);

	if(psRetOUT->eError != PVRSRV_OK)
	{
		return 0;
	}

	psRetOUT->eError = SGXSetRenderContextPriorityKM(
hDevCookieInt, + hRenderContextInt, + psSGXSetRenderContextPriorityIN->ui32Priority, + psSGXSetRenderContextPriorityIN->ui32OffsetOfPriorityField); + + return 0; +} + + +#if defined(SGX_FEATURE_2D_HARDWARE) +static IMG_INT +SGXSubmit2DBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SUBMIT2D *psSubmit2DIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + PVRSRV_2D_SGX_KICK *psKick; +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_2D_SGX_KICK_KM sKickKM; +#endif + IMG_UINT32 i; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMIT2D); + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + + psKick = &psSubmit2DIN->sKick; + +#if defined(FIX_HW_BRN_31620) + /* We need to lookup the mem context and pass it through */ + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psKick->hDevMemContext, + psKick->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } +#endif + + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSubmit2DIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sKickKM.hCCBMemInfo, +#else + &psKick->hCCBMemInfo, +#endif + psKick->hCCBMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + if (psKick->hTASyncInfo != 0) +#else + if (psKick->hTASyncInfo != IMG_NULL) +#endif + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sKickKM.hTASyncInfo, +#else + &psKick->hTASyncInfo, +#endif + psKick->hTASyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } +#if defined (SUPPORT_SID_INTERFACE) + else + { + sKickKM.hTASyncInfo = IMG_NULL; + } +#endif 
+ + if (psKick->h3DSyncInfo != IMG_NULL) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sKickKM.h3DSyncInfo, +#else + &psKick->h3DSyncInfo, +#endif + psKick->h3DSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } +#if defined (SUPPORT_SID_INTERFACE) + else + { + sKickKM.h3DSyncInfo = IMG_NULL; + } +#endif + + if (psKick->ui32NumSrcSync > SGX_MAX_2D_SRC_SYNC_OPS) + { + psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; + return 0; + } +#if defined (SUPPORT_SID_INTERFACE) + for (i = 0; i < SGX_MAX_2D_SRC_SYNC_OPS; i++) + { + if (i < psKick->ui32NumSrcSync) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &sKickKM.ahSrcSyncInfo[i], + psKick->ahSrcSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + else + { + sKickKM.ahSrcSyncInfo[i] = IMG_NULL; + } + } +#else + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &psKick->ahSrcSyncInfo[i], + psKick->ahSrcSyncInfo[i], + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } +#endif + + if (psKick->hDstSyncInfo != IMG_NULL) + { + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &sKickKM.hDstSyncInfo, +#else + &psKick->hDstSyncInfo, +#endif + psKick->hDstSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } +#if defined (SUPPORT_SID_INTERFACE) + else + { + sKickKM.hDstSyncInfo = IMG_NULL; + } + + /* copy common members across */ + sKickKM.ui32SharedCmdCCBOffset = psKick->ui32SharedCmdCCBOffset; + sKickKM.ui32NumSrcSync = psKick->ui32NumSrcSync; + sKickKM.ui32PDumpFlags = psKick->ui32PDumpFlags; + sKickKM.sHW2DContextDevVAddr = psKick->sHW2DContextDevVAddr; +#if defined(PDUMP) + sKickKM.ui32CCBDumpWOff = psKick->ui32CCBDumpWOff; 
+#endif +#endif + + psRetOUT->eError = +#if defined (SUPPORT_SID_INTERFACE) + SGXSubmit2DKM(hDevCookieInt, &sKickKM); +#else + SGXSubmit2DKM(hDevCookieInt, psKick); +#endif + + return 0; +} +#endif /* #if defined(SGX_FEATURE_2D_HARDWARE) */ +#endif /* #if defined(TRANSFER_QUEUE) */ + + +static IMG_INT +SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hDevMemContextInt = 0; + PVRSRV_SGXDEV_INFO *psDevInfo; + SGX_MISC_INFO sMiscInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, + PVRSRV_BRIDGE_SGX_GETMISCINFO); + + psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXGetMiscInfoIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + +#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG) + /* Lookup handle for dev mem context */ + if (psSGXGetMiscInfoIN->psMiscInfo->eRequest == SGX_MISC_INFO_REQUEST_MEMREAD) + { + psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevMemContextInt, + psSGXGetMiscInfoIN->psMiscInfo->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } +#endif + /* device node is required for scheduling a CCB command */ + psDeviceNode = hDevCookieInt; + PVR_ASSERT(psDeviceNode != IMG_NULL); + if (psDeviceNode == IMG_NULL) + { + return -EFAULT; + } + + psDevInfo = psDeviceNode->pvDevice; + + /* Copy psMiscInfo to kernel space */ + psRetOUT->eError = CopyFromUserWrapper(psPerProc, + ui32BridgeID, + &sMiscInfo, + psSGXGetMiscInfoIN->psMiscInfo, + sizeof(SGX_MISC_INFO)); + if (psRetOUT->eError != PVRSRV_OK) + { + return -EFAULT; + } + + { + psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo, psDeviceNode, hDevMemContextInt); + + if (psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + } + + /* Copy back 
misc info to user address space */ + psRetOUT->eError = CopyToUserWrapper(psPerProc, + ui32BridgeID, + psSGXGetMiscInfoIN->psMiscInfo, + &sMiscInfo, + sizeof(SGX_MISC_INFO)); + if (psRetOUT->eError != PVRSRV_OK) + { + return -EFAULT; + } + return 0; +} + + +static IMG_INT +SGXReadHWPerfCBBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBIN, + PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + PVRSRV_SGX_HWPERF_CB_ENTRY *psAllocated; + IMG_HANDLE hAllocatedHandle; + IMG_UINT32 ui32AllocatedSize; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_HWPERF_CB); + + psSGXReadHWPerfCBOUT->eError =PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXReadHWPerfCBIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psSGXReadHWPerfCBOUT->eError != PVRSRV_OK) + { + return 0; + } + + ui32AllocatedSize = psSGXReadHWPerfCBIN->ui32ArraySize * + sizeof(psSGXReadHWPerfCBIN->psHWPerfCBData[0]); + ASSIGN_AND_EXIT_ON_ERROR(psSGXReadHWPerfCBOUT->eError, + OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32AllocatedSize, + (IMG_VOID **)&psAllocated, + &hAllocatedHandle, + "Array of Hardware Performance Circular Buffer Data")); + + psSGXReadHWPerfCBOUT->eError = SGXReadHWPerfCBKM(hDevCookieInt, + psSGXReadHWPerfCBIN->ui32ArraySize, + psAllocated, + &psSGXReadHWPerfCBOUT->ui32DataCount, + &psSGXReadHWPerfCBOUT->ui32ClockSpeed, + &psSGXReadHWPerfCBOUT->ui32HostTimeStamp); + if (psSGXReadHWPerfCBOUT->eError == PVRSRV_OK) + { + psSGXReadHWPerfCBOUT->eError = CopyToUserWrapper(psPerProc, + ui32BridgeID, + psSGXReadHWPerfCBIN->psHWPerfCBData, + psAllocated, + ui32AllocatedSize); + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32AllocatedSize, + psAllocated, + hAllocatedHandle); + /*not nulling pointer, out of scope*/ + + return 0; +} + + +static IMG_INT +SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGXDEVINITPART2 *psSGXDevInitPart2IN, + 
PVRSRV_BRIDGE_OUT_SGXDEVINITPART2 *psSGXDevInitPart2OUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_ERROR eError = PVRSRV_OK; +#else + PVRSRV_ERROR eError; +#endif + IMG_BOOL bDissociateFailed = IMG_FALSE; + IMG_BOOL bLookupFailed = IMG_FALSE; + IMG_BOOL bReleaseFailed = IMG_FALSE; + IMG_HANDLE hDummy; + IMG_UINT32 i; +#if defined (SUPPORT_SID_INTERFACE) + SGX_BRIDGE_INIT_INFO_KM asInitInfoKM = {0}; +#endif + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DEVINITPART2); + + /* Report the kernel-side build options to UM */ + psSGXDevInitPart2OUT->ui32KMBuildOptions = SGX_BUILD_OPTIONS; + + if(!psPerProc->bInitProcess) + { + psSGXDevInitPart2OUT->eError = PVRSRV_ERROR_PROCESS_NOT_INITIALISED; + return 0; + } + + psSGXDevInitPart2OUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXDevInitPart2IN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psSGXDevInitPart2OUT->eError != PVRSRV_OK) + { + return 0; + } + + /* Check all the meminfo handles */ + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = 
PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + +#if defined(SGX_SUPPORT_HWPROFILING) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + +#if defined(SUPPORT_SGX_HWPERF) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXDevInitPart2BW: Failed to look up HWPerf meminfo (possibly due to SUPPORT_SGX_HWPERF option mismatch)")); + bLookupFailed = IMG_TRUE; + } +#endif + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelTASigBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernel3DSigBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + +#if defined(FIX_HW_BRN_29702) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + 
psSGXDevInitPart2IN->sInitInfo.hKernelCFIMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + +#if defined(FIX_HW_BRN_29823) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelDummyTermStreamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + + +#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo, + 
PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelVDMSnapShotBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelVDMCtrlStreamBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \ + defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelVDMStateUpdateBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + + for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) + { +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; +#else + IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; +#endif + +#if defined (SUPPORT_SID_INTERFACE) + if (hHandle == 0) +#else + if (hHandle == IMG_NULL) +#endif + { + continue; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDummy, + hHandle, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if 
(eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } + } + + if (bLookupFailed) + { + PVR_DPF((PVR_DBG_ERROR, "SGXDevInitPart2BW: A handle lookup failed")); + psSGXDevInitPart2OUT->eError = PVRSRV_ERROR_INIT2_PHASE_FAILED; + return 0; + } + + /* Lookup and release the device memory handles */ + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelCCBMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelCCBCtlMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelCCBEventKickerMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelSGXHostCtlMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelSGXTA3DCtlMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo, 
+#endif + psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelSGXPTLAWriteBackMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelSGXMiscMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + +#if defined(SGX_SUPPORT_HWPROFILING) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelHWProfilingMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif + +#if defined(SUPPORT_SGX_HWPERF) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelHWPerfCBMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + 
&asInitInfoKM.hKernelTASigBufferMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelTASigBufferMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelTASigBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernel3DSigBufferMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernel3DSigBufferMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernel3DSigBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + +#if defined(FIX_HW_BRN_29702) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelCFIMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelCFIMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelCFIMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bLookupFailed = IMG_TRUE; + } +#endif + +#if defined(FIX_HW_BRN_29823) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelDummyTermStreamMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelDummyTermStreamMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelDummyTermStreamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif + + +#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelClearClipWAVDMStreamMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
+#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelClearClipWAIndexStreamMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelClearClipWAPDSMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelClearClipWAUSEMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelClearClipWAParamMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelClearClipWAPMPTMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) 
+ &asInitInfoKM.hKernelClearClipWATPCMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelClearClipWAPSGRgnHdrMemInfo, +#else + &psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelVDMSnapShotBufferMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelVDMSnapShotBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelVDMCtrlStreamBufferMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelVDMCtrlStreamBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \ + defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, + &psSGXDevInitPart2IN->sInitInfo.hKernelVDMStateUpdateBufferMemInfo, + psSGXDevInitPart2IN->sInitInfo.hKernelVDMStateUpdateBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + &asInitInfoKM.hKernelEDMStatusBufferMemInfo, +#else + 
&psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo, +#endif + psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } +#endif + + for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) + { +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; + IMG_HANDLE *phHandleKM = &asInitInfoKM.asInitMemHandles[i]; + + if (hHandle == 0) +#else + IMG_HANDLE *phHandle = &psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; + + if (*phHandle == IMG_NULL) +#endif + continue; + + eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, +#if defined (SUPPORT_SID_INTERFACE) + phHandleKM, + hHandle, +#else + phHandle, + *phHandle, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + if (eError != PVRSRV_OK) + { + bReleaseFailed = IMG_TRUE; + } + } + + if (bReleaseFailed) + { + PVR_DPF((PVR_DBG_ERROR, "SGXDevInitPart2BW: A handle release failed")); + psSGXDevInitPart2OUT->eError = PVRSRV_ERROR_INIT2_PHASE_FAILED; + /* + * Given that we checked the handles before release, a release + * failure is unexpected. 
+ */ + PVR_DBG_BREAK; + return 0; + } + + /* Dissociate device memory from caller */ +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelCCBMemInfo); +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo); +#endif + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelCCBCtlMemInfo); +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo); +#endif + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelCCBEventKickerMemInfo); +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo); +#endif + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXHostCtlMemInfo); +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo); +#endif + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXTA3DCtlMemInfo); +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo); +#endif + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXPTLAWriteBackMemInfo); +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, 
psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo); +#endif + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#endif + + /* Dissociate SGX MiscInfo buffer from user space */ +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXMiscMemInfo); +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo); +#endif + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + + +#if defined(SGX_SUPPORT_HWPROFILING) +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelHWProfilingMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#endif + +#if defined(SUPPORT_SGX_HWPERF) +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelHWPerfCBMemInfo); +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo); +#endif + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#endif + +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelTASigBufferMemInfo); +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelTASigBufferMemInfo); +#endif + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernel3DSigBufferMemInfo); +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernel3DSigBufferMemInfo); +#endif + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + +#if 
defined(FIX_HW_BRN_29702) +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelCFIMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCFIMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#endif + +#if defined(FIX_HW_BRN_29823) +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelDummyTermStreamMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelDummyTermStreamMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#endif + +#if defined(FIX_HW_BRN_31542) || defined(FIX_HW_BRN_36513) +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAVDMStreamMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAVDMStreamMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAIndexStreamMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAIndexStreamMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAPDSMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, 
psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPDSMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAUSEMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAUSEMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAParamMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAParamMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAPMPTMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPMPTMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWATPCMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWATPCMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelClearClipWAPSGRgnHdrMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, 
psSGXDevInitPart2IN->sInitInfo.hKernelClearClipWAPSGRgnHdrMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#endif + +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelVDMSnapShotBufferMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); + + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelVDMCtrlStreamBufferMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \ + defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelVDMStateUpdateBufferMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) +#if defined (SUPPORT_SID_INTERFACE) + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelEDMStatusBufferMemInfo); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } +#else + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo); + bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK); +#endif +#endif + + for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) + { +#if defined (SUPPORT_SID_INTERFACE) + IMG_HANDLE hHandle = asInitInfoKM.asInitMemHandles[i]; +#else + IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; +#endif + + if (hHandle == IMG_NULL) + continue; + + eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, hHandle); + if (eError != PVRSRV_OK) + { + bDissociateFailed = IMG_TRUE; + } + } + + /* If any dissociations failed, free all the device memory passed in */ + if(bDissociateFailed) + { +#if defined (SUPPORT_SID_INTERFACE) + PVRSRVFreeDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelCCBMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookieInt, 
asInitInfoKM.hKernelCCBCtlMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXHostCtlMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXTA3DCtlMemInfo); +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + PVRSRVFreeDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXPTLAWriteBackMemInfo); +#endif + PVRSRVFreeDeviceMemKM(hDevCookieInt, asInitInfoKM.hKernelSGXMiscMemInfo); +#else + PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo); +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXPTLAWriteBackMemInfo); +#endif + PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo); +#endif + + for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++) + { +#if defined (SUPPORT_SID_INTERFACE) + IMG_HANDLE hHandle = asInitInfoKM.asInitMemHandles[i]; + + if (hHandle == 0) +#else + IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i]; + + if (hHandle == IMG_NULL) +#endif + continue; + + PVRSRVFreeDeviceMemKM(hDevCookieInt, (PVRSRV_KERNEL_MEM_INFO *)hHandle); + + } + + PVR_DPF((PVR_DBG_ERROR, "SGXDevInitPart2BW: A dissociate failed")); + + psSGXDevInitPart2OUT->eError = PVRSRV_ERROR_INIT2_PHASE_FAILED; + + /* A dissociation failure is unexpected */ + PVR_DBG_BREAK; + return 0; + } + +#if defined (SUPPORT_SID_INTERFACE) + asInitInfoKM.sScripts = psSGXDevInitPart2IN->sInitInfo.sScripts; + asInitInfoKM.ui32ClientBuildOptions = psSGXDevInitPart2IN->sInitInfo.ui32ClientBuildOptions; + asInitInfoKM.sSGXStructSizes = 
psSGXDevInitPart2IN->sInitInfo.sSGXStructSizes; + asInitInfoKM.ui32CacheControl = psSGXDevInitPart2IN->sInitInfo.ui32CacheControl; + asInitInfoKM.ui32EDMTaskReg0 = psSGXDevInitPart2IN->sInitInfo.ui32EDMTaskReg0; + asInitInfoKM.ui32EDMTaskReg1 = psSGXDevInitPart2IN->sInitInfo.ui32EDMTaskReg1; + asInitInfoKM.ui32ClkGateStatusReg = psSGXDevInitPart2IN->sInitInfo.ui32ClkGateStatusReg; + asInitInfoKM.ui32ClkGateStatusMask = psSGXDevInitPart2IN->sInitInfo.ui32ClkGateStatusMask; + + OSMemCopy(&asInitInfoKM.asInitDevData , + &psSGXDevInitPart2IN->sInitInfo.asInitDevData, + sizeof(asInitInfoKM.asInitDevData)); + OSMemCopy(&asInitInfoKM.aui32HostKickAddr, + &psSGXDevInitPart2IN->sInitInfo.aui32HostKickAddr, + sizeof(asInitInfoKM.aui32HostKickAddr)); + + psSGXDevInitPart2OUT->eError = + DevInitSGXPart2KM(psPerProc, + hDevCookieInt, + &asInitInfoKM); +#else + psSGXDevInitPart2OUT->eError = + DevInitSGXPart2KM(psPerProc, + hDevCookieInt, + &psSGXDevInitPart2IN->sInitInfo); +#endif + + return 0; +} + + +static IMG_INT +SGXRegisterHWRenderContextBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextIN, + PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; +// PVRSRV_SGXDEV_INFO *psDevInfo; + IMG_HANDLE hHWRenderContextInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT); + + NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc, 1); + + psSGXRegHWRenderContextOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXRegHWRenderContextIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psSGXRegHWRenderContextOUT->eError != PVRSRV_OK) + { + return 0; + } + + hHWRenderContextInt = + SGXRegisterHWRenderContextKM(hDevCookieInt, + psSGXRegHWRenderContextIN->pHWRenderContextCpuVAddr, + psSGXRegHWRenderContextIN->ui32HWRenderContextSize, + 
psSGXRegHWRenderContextIN->ui32OffsetToPDDevPAddr, + psSGXRegHWRenderContextIN->hDevMemContext, + &psSGXRegHWRenderContextOUT->sHWRenderContextDevVAddr, + psPerProc); + + if (hHWRenderContextInt == IMG_NULL) + { + psSGXRegHWRenderContextOUT->eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT; + return 0; + } + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psSGXRegHWRenderContextOUT->hHWRenderContext, + hHWRenderContextInt, + PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc); + + return 0; +} + + +static IMG_INT +SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hHWRenderContextInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hHWRenderContextInt, + psSGXUnregHWRenderContextIN->hHWRenderContext, + PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt, + psSGXUnregHWRenderContextIN->bForceCleanup); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psSGXUnregHWRenderContextIN->hHWRenderContext, + PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT); + + return 0; +} + + +static IMG_INT +SGXRegisterHWTransferContextBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextIN, + PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hHWTransferContextInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, 
PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT); + + NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc, 1); + + psSGXRegHWTransferContextOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXRegHWTransferContextIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psSGXRegHWTransferContextOUT->eError != PVRSRV_OK) + { + return 0; + } + + hHWTransferContextInt = + SGXRegisterHWTransferContextKM(hDevCookieInt, + psSGXRegHWTransferContextIN->pHWTransferContextCpuVAddr, + psSGXRegHWTransferContextIN->ui32HWTransferContextSize, + psSGXRegHWTransferContextIN->ui32OffsetToPDDevPAddr, + psSGXRegHWTransferContextIN->hDevMemContext, + &psSGXRegHWTransferContextOUT->sHWTransferContextDevVAddr, + psPerProc); + + if (hHWTransferContextInt == IMG_NULL) + { + psSGXRegHWTransferContextOUT->eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT; + return 0; + } + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psSGXRegHWTransferContextOUT->hHWTransferContext, + hHWTransferContextInt, + PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc); + + return 0; +} + + +static IMG_INT +SGXUnregisterHWTransferContextBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT *psSGXUnregHWTransferContextIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ +#if defined (SUPPORT_SID_INTERFACE) + IMG_HANDLE hHWTransferContextInt = 0; +#else + IMG_HANDLE hHWTransferContextInt; +#endif + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hHWTransferContextInt, + psSGXUnregHWTransferContextIN->hHWTransferContext, + PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = 
SGXUnregisterHWTransferContextKM(hHWTransferContextInt, + psSGXUnregHWTransferContextIN->bForceCleanup); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psSGXUnregHWTransferContextIN->hHWTransferContext, + PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT); + + return 0; +} + + +#if defined(SGX_FEATURE_2D_HARDWARE) +static IMG_INT +SGXRegisterHW2DContextBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextIN, + PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_HANDLE hHW2DContextInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT); + + NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc, 1); + + psSGXRegHW2DContextOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXRegHW2DContextIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psSGXRegHW2DContextOUT->eError != PVRSRV_OK) + { + return 0; + } + + hHW2DContextInt = + SGXRegisterHW2DContextKM(hDevCookieInt, + psSGXRegHW2DContextIN->pHW2DContextCpuVAddr, + psSGXRegHW2DContextIN->ui32HW2DContextSize, + psSGXRegHW2DContextIN->ui32OffsetToPDDevPAddr, + psSGXRegHW2DContextIN->hDevMemContext, + &psSGXRegHW2DContextOUT->sHW2DContextDevVAddr, + psPerProc); + + if (hHW2DContextInt == IMG_NULL) + { + psSGXRegHW2DContextOUT->eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT; + return 0; + } + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psSGXRegHW2DContextOUT->hHW2DContext, + hHW2DContextInt, + PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc); + + return 0; +} + + +static IMG_INT +SGXUnregisterHW2DContextBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT *psSGXUnregHW2DContextIN, + PVRSRV_BRIDGE_RETURN 
*psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hHW2DContextInt; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hHW2DContextInt, + psSGXUnregHW2DContextIN->hHW2DContext, + PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = SGXUnregisterHW2DContextKM(hHW2DContextInt, + psSGXUnregHW2DContextIN->bForceCleanup); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVReleaseHandle(psPerProc->psHandleBase, + psSGXUnregHW2DContextIN->hHW2DContext, + PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT); + + return 0; +} +#endif /* #if defined(SGX_FEATURE_2D_HARDWARE) */ + +static IMG_INT +SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; +// PVRSRV_SGXDEV_INFO *psDevInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXFlushHWRenderTargetIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + +// psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice; + + psRetOUT->eError = SGXFlushHWRenderTargetKM(hDevCookieInt, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr, IMG_FALSE); + + return 0; +} + + +static IMG_INT +SGX2DQueryBlitsCompleteBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE *ps2DQueryBltsCompleteIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_VOID *pvSyncInfo; + PVRSRV_SGXDEV_INFO *psDevInfo; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE); + + psRetOUT->eError 
= + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + ps2DQueryBltsCompleteIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvSyncInfo, + ps2DQueryBltsCompleteIN->hKernSyncInfo, + PVRSRV_HANDLE_TYPE_SYNC_INFO); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice; + + psRetOUT->eError = + SGX2DQueryBlitsCompleteKM(psDevInfo, + (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo, + ps2DQueryBltsCompleteIN->bWaitForComplete); + + return 0; +} + + +static IMG_INT +SGXFindSharedPBDescBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescIN, + PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = IMG_NULL; + IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount = 0; + IMG_UINT32 i; + IMG_HANDLE hSharedPBDesc = IMG_NULL; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC); + + NEW_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc, PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS + 4); + + psSGXFindSharedPBDescOUT->hSharedPBDesc = IMG_NULL; + + psSGXFindSharedPBDescOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXFindSharedPBDescIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK) + goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT; + + psSGXFindSharedPBDescOUT->eError = + SGXFindSharedPBDescKM(psPerProc, hDevCookieInt, + psSGXFindSharedPBDescIN->bLockOnFailure, + 
psSGXFindSharedPBDescIN->ui32TotalPBSize, + &hSharedPBDesc, + &psSharedPBDescKernelMemInfo, + &psHWPBDescKernelMemInfo, + &psBlockKernelMemInfo, + &psHWBlockKernelMemInfo, + &ppsSharedPBDescSubKernelMemInfos, + &ui32SharedPBDescSubKernelMemInfosCount); + if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK) + goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT; + + PVR_ASSERT(ui32SharedPBDescSubKernelMemInfosCount + <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS); + + psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount = + ui32SharedPBDescSubKernelMemInfosCount; + + if(hSharedPBDesc == IMG_NULL) + { + psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle = 0; + /* It's not an error if we don't find a buffer, + * we just return NULL */ + goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT; + } + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psSGXFindSharedPBDescOUT->hSharedPBDesc, + hSharedPBDesc, + PVRSRV_HANDLE_TYPE_SHARED_PB_DESC, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + + /* + * We allocate handles of type PVRSRV_HANDLE_TYPE_MEM_INFO_REF here, + * as the process doesn't own the underlying memory, and so should + * only be allowed a restricted set of operations on it, such as + * mapping it into its address space. 
+ */ + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle, + psSharedPBDescKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psSGXFindSharedPBDescOUT->hSharedPBDesc); + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psSGXFindSharedPBDescOUT->hHWPBDescKernelMemInfoHandle, + psHWPBDescKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psSGXFindSharedPBDescOUT->hSharedPBDesc); + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psSGXFindSharedPBDescOUT->hBlockKernelMemInfoHandle, + psBlockKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psSGXFindSharedPBDescOUT->hSharedPBDesc); + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psSGXFindSharedPBDescOUT->hHWBlockKernelMemInfoHandle, + psHWBlockKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psSGXFindSharedPBDescOUT->hSharedPBDesc); + + + for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++) + { + PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOut = + psSGXFindSharedPBDescOUT; + + PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, + &psSGXFindSharedPBDescOut->ahSharedPBDescSubKernelMemInfoHandles[i], + ppsSharedPBDescSubKernelMemInfos[i], + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_ALLOC_FLAG_MULTI, + psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle); + } + +PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT: + if (ppsSharedPBDescSubKernelMemInfos != IMG_NULL) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount, + ppsSharedPBDescSubKernelMemInfos, + IMG_NULL); + } + + if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK) + { + if(hSharedPBDesc != IMG_NULL) + { + SGXUnrefSharedPBDescKM(hSharedPBDesc); + } + } + else + { + COMMIT_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc); + } + + return 0; +} + + +static 
IMG_INT
SGXUnrefSharedPBDescBW(IMG_UINT32 ui32BridgeID,
					   PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescIN,
					   PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescOUT,
					   PVRSRV_PER_PROCESS_DATA *psPerProc)
{
	/* Kernel-side object behind the caller's shared-PB-descriptor handle */
	IMG_HANDLE hSharedPBDesc;

	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC);

	/*
	 * Resolve the per-process handle to the kernel object. All failures are
	 * reported through psSGXUnrefSharedPBDescOUT->eError; the bridge return
	 * value itself is always 0.
	 */
	psSGXUnrefSharedPBDescOUT->eError =
		PVRSRVLookupHandle(psPerProc->psHandleBase,
						   &hSharedPBDesc,
						   psSGXUnrefSharedPBDescIN->hSharedPBDesc,
						   PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
	if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
	{
		return 0;
	}

	/* Unref in the KM layer (presumably drops one reference on the shared
	   parameter-buffer descriptor; KM implementation not visible here). */
	psSGXUnrefSharedPBDescOUT->eError =
		SGXUnrefSharedPBDescKM(hSharedPBDesc);

	if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
	{
		return 0;
	}

	/* Only release the per-process handle once the KM unref succeeded,
	   so a failed unref leaves the caller's handle intact. */
	psSGXUnrefSharedPBDescOUT->eError =
		PVRSRVReleaseHandle(psPerProc->psHandleBase,
							psSGXUnrefSharedPBDescIN->hSharedPBDesc,
							PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);

	return 0;
}


/*
 * SGXAddSharedPBDescBW
 *
 * Bridge-in handler that registers a shared parameter-buffer descriptor
 * built from a set of caller-supplied kernel-meminfo handles.
 * (Body continues below this chunk boundary.)
 */
static IMG_INT
SGXAddSharedPBDescBW(IMG_UINT32 ui32BridgeID,
					 PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescIN,
					 PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescOUT,
					 PVRSRV_PER_PROCESS_DATA *psPerProc)
{
	IMG_HANDLE hDevCookieInt;
	PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
	PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
	PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
	PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
	IMG_UINT32 ui32KernelMemInfoHandlesCount =
		psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount;
	/* 0 on success; a negative errno (e.g. -EFAULT) for user-copy faults */
	IMG_INT ret = 0;
#if defined (SUPPORT_SID_INTERFACE)
	IMG_SID *phKernelMemInfoHandles = 0;
#else
	IMG_HANDLE *phKernelMemInfoHandles = IMG_NULL;
#endif
	PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfos = IMG_NULL;
	IMG_UINT32 i;
	PVRSRV_ERROR eError;
	IMG_HANDLE hSharedPBDesc = IMG_NULL;

	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC);

	/* One handle (the shared PB descriptor itself) is allocated on success */
	NEW_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc, 1);

psSGXAddSharedPBDescOUT->hSharedPBDesc = IMG_NULL; + + PVR_ASSERT(ui32KernelMemInfoHandlesCount + <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS); + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevCookieInt, + psSGXAddSharedPBDescIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psSharedPBDescKernelMemInfo, + psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + if(eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psHWPBDescKernelMemInfo, + psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psBlockKernelMemInfo, + psSGXAddSharedPBDescIN->hBlockKernelMemInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + if(eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&psHWBlockKernelMemInfo, + psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + + if(!OSAccessOK(PVR_VERIFY_READ, + psSGXAddSharedPBDescIN->phKernelMemInfoHandles, + ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE))) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC:" + " Invalid phKernelMemInfos pointer", __FUNCTION__)); + ret = -EFAULT; + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE), + (IMG_VOID **)&phKernelMemInfoHandles, + 0, + "Array of 
Handles"); + if (eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + phKernelMemInfoHandles, + psSGXAddSharedPBDescIN->phKernelMemInfoHandles, + ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE)) + != PVRSRV_OK) + { + ret = -EFAULT; + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *), + (IMG_VOID **)&ppsKernelMemInfos, + 0, + "Array of pointers to Kernel Memory Info"); + if (eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + for(i=0; i<ui32KernelMemInfoHandlesCount; i++) + { + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID **)&ppsKernelMemInfos[i], + phKernelMemInfoHandles[i], + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + } + + /* + * Release all the handles we've just looked up, as none + * of the associated resources will be valid for access via + * those handles once we return from SGXAddSharedPBDesc. 
+ */ + /* PRQA S 3198 2 */ /* override redundant warning as PVR_ASSERT is ignored by QAC */ + eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + PVR_ASSERT(eError == PVRSRV_OK); + + /* PRQA S 3198 2 */ /* override redundant warning as PVR_ASSERT is ignored by QAC */ + eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + PVR_ASSERT(eError == PVRSRV_OK); + + /* PRQA S 3198 2 */ /* override redundant warning as PVR_ASSERT is ignored by QAC */ + eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psSGXAddSharedPBDescIN->hBlockKernelMemInfo, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO); + PVR_ASSERT(eError == PVRSRV_OK); + + /* PRQA S 3198 2 */ /* override redundant warning as PVR_ASSERT is ignored by QAC */ + eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + PVR_ASSERT(eError == PVRSRV_OK); + + for(i=0; i<ui32KernelMemInfoHandlesCount; i++) + { + /* PRQA S 3198 2 */ /* override redundant warning as PVR_ASSERT is ignored by QAC */ + eError = PVRSRVReleaseHandle(psPerProc->psHandleBase, + phKernelMemInfoHandles[i], + PVRSRV_HANDLE_TYPE_MEM_INFO); + PVR_ASSERT(eError == PVRSRV_OK); + } + + eError = SGXAddSharedPBDescKM(psPerProc, hDevCookieInt, + psSharedPBDescKernelMemInfo, + psHWPBDescKernelMemInfo, + psBlockKernelMemInfo, + psHWBlockKernelMemInfo, + psSGXAddSharedPBDescIN->ui32TotalPBSize, + &hSharedPBDesc, + ppsKernelMemInfos, + ui32KernelMemInfoHandlesCount, + psSGXAddSharedPBDescIN->sHWPBDescDevVAddr); + + + if (eError != PVRSRV_OK) + { + goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT; + } + + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psSGXAddSharedPBDescOUT->hSharedPBDesc, + hSharedPBDesc, + PVRSRV_HANDLE_TYPE_SHARED_PB_DESC, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + 
+PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT: + + if(phKernelMemInfoHandles) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE), + (IMG_VOID *)phKernelMemInfoHandles, + 0); + } + if(ppsKernelMemInfos) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *), + (IMG_VOID *)ppsKernelMemInfos, + 0); + } + + if(ret == 0 && eError == PVRSRV_OK) + { + COMMIT_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc); + } + + psSGXAddSharedPBDescOUT->eError = eError; + + return ret; +} + +static IMG_INT +SGXGetInfoForSrvinitBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitIN, + PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_HANDLE hDevCookieInt; + IMG_UINT32 i; +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_HEAP_INFO_KM asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS]; +#endif + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT); + + NEW_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS); + + if(!psPerProc->bInitProcess) + { + psSGXInfoForSrvinitOUT->eError = PVRSRV_ERROR_PROCESS_NOT_INITIALISED; + return 0; + } + + psSGXInfoForSrvinitOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, + psSGXInfoForSrvinitIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + + if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK) + { + return 0; + } + + psSGXInfoForSrvinitOUT->eError = + SGXGetInfoForSrvinitKM(hDevCookieInt, +#if defined (SUPPORT_SID_INTERFACE) + &asHeapInfo[0], + &psSGXInfoForSrvinitOUT->sInitInfo.sPDDevPAddr); +#else + &psSGXInfoForSrvinitOUT->sInitInfo); +#endif + + if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK) + { + return 0; + } + + for(i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++) + { + PVRSRV_HEAP_INFO *psHeapInfo; + + psHeapInfo = 
&psSGXInfoForSrvinitOUT->sInitInfo.asHeapInfo[i]; + +#if defined (SUPPORT_SID_INTERFACE) + if ((asHeapInfo[i].ui32HeapID != (IMG_UINT32)SGX_UNDEFINED_HEAP_ID) && + (asHeapInfo[i].hDevMemHeap != IMG_NULL)) + { + /* Allocate heap handle */ + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &psHeapInfo->hDevMemHeap, + asHeapInfo[i].hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + } + else + { + psHeapInfo->hDevMemHeap = 0; + } + + psHeapInfo->ui32HeapID = asHeapInfo[i].ui32HeapID; + psHeapInfo->sDevVAddrBase = asHeapInfo[i].sDevVAddrBase; + psHeapInfo->ui32HeapByteSize = asHeapInfo[i].ui32HeapByteSize; + psHeapInfo->ui32Attribs = asHeapInfo[i].ui32Attribs; + psHeapInfo->ui32XTileStride = asHeapInfo[i].ui32XTileStride; +#else + if (psHeapInfo->ui32HeapID != (IMG_UINT32)SGX_UNDEFINED_HEAP_ID) + { + IMG_HANDLE hDevMemHeapExt; + + if (psHeapInfo->hDevMemHeap != IMG_NULL) + { + /* Allocate heap handle */ + PVRSRVAllocHandleNR(psPerProc->psHandleBase, + &hDevMemHeapExt, + psHeapInfo->hDevMemHeap, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_ALLOC_FLAG_SHARED); + psHeapInfo->hDevMemHeap = hDevMemHeapExt; + } + } +#endif + } + + COMMIT_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc); + + return 0; +} + +#if defined(PDUMP) +// PRQA S 5120++ +/***************************************************************************** + FUNCTION : DumpBufferArray + PURPOSE : PDUMP information in stored buffer array + PARAMETERS : + RETURNS : +*****************************************************************************/ +static IMG_VOID +DumpBufferArray(PVRSRV_PER_PROCESS_DATA *psPerProc, +#if defined (SUPPORT_SID_INTERFACE) + PSGX_KICKTA_DUMP_BUFFER_KM psBufferArray, +#else + PSGX_KICKTA_DUMP_BUFFER psBufferArray, +#endif + IMG_UINT32 ui32BufferArrayLength, + IMG_BOOL bDumpPolls) +{ + IMG_UINT32 i; + + for (i=0; i<ui32BufferArrayLength; i++) + { +#if defined (SUPPORT_SID_INTERFACE) + PSGX_KICKTA_DUMP_BUFFER_KM psBuffer; +#else + 
PSGX_KICKTA_DUMP_BUFFER psBuffer; +#endif + PVRSRV_KERNEL_MEM_INFO *psCtrlMemInfoKM; + IMG_CHAR * pszName; + IMG_HANDLE hUniqueTag; + IMG_UINT32 ui32Offset; + + psBuffer = &psBufferArray[i]; + pszName = psBuffer->pszName; + if (!pszName) + { + pszName = "Nameless buffer"; + } + + hUniqueTag = MAKEUNIQUETAG((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo); + + #if defined(SUPPORT_SGX_NEW_STATUS_VALS) + psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hCtrlKernelMemInfo); + ui32Offset = psBuffer->sCtrlDevVAddr.uiAddr - psCtrlMemInfoKM->sDevVAddr.uiAddr; + #else + psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo)->psKernelSyncInfo->psSyncDataMemInfoKM; + ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete); + #endif + + if (psBuffer->ui32Start <= psBuffer->ui32End) + { + if (bDumpPolls) + { + PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName); + PDUMPCBP(psCtrlMemInfoKM, + ui32Offset, + psBuffer->ui32Start, + psBuffer->ui32SpaceUsed, + psBuffer->ui32BufferSize, + 0, + MAKEUNIQUETAG(psCtrlMemInfoKM)); + } + + PDUMPCOMMENTWITHFLAGS(0, "%s\r\n", pszName); + PDUMPMEMUM(psPerProc, + IMG_NULL, + psBuffer->pvLinAddr, + (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo, + psBuffer->ui32Start, + psBuffer->ui32End - psBuffer->ui32Start, + 0, + hUniqueTag); + } + else + { + /* + Range of data wraps the end of the buffer so it needs to be dumped in two sections + */ + + if (bDumpPolls) + { + PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName); + PDUMPCBP(psCtrlMemInfoKM, + ui32Offset, + psBuffer->ui32Start, + psBuffer->ui32BackEndLength, + psBuffer->ui32BufferSize, + 0, + MAKEUNIQUETAG(psCtrlMemInfoKM)); + } + PDUMPCOMMENTWITHFLAGS(0, "%s (part 1)\r\n", pszName); + PDUMPMEMUM(psPerProc, + IMG_NULL, + psBuffer->pvLinAddr, + (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo, + psBuffer->ui32Start, + psBuffer->ui32BackEndLength, + 0, + hUniqueTag); + + if (bDumpPolls) + { + PDUMPMEMPOL(psCtrlMemInfoKM, + ui32Offset, + 0, + 
0xFFFFFFFF, + PDUMP_POLL_OPERATOR_NOTEQUAL, + 0, + MAKEUNIQUETAG(psCtrlMemInfoKM)); + + PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName); + PDUMPCBP(psCtrlMemInfoKM, + ui32Offset, + 0, + psBuffer->ui32End, + psBuffer->ui32BufferSize, + 0, + MAKEUNIQUETAG(psCtrlMemInfoKM)); + } + PDUMPCOMMENTWITHFLAGS(0, "%s (part 2)\r\n", pszName); + PDUMPMEMUM(psPerProc, + IMG_NULL, + psBuffer->pvLinAddr, + (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo, + 0, + psBuffer->ui32End, + 0, + hUniqueTag); + } + } +} +static IMG_INT +SGXPDumpBufferArrayBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY *psPDumpBufferArrayIN, + IMG_VOID *psBridgeOut, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 i; +#if defined (SUPPORT_SID_INTERFACE) + SGX_KICKTA_DUMP_BUFFER *psUMPtr; + SGX_KICKTA_DUMP_BUFFER_KM *psKickTADumpBufferKM, *psKMPtr; +#else +#if defined(__QNXNTO__) + const IMG_UINT32 NAME_BUFFER_SIZE = 30; + IMG_PCHAR pszNameBuffer, pszName; + IMG_UINT32 ui32NameBufferArraySize, ui32NameLength; +#endif + SGX_KICKTA_DUMP_BUFFER *psKickTADumpBuffer; +#endif + IMG_UINT32 ui32BufferArrayLength = + psPDumpBufferArrayIN->ui32BufferArrayLength; + IMG_UINT32 ui32BufferArraySize = + ui32BufferArrayLength * sizeof(SGX_KICKTA_DUMP_BUFFER); + PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS; + + PVR_UNREFERENCED_PARAMETER(psBridgeOut); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY); + +#if defined (SUPPORT_SID_INTERFACE) + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32BufferArraySize, + (IMG_PVOID *)&psKickTADumpBufferKM, 0, + "Array of Kick Tile Accelerator Dump Buffer") != PVRSRV_OK) +#else + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32BufferArraySize, + (IMG_PVOID *)&psKickTADumpBuffer, 0, + "Array of Kick Tile Accelerator Dump Buffer") != PVRSRV_OK) +#endif + { + return -ENOMEM; + } + +#if !defined (SUPPORT_SID_INTERFACE) + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + psKickTADumpBuffer, + 
psPDumpBufferArrayIN->psBufferArray, + ui32BufferArraySize) != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0); + /*not nulling pointer, out of scope*/ + return -EFAULT; + } + +#if defined (__QNXNTO__) + ui32NameBufferArraySize = ui32BufferArrayLength * NAME_BUFFER_SIZE; + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, ui32NameBufferArraySize, + (IMG_PVOID *)&pszNameBuffer, 0, + "Kick Tile Accelerator Dump Buffer names") != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0); + return -ENOMEM; + } + + pszName = pszNameBuffer; + + for (i=0; i<ui32BufferArrayLength; i++) + { + if (psKickTADumpBuffer[i].pszName) + { + ui32NameLength = psKickTADumpBuffer[i].ui32NameLength; + if (ui32NameLength >= NAME_BUFFER_SIZE) + { + ui32NameLength = NAME_BUFFER_SIZE - 1; + } + + if (ui32NameLength && + (CopyFromUserWrapper(psPerProc, ui32BridgeID, pszName, + psKickTADumpBuffer[i].pszName, ui32NameLength + 1) == PVRSRV_OK)) + { + pszName[NAME_BUFFER_SIZE - 1] = 0; + psKickTADumpBuffer[i].pszName = pszName; + pszName += NAME_BUFFER_SIZE; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "Failed to read PDUMP buffer name")); + psKickTADumpBuffer[i].pszName = 0; + } + } + } +#endif +#endif + + for(i = 0; i < ui32BufferArrayLength; i++) + { +#if defined (SUPPORT_SID_INTERFACE) + IMG_VOID *pvMemInfo = IMG_NULL; + psUMPtr = &psPDumpBufferArrayIN->psBufferArray[i]; + psKMPtr = &psKickTADumpBufferKM[i]; +#else + IMG_VOID *pvMemInfo; +#endif + + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvMemInfo, +#if defined (SUPPORT_SID_INTERFACE) + psUMPtr->hKernelMemInfo, +#else + psKickTADumpBuffer[i].hKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: " + "PVRSRVLookupHandle failed (%d)", eError)); + break; + } +#if defined (SUPPORT_SID_INTERFACE) + psKMPtr->hKernelMemInfo = pvMemInfo; +#else + 
psKickTADumpBuffer[i].hKernelMemInfo = pvMemInfo; +#endif + +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &pvMemInfo, +#if defined (SUPPORT_SID_INTERFACE) + psUMPtr->hCtrlKernelMemInfo, +#else + psKickTADumpBuffer[i].hCtrlKernelMemInfo, +#endif + PVRSRV_HANDLE_TYPE_MEM_INFO); + + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: " + "PVRSRVLookupHandle failed (%d)", eError)); + break; + } +#if defined (SUPPORT_SID_INTERFACE) + psKMPtr->hCtrlKernelMemInfo = pvMemInfo; + psKMPtr->sCtrlDevVAddr = psUMPtr->sCtrlDevVAddr; +#else + psKickTADumpBuffer[i].hCtrlKernelMemInfo = pvMemInfo; +#endif +#endif + +#if defined (SUPPORT_SID_INTERFACE) + psKMPtr->ui32SpaceUsed = psUMPtr->ui32SpaceUsed; + psKMPtr->ui32Start = psUMPtr->ui32Start; + psKMPtr->ui32End = psUMPtr->ui32End; + psKMPtr->ui32BufferSize = psUMPtr->ui32BufferSize; + psKMPtr->ui32BackEndLength = psUMPtr->ui32BackEndLength; + psKMPtr->uiAllocIndex = psUMPtr->uiAllocIndex; + psKMPtr->pvLinAddr = psUMPtr->pvLinAddr; + psKMPtr->pszName = psUMPtr->pszName; +#endif + } + + if(eError == PVRSRV_OK) + { + DumpBufferArray(psPerProc, +#if defined (SUPPORT_SID_INTERFACE) + psKickTADumpBufferKM, +#else + psKickTADumpBuffer, +#endif + ui32BufferArrayLength, + psPDumpBufferArrayIN->bDumpPolls); + } + +#if defined (SUPPORT_SID_INTERFACE) + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBufferKM, 0); +#else + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0); +#if defined (__QNXNTO__) + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32NameBufferArraySize, pszNameBuffer, 0); +#endif +#endif + /*not nulling pointer, out of scope*/ + + return 0; +} + +static IMG_INT +SGXPDump3DSignatureRegistersBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS *psPDump3DSignatureRegistersIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 
ui32RegisterArraySize = psPDump3DSignatureRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32); + IMG_UINT32 *pui32Registers = IMG_NULL; + PVRSRV_SGXDEV_INFO *psDevInfo; +#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270) + IMG_UINT32 ui32RegVal = 0; +#endif + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_HANDLE hDevMemContextInt = 0; + IMG_UINT32 ui32MMUContextID; + IMG_INT ret = -EFAULT; + + PVR_UNREFERENCED_PARAMETER(psRetOUT); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS); + + if (ui32RegisterArraySize == 0) + { + goto ExitNoError; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psDeviceNode, + psPDump3DSignatureRegistersIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed")); + goto Exit; + } + + psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + +#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270) + /* Enable all cores available */ + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT); +#if defined(PDUMP) + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT, + psPDump3DSignatureRegistersIN->bLastFrame ? 
PDUMP_FLAGS_LASTFRAME : 0); +#endif +#endif + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32RegisterArraySize, + (IMG_PVOID *)&pui32Registers, 0, + "Array of Registers") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: OSAllocMem failed")); + goto Exit; + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + pui32Registers, + psPDump3DSignatureRegistersIN->pui32Registers, + ui32RegisterArraySize) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: CopyFromUserWrapper failed")); + goto Exit; + } + + PDump3DSignatureRegisters(&psDeviceNode->sDevId, + psPDump3DSignatureRegistersIN->ui32DumpFrameNum, + psPDump3DSignatureRegistersIN->bLastFrame, + pui32Registers, + psPDump3DSignatureRegistersIN->ui32NumRegisters); + + psRetOUT->eError = + PVRSRVLookupHandle( psPerProc->psHandleBase, + &hDevMemContextInt, + psPDump3DSignatureRegistersIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* look up the MMU context ID */ + PVR_ASSERT(psDeviceNode->pfnMMUGetContextID != IMG_NULL); + ui32MMUContextID = psDeviceNode->pfnMMUGetContextID((IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext); + + PDumpSignatureBuffer(&psDeviceNode->sDevId, + "out.tasig", "TA", 0, + psDevInfo->psKernelTASigBufferMemInfo->sDevVAddr, + (IMG_UINT32)psDevInfo->psKernelTASigBufferMemInfo->uAllocSize, + ui32MMUContextID, + 0 /*ui32PDumpFlags*/); + PDumpSignatureBuffer(&psDeviceNode->sDevId, + "out.3dsig", "3D", 0, + psDevInfo->psKernel3DSigBufferMemInfo->sDevVAddr, + (IMG_UINT32)psDevInfo->psKernel3DSigBufferMemInfo->uAllocSize, + ui32MMUContextID, + 0 /*ui32PDumpFlags*/); + +ExitNoError: + psRetOUT->eError = PVRSRV_OK; + ret = 0; +Exit: + if (pui32Registers != IMG_NULL) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0); + } + +#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270) + if (psDevInfo != IMG_NULL) + { + 
		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal);
#if defined(PDUMP)
		PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_CORE, ui32RegVal,
				psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
#endif
	}
#endif

	return ret;
}

/*
 * SGXPDumpCounterRegistersBW
 *
 * Bridge-in handler: copies a caller-supplied array of register offsets
 * from user space and records them in the PDump stream via
 * PDumpCounterRegisters(). An empty array is treated as success.
 * Returns 0 on success, a negative errno on failure; there is no
 * bridge-out status structure for this call (psBridgeOut is unused).
 */
static IMG_INT
SGXPDumpCounterRegistersBW(IMG_UINT32 ui32BridgeID,
					  PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS *psPDumpCounterRegistersIN,
					  IMG_VOID *psBridgeOut,
					  PVRSRV_PER_PROCESS_DATA *psPerProc)
{
	/* Byte size of the user-supplied register-offset array */
	IMG_UINT32 ui32RegisterArraySize = psPDumpCounterRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
	IMG_UINT32 *pui32Registers = IMG_NULL;
	PVRSRV_DEVICE_NODE *psDeviceNode ;
	IMG_INT ret = -EFAULT;

	PVR_UNREFERENCED_PARAMETER(psBridgeOut);

	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS);

	/* Nothing to dump */
	if (ui32RegisterArraySize == 0)
	{
		goto ExitNoError;
	}

	if(PVRSRVLookupHandle(psPerProc->psHandleBase,
						  (IMG_VOID**)&psDeviceNode,
						  psPDumpCounterRegistersIN->hDevCookie,
						  PVRSRV_HANDLE_TYPE_DEV_NODE) != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "SGXPDumpCounterRegistersBW: hDevCookie lookup failed"));
		/* NOTE(review): a handle-lookup failure reports -ENOMEM here, while
		 * the sibling TA/3D signature handlers leave -EFAULT for the same
		 * condition — confirm whether the errno mismatch is intended. */
		ret = -ENOMEM;
		goto Exit;
	}

	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
				  ui32RegisterArraySize,
				  (IMG_PVOID *)&pui32Registers, 0,
				  "Array of Registers") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: OSAllocMem failed"));
		ret = -ENOMEM;
		goto Exit;
	}

	/* Copy the register-offset array out of user space */
	if(CopyFromUserWrapper(psPerProc,
					ui32BridgeID,
					pui32Registers,
					psPDumpCounterRegistersIN->pui32Registers,
					ui32RegisterArraySize) != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: CopyFromUserWrapper failed"));
		goto Exit;
	}

	PDumpCounterRegisters(&psDeviceNode->sDevId,
					psPDumpCounterRegistersIN->ui32DumpFrameNum,
					psPDumpCounterRegistersIN->bLastFrame,
					pui32Registers,
					psPDumpCounterRegistersIN->ui32NumRegisters);

ExitNoError:
	ret = 0;
Exit:
	/* Common cleanup: free the kernel-side copy of the array, if any */
	if (pui32Registers != IMG_NULL)
	{
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
ui32RegisterArraySize, pui32Registers, 0); + } + + return ret; +} + +static IMG_INT +SGXPDumpTASignatureRegistersBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS *psPDumpTASignatureRegistersIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + IMG_UINT32 ui32RegisterArraySize = psPDumpTASignatureRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32); + IMG_UINT32 *pui32Registers = IMG_NULL; +#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270) + PVRSRV_SGXDEV_INFO *psDevInfo = IMG_NULL; + IMG_UINT32 ui32RegVal = 0; +#endif + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_INT ret = -EFAULT; + + PVR_UNREFERENCED_PARAMETER(psRetOUT); + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS); + + if (ui32RegisterArraySize == 0) + { + goto ExitNoError; + } + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID**)&psDeviceNode, + psPDumpTASignatureRegistersIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed")); + goto Exit; + } + +#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270) + + psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + + /* Enable all cores available */ + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT); +#if defined(PDUMP) + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT, + psPDumpTASignatureRegistersIN->bLastFrame ? 
PDUMP_FLAGS_LASTFRAME : 0); +#endif +#endif + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + ui32RegisterArraySize, + (IMG_PVOID *)&pui32Registers, 0, + "Array of Registers") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: OSAllocMem failed")); + ret = -ENOMEM; + goto Exit; + } + + if(CopyFromUserWrapper(psPerProc, + ui32BridgeID, + pui32Registers, + psPDumpTASignatureRegistersIN->pui32Registers, + ui32RegisterArraySize) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: CopyFromUserWrapper failed")); + goto Exit; + } + + PDumpTASignatureRegisters(&psDeviceNode->sDevId, + psPDumpTASignatureRegistersIN->ui32DumpFrameNum, + psPDumpTASignatureRegistersIN->ui32TAKickCount, + psPDumpTASignatureRegistersIN->bLastFrame, + pui32Registers, + psPDumpTASignatureRegistersIN->ui32NumRegisters); + +ExitNoError: + psRetOUT->eError = PVRSRV_OK; + ret = 0; +Exit: + if (pui32Registers != IMG_NULL) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0); + } + +#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270) + if (psDevInfo != IMG_NULL) + { + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal); +#if defined(PDUMP) + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_CORE, ui32RegVal, + psPDumpTASignatureRegistersIN->bLastFrame ? 
PDUMP_FLAGS_LASTFRAME : 0); +#endif + } +#endif + + return ret; +} +//PRQA S 5120-- + + +static IMG_INT +SGXPDumpHWPerfCBBW(IMG_UINT32 ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB *psPDumpHWPerfCBIN, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ +#if defined(SUPPORT_SGX_HWPERF) +#if defined(__linux__) + PVRSRV_SGXDEV_INFO *psDevInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_HANDLE hDevMemContextInt = 0; + IMG_UINT32 ui32MMUContextID = 0; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_VOID**)&psDeviceNode, + psPDumpHWPerfCBIN->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psDevInfo = psDeviceNode->pvDevice; + + psRetOUT->eError = + PVRSRVLookupHandle( psPerProc->psHandleBase, + &hDevMemContextInt, + psPDumpHWPerfCBIN->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* look up the MMU context ID */ + PVR_ASSERT(psDeviceNode->pfnMMUGetContextID != IMG_NULL); + ui32MMUContextID = psDeviceNode->pfnMMUGetContextID(hDevMemContextInt); + + PDumpHWPerfCBKM(&psDeviceNode->sDevId, + &psPDumpHWPerfCBIN->szFileName[0], + psPDumpHWPerfCBIN->ui32FileOffset, + psDevInfo->psKernelHWPerfCBMemInfo->sDevVAddr, + psDevInfo->psKernelHWPerfCBMemInfo->uAllocSize, + ui32MMUContextID, + psPDumpHWPerfCBIN->ui32PDumpFlags); + + return 0; +#else + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + PVR_UNREFERENCED_PARAMETER(psPDumpHWPerfCBIN); + PVR_UNREFERENCED_PARAMETER(psRetOUT); + PVR_UNREFERENCED_PARAMETER(psPerProc); + return 0; +#endif +#else + PVR_UNREFERENCED_PARAMETER(ui32BridgeID); + PVR_UNREFERENCED_PARAMETER(psPDumpHWPerfCBIN); + PVR_UNREFERENCED_PARAMETER(psRetOUT); + PVR_UNREFERENCED_PARAMETER(psPerProc); + return -EFAULT; +#endif /* defined(SUPPORT_SGX_HWPERF) */ +} + + +static IMG_INT +SGXPDumpSaveMemBW(IMG_UINT32 
ui32BridgeID, + PVRSRV_BRIDGE_IN_PDUMP_SAVEMEM *psPDumpSaveMem, + PVRSRV_BRIDGE_RETURN *psRetOUT, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_HANDLE hDevMemContextInt = 0; + IMG_UINT32 ui32MMUContextID; + + PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_SAVEMEM); + + psRetOUT->eError = + PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_VOID**)&psDeviceNode, + psPDumpSaveMem->hDevCookie, + PVRSRV_HANDLE_TYPE_DEV_NODE); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + psRetOUT->eError = + PVRSRVLookupHandle( psPerProc->psHandleBase, + &hDevMemContextInt, + psPDumpSaveMem->hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + if(psRetOUT->eError != PVRSRV_OK) + { + return 0; + } + + /* look up the MMU context ID */ + PVR_ASSERT(psDeviceNode->pfnMMUGetContextID != IMG_NULL); + ui32MMUContextID = psDeviceNode->pfnMMUGetContextID(hDevMemContextInt); + + PDumpSaveMemKM(&psDeviceNode->sDevId, + &psPDumpSaveMem->szFileName[0], + psPDumpSaveMem->ui32FileOffset, + psPDumpSaveMem->sDevVAddr, + psPDumpSaveMem->ui32Size, + ui32MMUContextID, + psPDumpSaveMem->ui32PDumpFlags); + return 0; +} + +#endif /* PDUMP */ + + +/* PRQA S 0313,3635 END_SET_SGX */ /* function macro required this format */ +IMG_VOID SetSGXDispatchTableEntry(IMG_VOID) +{ + + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETCLIENTINFO, SGXGetClientInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO, SGXReleaseClientInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO, SGXGetInternalDevInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DOKICK, SGXDoKickBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR, DummyBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READREGISTRYDWORD, DummyBW); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE, SGX2DQueryBlitsCompleteBW); + +#if defined(TRANSFER_QUEUE) + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER, SGXSubmitTransferBW); +#endif + 
SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMISCINFO, SGXGetMiscInfoBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT , SGXGetInfoForSrvinitBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DEVINITPART2, SGXDevInitPart2BW); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC, SGXFindSharedPBDescBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC, SGXUnrefSharedPBDescBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC, SGXAddSharedPBDescBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, SGXRegisterHWRenderContextBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, SGXFlushHWRenderTargetBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, SGXUnregisterHWRenderContextBW); +#if defined(SGX_FEATURE_2D_HARDWARE) + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMIT2D, SGXSubmit2DBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT, SGXRegisterHW2DContextBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT, SGXUnregisterHW2DContextBW); +#endif + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT, SGXRegisterHWTransferContextBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT, SGXUnregisterHWTransferContextBW); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES, SGXScheduleProcessQueuesBW); + + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_HWPERF_CB, SGXReadHWPerfCBBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SET_RENDER_CONTEXT_PRIORITY, SGXSetRenderContextPriorityBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SET_TRANSFER_CONTEXT_PRIORITY, SGXSetTransferContextPriorityBW); + +#if defined(PDUMP) + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY, SGXPDumpBufferArrayBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS, SGXPDump3DSignatureRegistersBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS, 
SGXPDumpCounterRegistersBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS, SGXPDumpTASignatureRegistersBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB, SGXPDumpHWPerfCBBW); + SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_SAVEMEM, SGXPDumpSaveMemBW); +#endif +} +/* PRQA L:END_SET_SGX */ /* end of setup overrides */ + +#endif /* SUPPORT_SGX */ diff --git a/pvr-source/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h b/pvr-source/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h new file mode 100644 index 0000000..3cb6282 --- /dev/null +++ b/pvr-source/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h @@ -0,0 +1,61 @@ +/*************************************************************************/ /*! +@Title SGX Bridge Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the PVR Bridge code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __BRIDGED_SGX_BRIDGE_H__ +#define __BRIDGED_SGX_BRIDGE_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + + +IMG_VOID SetSGXDispatchTableEntry(IMG_VOID); + +#if defined (__cplusplus) +} +#endif + +#endif /* __BRIDGED_SGX_BRIDGE_H__ */ + +/****************************************************************************** + End of file (bridged_sgx_bridge.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/common/buffer_manager.c b/pvr-source/services4/srvkm/common/buffer_manager.c new file mode 100644 index 0000000..9ce7a11 --- /dev/null +++ b/pvr-source/services4/srvkm/common/buffer_manager.c @@ -0,0 +1,3573 @@ +/*************************************************************************/ /*! +@Title Buffer management functions for Linux +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Manages buffers mapped into two memory spaces - cpu and device, + either of which can be virtual or physical. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "services_headers.h" + +#include "sysconfig.h" +#include "hash.h" +#include "ra.h" +#include "pdump_km.h" +#include "lists.h" + +static IMG_BOOL +ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags); +static IMG_VOID +BM_FreeMemory (IMG_VOID *pH, IMG_UINTPTR_T base, BM_MAPPING *psMapping); +static IMG_BOOL +BM_ImportMemory(IMG_VOID *pH, IMG_SIZE_T uSize, + IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping, + IMG_UINT32 uFlags, IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, IMG_UINTPTR_T *pBase); + +static IMG_INT32 +DevMemoryAlloc (BM_CONTEXT *pBMContext, + BM_MAPPING *pMapping, + IMG_SIZE_T *pActualSize, + IMG_UINT32 uFlags, + IMG_UINT32 dev_vaddr_alignment, + IMG_DEV_VIRTADDR *pDevVAddr); +static IMG_INT32 +DevMemoryFree (BM_MAPPING *pMapping); + +/*! +****************************************************************************** + + @Function AllocMemory + + @Description Allocate a buffer mapped into both cpu and device virtual + address spaces. This is now quite simple: + + 1. Choose whence to get the memory; + 2. Obtain memory from that source; + 3. Work out the actual buffer addresses in other spaces. + + In choosing whence to get the memory we work like this: + + 1. If an import arena exists, use unless BP_CONTIGUOUS is set; + 2. Use a contiguous pool. + + @Input pBMContext - BM context + @Input psBMHeap - BM heap + @Input psDevVAddr - device virtual address (optional) + @Input uSize - requested buffer size in bytes. + @Input uFlags - property flags for the buffer. + @Input uDevVAddrAlignment - required device virtual address + alignment, or 0. + @Input pvPrivData - opaque private data passed through to allocator + @Input ui32PrivDataLength - length of opaque private data + + @Output pBuf - receives a pointer to a descriptor of the allocated + buffer. + @Return IMG_TRUE - Success + IMG_FALSE - Failed. 
+ + *****************************************************************************/ +static IMG_BOOL +AllocMemory (BM_CONTEXT *pBMContext, + BM_HEAP *psBMHeap, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_SIZE_T uSize, + IMG_UINT32 uFlags, + IMG_UINT32 uDevVAddrAlignment, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + BM_BUF *pBuf) +{ + BM_MAPPING *pMapping; + IMG_UINTPTR_T uOffset; + RA_ARENA *pArena = IMG_NULL; + + PVR_DPF ((PVR_DBG_MESSAGE, + "AllocMemory (uSize=0x%x, uFlags=0x%x, align=0x%x)", + uSize, uFlags, uDevVAddrAlignment)); + + /* + what to do depends on combination of DevVaddr generation + and backing RAM requirement + */ + if(uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) + { + if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) + { + /* user supplied DevVAddr, RAM backing */ + PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported")); + return IMG_FALSE; + } + + /* BM supplied DevVAddr, RAM Backing */ + + /* check heap attributes */ + if(psBMHeap->ui32Attribs + & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG + |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) + { + /* specify arena (VM+RAM)*/ + pArena = psBMHeap->pImportArena; + PVR_ASSERT(psBMHeap->sDevArena.psDeviceMemoryHeapInfo->ui32Attribs & PVRSRV_MEM_RAM_BACKED_ALLOCATION); + } + else + { + PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: backing store type doesn't match heap")); + return IMG_FALSE; + } + + /* Now allocate from the arena we chose above. 
*/ + /* in case of a pageable buffer, we must bypass RA which could + * combine/split individual mappings between buffers: + */ + if (uFlags & (PVRSRV_MEM_SPARSE | PVRSRV_HAP_GPU_PAGEABLE)) + { + IMG_BOOL bSuccess; + IMG_SIZE_T puiActualSize; + IMG_SIZE_T uRequestSize = uSize; + + if(uFlags & PVRSRV_MEM_SPARSE) + { + uRequestSize = ui32ChunkSize * ui32NumPhysChunks; + uSize = ui32ChunkSize * ui32NumVirtChunks; + } + + /* Allocate physical memory */ + if (!BM_ImportMemory(psBMHeap, + uRequestSize, + &puiActualSize, + &pMapping, + uFlags, + pvPrivData, + ui32PrivDataLength, + (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr))) + { + PVR_DPF((PVR_DBG_ERROR, + "BM_ImportMemory: Failed to allocate device memory")); + return IMG_FALSE; + } + pBuf->hOSMemHandle = pMapping->hOSMemHandle; + + /* We allocate VM space for sparse area */ + if(uFlags & PVRSRV_MEM_SPARSE) + { + if (puiActualSize != ui32ChunkSize * ui32NumPhysChunks) + { + /* + * Most likely the chunk size was not host page multiple, + * so return with an error + */ + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: Failed to allocate" + "memory for sparse allocation")); + BM_FreeMemory(pArena, IMG_NULL, pMapping); + return IMG_FALSE; + } + + pMapping->uSizeVM = uSize; + pMapping->ui32ChunkSize = ui32ChunkSize; + pMapping->ui32NumVirtChunks = ui32NumVirtChunks; + pMapping->ui32NumPhysChunks = ui32NumPhysChunks; + pMapping->pabMapChunk = pabMapChunk; + + if (!(uFlags & PVRSRV_HAP_NO_GPU_VIRTUAL_ON_ALLOC)) + { + /* Allocate VA space and map in the physical memory */ + bSuccess = DevMemoryAlloc (pBMContext, + pMapping, + IMG_NULL, + uFlags, + (IMG_UINT32)uDevVAddrAlignment, + &pMapping->DevVAddr); + if (!bSuccess) + { + PVR_DPF((PVR_DBG_ERROR, + "AllocMemory: Failed to allocate device memory")); + BM_FreeMemory(pArena, IMG_NULL, pMapping); + return IMG_FALSE; + } + + /* uDevVAddrAlignment is currently set to zero so QAC + * generates warning which we override */ + /* PRQA S 3356,3358 1 */ + PVR_ASSERT 
(uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1); + pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr; + } + } + } + else + { + if (!RA_Alloc(pArena, + uSize, + IMG_NULL, + (IMG_VOID*) &pMapping, + uFlags, + uDevVAddrAlignment, + 0, + pvPrivData, + ui32PrivDataLength, + (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr))) + { + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%x) hOSMemHandle %p, flags 0x%08x FAILED", + uSize, pMapping->hOSMemHandle, uFlags)); + return IMG_FALSE; + } + } + + uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr; + if(pMapping->CpuVAddr) + { + pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + uOffset); + } + else + { + pBuf->CpuVAddr = IMG_NULL; + } + + if(uSize == pMapping->uSizeVM) + { + pBuf->hOSMemHandle = pMapping->hOSMemHandle; + } + else + { + if(OSGetSubMemHandle(pMapping->hOSMemHandle, + uOffset, + uSize, + psBMHeap->ui32Attribs, + &pBuf->hOSMemHandle)!=PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSGetSubMemHandle FAILED")); + return IMG_FALSE; + } + } + + /* for hm_contiguous and hm_wrapped memory, the pMapping + * will have a physical address, else 0 */ + pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset; + + if(uFlags & PVRSRV_MEM_ZERO) + { + if(!ZeroBuf(pBuf, pMapping, uSize, psBMHeap->ui32Attribs | uFlags)) + { + return IMG_FALSE; + } + } + } + else + { + if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) + { + /* user supplied DevVAddr, no RAM backing */ + PVR_ASSERT(psDevVAddr != IMG_NULL); + + if (psDevVAddr == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: invalid parameter - psDevVAddr")); + return IMG_FALSE; + } + + /* just make space in the pagetables */ + pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap, + uSize, + IMG_NULL, + PVRSRV_MEM_USER_SUPPLIED_DEVVADDR, + uDevVAddrAlignment, + psDevVAddr); + + /* setup buf */ + pBuf->DevVAddr = *psDevVAddr; + } + else + { + IMG_BOOL bResult; + /* BM supplied DevVAddr, no RAM Backing */ + + /* just 
make space in the pagetables */ + bResult = pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap, + uSize, + IMG_NULL, + 0, + uDevVAddrAlignment, + &pBuf->DevVAddr); + + if(!bResult) + { + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: MMUAlloc failed")); + return IMG_FALSE; + } + } + + /* allocate a mocked-up mapping */ + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (struct _BM_MAPPING_), + (IMG_PVOID *)&pMapping, IMG_NULL, + "Buffer Manager Mapping") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSAllocMem(0x%x) FAILED", sizeof(*pMapping))); + return IMG_FALSE; + } + + /* setup buf */ + pBuf->CpuVAddr = IMG_NULL; + pBuf->hOSMemHandle = 0; + pBuf->CpuPAddr.uiAddr = 0; + + /* setup mapping */ + pMapping->CpuVAddr = IMG_NULL; + pMapping->CpuPAddr.uiAddr = 0; + pMapping->DevVAddr = pBuf->DevVAddr; + pMapping->ui32MappingCount = 1; + pMapping->psSysAddr = IMG_NULL; + pMapping->uSize = uSize; + pMapping->hOSMemHandle = 0; + } + + /* Record the arena pointer in the mapping. */ + pMapping->pArena = pArena; + pMapping->ui32DevVAddrAlignment = uDevVAddrAlignment; + + /* record the heap */ + pMapping->pBMHeap = psBMHeap; + pBuf->pMapping = pMapping; + + /* output some stats */ + PVR_DPF ((PVR_DBG_MESSAGE, + "AllocMemory: pMapping=%08x: DevV=%08X CpuV=%08x CpuP=%08X uSize=0x%x", + (IMG_UINTPTR_T)pMapping, + pMapping->DevVAddr.uiAddr, + (IMG_UINTPTR_T)pMapping->CpuVAddr, + pMapping->CpuPAddr.uiAddr, + pMapping->uSize)); + + PVR_DPF ((PVR_DBG_MESSAGE, + "AllocMemory: pBuf=%08x: DevV=%08X CpuV=%08x CpuP=%08X uSize=0x%x", + (IMG_UINTPTR_T)pBuf, + pBuf->DevVAddr.uiAddr, + (IMG_UINTPTR_T)pBuf->CpuVAddr, + pBuf->CpuPAddr.uiAddr, + uSize)); + + /* Verify virtual device address alignment */ + PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0); + + return IMG_TRUE; +} + + +/*! 
+****************************************************************************** + + @Function WrapMemory + + @Description Allocate a buffer mapped into both cpu and device virtual + address spaces. + + @Input psBMHeap - BM heap + @Input uSize - requested buffer size in bytes. + @Input ui32BaseOffset - Offset from page of wrap. + @Input bPhysContig - Is the wrap physically contiguous. + @Input psAddr - List of pages to wrap. + @Input pvCPUVAddr - Optional CPU Kernel virtual address (page aligned) of memory to wrap + @Input uFlags - property flags for the buffer. + @Output Buf - receives a pointer to a descriptor of the allocated + buffer. + @Return IMG_TRUE - Success + IMG_FALSE - Failed. + + *****************************************************************************/ +static IMG_BOOL +WrapMemory (BM_HEAP *psBMHeap, + IMG_SIZE_T uSize, + IMG_SIZE_T ui32BaseOffset, + IMG_BOOL bPhysContig, + IMG_SYS_PHYADDR *psAddr, + IMG_VOID *pvCPUVAddr, + IMG_UINT32 uFlags, + BM_BUF *pBuf) +{ + IMG_DEV_VIRTADDR DevVAddr = {0}; + BM_MAPPING *pMapping; + IMG_INT32 bResult; + IMG_SIZE_T const ui32PageSize = HOST_PAGESIZE(); + + PVR_DPF ((PVR_DBG_MESSAGE, + "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, bPhysContig=0x%x, pvCPUVAddr = 0x%08x, flags=0x%x)", + (IMG_UINTPTR_T)psBMHeap, uSize, ui32BaseOffset, bPhysContig, (IMG_UINTPTR_T)pvCPUVAddr, uFlags)); + + PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0); + /* Only need lower 12 bits of the cpu addr - don't care what size a void* is */ + PVR_ASSERT(((IMG_UINTPTR_T)pvCPUVAddr & (ui32PageSize - 1)) == 0); + + uSize += ui32BaseOffset; + uSize = HOST_PAGEALIGN (uSize); + + /* allocate a mocked-up mapping */ + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(*pMapping), + (IMG_PVOID *)&pMapping, IMG_NULL, + "Mocked-up mapping") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",sizeof(*pMapping))); + return IMG_FALSE; + } + + OSMemSet(pMapping, 0, sizeof (*pMapping)); + + pMapping->uSize = uSize; 
+ pMapping->uSizeVM = uSize; + pMapping->pBMHeap = psBMHeap; + + if(pvCPUVAddr) + { + pMapping->CpuVAddr = pvCPUVAddr; + + if (bPhysContig) + { + pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr; + pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]); + + if(OSRegisterMem(pMapping->CpuPAddr, + pMapping->CpuVAddr, + pMapping->uSize, + uFlags, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterMem Phys=0x%08X, Size=%d) failed", + pMapping->CpuPAddr.uiAddr, pMapping->uSize)); + goto fail_cleanup; + } + } + else + { + pMapping->eCpuMemoryOrigin = hm_wrapped_scatter_virtaddr; + pMapping->psSysAddr = psAddr; + + if(OSRegisterDiscontigMem(pMapping->psSysAddr, + pMapping->CpuVAddr, + pMapping->uSize, + uFlags, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterDiscontigMem Size=%d) failed", + pMapping->uSize)); + goto fail_cleanup; + } + } + } + else + { + if (bPhysContig) + { + pMapping->eCpuMemoryOrigin = hm_wrapped; + pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]); + + if(OSReservePhys(pMapping->CpuPAddr, + pMapping->uSize, + uFlags, + IMG_NULL, + &pMapping->CpuVAddr, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReservePhys Phys=0x%08X, Size=%d) failed", + pMapping->CpuPAddr.uiAddr, pMapping->uSize)); + goto fail_cleanup; + } + } + else + { + pMapping->eCpuMemoryOrigin = hm_wrapped_scatter; + pMapping->psSysAddr = psAddr; + + if(OSReserveDiscontigPhys(pMapping->psSysAddr, + pMapping->uSize, + uFlags, + &pMapping->CpuVAddr, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReserveDiscontigPhys Size=%d) failed", + pMapping->uSize)); + goto fail_cleanup; + } + } + } + + /* + * Allocate device memory for this buffer. 
Map wrapped pages as read/write + */ + bResult = DevMemoryAlloc(psBMHeap->pBMContext, + pMapping, + IMG_NULL, + uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE, + IMG_CAST_TO_DEVVADDR_UINT(ui32PageSize), + &DevVAddr); + if (bResult <= 0) + { + PVR_DPF((PVR_DBG_ERROR, + "WrapMemory: DevMemoryAlloc(0x%x) failed", + pMapping->uSize)); + goto fail_cleanup; + } + + /* + * Determine the offset of this allocation within the underlying + * dual mapped chunk of memory, we can assume that all three + * addresses associated with this allocation are placed at the same + * offset within the underlying chunk. + */ + pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset; + if(!ui32BaseOffset) + { + pBuf->hOSMemHandle = pMapping->hOSMemHandle; + } + else + { + if(OSGetSubMemHandle(pMapping->hOSMemHandle, + ui32BaseOffset, + (pMapping->uSize-ui32BaseOffset), + uFlags, + &pBuf->hOSMemHandle)!=PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSGetSubMemHandle failed")); + goto fail_cleanup; + } + } + if(pMapping->CpuVAddr) + { + pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + ui32BaseOffset); + } + pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + IMG_CAST_TO_DEVVADDR_UINT(ui32BaseOffset); + + if(uFlags & PVRSRV_MEM_ZERO) + { + if(!ZeroBuf(pBuf, pMapping, uSize, uFlags)) + { + return IMG_FALSE; + } + } + + PVR_DPF ((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr)); + PVR_DPF ((PVR_DBG_MESSAGE, + "WrapMemory: DevV=%08X CpuP=%08X uSize=0x%x", + pMapping->DevVAddr.uiAddr, pMapping->CpuPAddr.uiAddr, pMapping->uSize)); + PVR_DPF ((PVR_DBG_MESSAGE, + "WrapMemory: DevV=%08X CpuP=%08X uSize=0x%x", + pBuf->DevVAddr.uiAddr, pBuf->CpuPAddr.uiAddr, uSize)); + + pBuf->pMapping = pMapping; + return IMG_TRUE; + +fail_cleanup: + if(ui32BaseOffset && pBuf->hOSMemHandle) + { + OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags); + } + + if(pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle)) + { + switch(pMapping->eCpuMemoryOrigin) + { + case 
hm_wrapped: + OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle); + break; + case hm_wrapped_virtaddr: + OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle); + break; + case hm_wrapped_scatter: + OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle); + break; + case hm_wrapped_scatter_virtaddr: + OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle); + break; + default: + break; + } + + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL); + /*not nulling pointer, out of scope*/ + + return IMG_FALSE; +} + + +static IMG_BOOL +ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags) +{ + IMG_VOID *pvCpuVAddr; + + if(pBuf->CpuVAddr) + { + OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes); + } + else if(pMapping->eCpuMemoryOrigin == hm_contiguous + || pMapping->eCpuMemoryOrigin == hm_wrapped) + { + pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr, + ui32Bytes, + PVRSRV_HAP_KERNEL_ONLY + | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK), + IMG_NULL); + if(!pvCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin for contiguous buffer failed")); + return IMG_FALSE; + } + OSMemSet(pvCpuVAddr, 0, ui32Bytes); + OSUnMapPhysToLin(pvCpuVAddr, + ui32Bytes, + PVRSRV_HAP_KERNEL_ONLY + | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK), + IMG_NULL); + } + else + { + IMG_SIZE_T ui32BytesRemaining = ui32Bytes; + IMG_SIZE_T ui32CurrentOffset = 0; + IMG_CPU_PHYADDR CpuPAddr; + + /* Walk through the pBuf one page at a time and use + * transient mappings to zero the memory */ + + PVR_ASSERT(pBuf->hOSMemHandle); + + while(ui32BytesRemaining > 0) + { + IMG_SIZE_T ui32BlockBytes = MIN(ui32BytesRemaining, HOST_PAGESIZE()); + CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, ui32CurrentOffset); + /* If the CpuPAddr isn't page aligned then start by writing up to the next page + * boundary (or ui32BytesRemaining 
if less), so that subsequent iterations can + * copy full physical pages. */ + if(CpuPAddr.uiAddr & (HOST_PAGESIZE() -1)) + { + ui32BlockBytes = + MIN(ui32BytesRemaining, (IMG_UINT32)(HOST_PAGEALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr)); + } + + pvCpuVAddr = OSMapPhysToLin(CpuPAddr, + ui32BlockBytes, + PVRSRV_HAP_KERNEL_ONLY + | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK), + IMG_NULL); + if(!pvCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED")); + return IMG_FALSE; + } + OSMemSet(pvCpuVAddr, 0, ui32BlockBytes); + OSUnMapPhysToLin(pvCpuVAddr, + ui32BlockBytes, + PVRSRV_HAP_KERNEL_ONLY + | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK), + IMG_NULL); + + ui32BytesRemaining -= ui32BlockBytes; + ui32CurrentOffset += ui32BlockBytes; + } + } + + return IMG_TRUE; +} + +/*! +****************************************************************************** + + @Function FreeBuf + + @Description Free a buffer previously allocated with BM_Alloc() or unwrap + one previous wrapped with BM_Wrap(). + The buffer is identified by the buffer descriptor pBuf + returned at allocation. Note the double indirection when + passing the buffer. + + + @Input pBuf - buffer descriptor to free. + @Input ui32Flags - flags + @Input bFromAllocator - Is this being called by the + allocator? + + @Return None. 
 *****************************************************************************/
static IMG_VOID
FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags, IMG_BOOL bFromAllocator)
{
	BM_MAPPING *pMapping;
	PVRSRV_DEVICE_NODE *psDeviceNode;

	PVR_DPF ((PVR_DBG_MESSAGE,
			"FreeBuf: pBuf=0x%x: DevVAddr=%08X CpuVAddr=0x%x CpuPAddr=%08X",
			(IMG_UINTPTR_T)pBuf, pBuf->DevVAddr.uiAddr,
			(IMG_UINTPTR_T)pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));

	/* record mapping (pBuf->pMapping may be nulled below once released) */
	pMapping = pBuf->pMapping;

	psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
	if (psDeviceNode->pfnCacheInvalidate)
	{
		psDeviceNode->pfnCacheInvalidate(psDeviceNode);
	}

	if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
	{
		/* Resources are only released once the buffer is neither
		   referenced nor exported (sub-memhandles are still required
		   by exported mappings). */
		if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
		{
			/* user supplied Device Virtual Address */
			if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
			{
				/* RAM backed allocation with a user-managed DevVAddr is
				   an unsupported combination; log and fall through. */
				PVR_DPF ((PVR_DBG_ERROR, "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported"));
			}
			else
			{
				/* free the mocked-up mapping */
				OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
				pBuf->pMapping = IMG_NULL; /*nulling pointer alias*/
			}
		}
	}
	else
	{
		/* BM supplied Device Virtual Address */
		if(pBuf->hOSMemHandle != pMapping->hOSMemHandle)
		{
			/* The buffer owns a sub-memhandle distinct from the mapping's;
			   release it only when no references or exports remain. */
			if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
			{
				OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
			}
		}

		if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
		{
			/* Submemhandle is required by exported mappings */
			if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
			{
				/*
				   RAM backed allocation
				   Note: currently no need to distinguish between hm_env and hm_contiguous
				*/
				PVR_ASSERT(pBuf->ui32ExportCount == 0);
				if (pBuf->pMapping->ui32Flags & (PVRSRV_MEM_SPARSE | PVRSRV_HAP_GPU_PAGEABLE))
				{
					IMG_UINT32 ui32FreeSize = 0;
					IMG_PVOID pvFreePtr = IMG_NULL;

					if(pBuf->pMapping->ui32Flags & PVRSRV_MEM_SPARSE)
					{
						/* The sparse chunk-map array was allocated alongside
						   the mapping; remember it so it can be freed after
						   BM_FreeMemory releases the mapping itself. */
						ui32FreeSize = sizeof(IMG_BOOL) * pBuf->pMapping->ui32NumVirtChunks;
						pvFreePtr = pBuf->pMapping->pabMapChunk;
					}

					/* With sparse and page-able allocations we don't go through the sub-alloc RA */
					BM_FreeMemory(pBuf->pMapping->pBMHeap, pBuf->DevVAddr.uiAddr, pBuf->pMapping);

					if(pvFreePtr)
					{
						OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
								  ui32FreeSize,
								  pvFreePtr,
								  IMG_NULL);
					}
				}
				else
				{
					RA_Free (pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr, IMG_FALSE);
				}
			}
		}
		else
		{
			if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
			{
				/* Wrapped (not RAM-backed) memory: undo whichever
				   reservation/registration created the mapping. */
				switch (pMapping->eCpuMemoryOrigin)
				{
					case hm_wrapped:
						OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
						break;
					case hm_wrapped_virtaddr:
						OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
						break;
					case hm_wrapped_scatter:
						OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
						break;
					case hm_wrapped_scatter_virtaddr:
						OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
						break;
					default:
						break;
				}
			}

			/* Device mappings are torn down only on the allocator path. */
			if (bFromAllocator)
				DevMemoryFree (pMapping);

			if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
			{
				/* free the mocked-up mapping */
				OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
				pBuf->pMapping = IMG_NULL; /*nulling pointer alias*/
			}
		}
	}

	if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
	{
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, IMG_NULL);
		/*not nulling pointer, copy on stack*/
	}
}

/*!
******************************************************************************

	@Function	BM_DestroyContext_AnyCb

	@Description	Destroy a buffer manager heap.
	@Input	psBMHeap

	@Return	PVRSRV_ERROR

 *****************************************************************************/
static PVRSRV_ERROR BM_DestroyContext_AnyCb(BM_HEAP *psBMHeap)
{
	/* Only heaps that own an import arena can veto destruction:
	   RA_TestDelete fails while outstanding allocations remain. */
	if(psBMHeap->ui32Attribs
	&	(PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
		|PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
	{
		if (psBMHeap->pImportArena)
		{
			IMG_BOOL bTestDelete = RA_TestDelete(psBMHeap->pImportArena);
			if (!bTestDelete)
			{
				PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext_AnyCb: RA_TestDelete failed"));
				return PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP;
			}
		}
	}
	return PVRSRV_OK;
}


/*!
******************************************************************************

	@Function	BM_DestroyContext

	@Description	Destroy a buffer manager context. All allocated buffers must be
					free'd before calling this function. This function is called
					also to perform cleanup during aborted initialisations so it's
					fairly careful not to assume any given resource has really been
					created/allocated.

	@Return	PVRSRV_ERROR

 *****************************************************************************/
PVRSRV_ERROR
BM_DestroyContext(IMG_HANDLE hBMContext,
				  IMG_BOOL *pbDestroyed)
{
	PVRSRV_ERROR eError;
	BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;

	PVR_DPF ((PVR_DBG_MESSAGE, "BM_DestroyContext"));

	if (pbDestroyed != IMG_NULL)
	{
		*pbDestroyed = IMG_FALSE;
	}

	/*
		Exit straight away if it's an invalid context handle
	*/
	if (pBMContext == IMG_NULL)
	{
		PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Invalid handle"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	pBMContext->ui32RefCount--;

	if (pBMContext->ui32RefCount > 0)
	{
		/* Just return if there are more references to this context */
		return PVRSRV_OK;
	}

	/*
		Check whether there is a bug in the client which brought it here before
		all the allocations have been freed.
	*/
	eError = List_BM_HEAP_PVRSRV_ERROR_Any(pBMContext->psBMHeap, &BM_DestroyContext_AnyCb);
	if(eError != PVRSRV_OK)
	{
		PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: List_BM_HEAP_PVRSRV_ERROR_Any failed"));
		return eError;
	}
	else
	{
		/* free the device memory context (the resman callback performs the
		   actual heap/MMU teardown) */
		eError = ResManFreeResByPtr(pBMContext->hResItem, CLEANUP_WITH_POLL);
		if(eError != PVRSRV_OK)
		{
			PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeResByPtr failed %d",eError));
			return eError;
		}

		/* mark context as destroyed */
		if (pbDestroyed != IMG_NULL)
		{
			*pbDestroyed = IMG_TRUE;
		}
	}

	return PVRSRV_OK;
}


/*!
******************************************************************************

	@Function	BM_DestroyContextCallBack_AnyVaCb

	@Description	Destroy Device memory context

	@Input	psBMHeap - heap to be freed.
	@Input	va - list of variable arguments with the following contents:
			- psDeviceNode
	@Return	PVRSRV_ERROR

 *****************************************************************************/
static PVRSRV_ERROR BM_DestroyContextCallBack_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
{
	PVRSRV_DEVICE_NODE *psDeviceNode;
	psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);

	/* Free up the import arenas */
	if(psBMHeap->ui32Attribs
	&	(PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
		|PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
	{
		if (psBMHeap->pImportArena)
		{
			RA_Delete (psBMHeap->pImportArena);
		}
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_DestroyContext: backing store type unsupported"));
		return PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE;
	}

	/* Free up the MMU Heaps */
	psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);

	/* Free Heap memory */
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
	/*not nulling pointer, copy on stack*/

	return PVRSRV_OK;
}


/*!
******************************************************************************

	@Function	BM_DestroyContextCallBack

	@Description	Destroy Device memory context (resman cleanup callback).

	@Input	pvParam - opaque void ptr param (the BM_CONTEXT)
	@Input	ui32Param - opaque unsigned long param (unused)

	@Return	PVRSRV_ERROR

 *****************************************************************************/
static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_PVOID   pvParam,
											  IMG_UINT32  ui32Param,
											  IMG_BOOL    bDummy)
{
	BM_CONTEXT *pBMContext = pvParam;
	PVRSRV_DEVICE_NODE *psDeviceNode;
	PVRSRV_ERROR eError;

	PVR_UNREFERENCED_PARAMETER(ui32Param);
	PVR_UNREFERENCED_PARAMETER(bDummy);

	/*
		Get DeviceNode from BMcontext
	*/
	psDeviceNode = pBMContext->psDeviceNode;

	/*
		Free the import arenas and heaps
	*/
	eError = List_BM_HEAP_PVRSRV_ERROR_Any_va(pBMContext->psBMHeap,
											&BM_DestroyContextCallBack_AnyVaCb,
											psDeviceNode);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}
	/*
		'Finalise' the MMU
	*/
	if (pBMContext->psMMUContext)
	{
		psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
	}

	/*
		Free up generic, useful resources - if they were allocated.
	*/
	if (pBMContext->pBufferHash)
	{
		HASH_Delete(pBMContext->pBufferHash);
	}

	if (pBMContext == psDeviceNode->sDevMemoryInfo.pBMKernelContext)
	{
		/* Freeing the kernel context */
		psDeviceNode->sDevMemoryInfo.pBMKernelContext = IMG_NULL;
	}
	else
	{
		if (pBMContext->ppsThis != IMG_NULL)
		{
			/*
			 * Remove context from the linked list
			 */
			List_BM_CONTEXT_Remove(pBMContext);
		}
	}

	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_CONTEXT), pBMContext, IMG_NULL);
	/*not nulling pointer, copy on stack*/

	return PVRSRV_OK;
}


/* List 'Any' callback: if the resman context matches, take a reference on the
   existing BM context and return it; otherwise return IMG_NULL to continue. */
static IMG_HANDLE BM_CreateContext_IncRefCount_AnyVaCb(BM_CONTEXT *pBMContext, va_list va)
{
	PRESMAN_CONTEXT hResManContext;
	hResManContext = va_arg(va, PRESMAN_CONTEXT);
	if(ResManFindResourceByPtr(hResManContext, pBMContext->hResItem) == PVRSRV_OK)
	{
		/* just increment the refcount and return the memory context found for this process */
		pBMContext->ui32RefCount++;
		return pBMContext;
	}
	return IMG_NULL;
}

/* List 'ForEach' callback: insert shared kernel heaps into the new context's
   MMU page directory/table. */
static IMG_VOID BM_CreateContext_InsertHeap_ForEachVaCb(BM_HEAP *psBMHeap, va_list va)
{
	PVRSRV_DEVICE_NODE *psDeviceNode;
	BM_CONTEXT *pBMContext;
	psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);
	pBMContext = va_arg(va, BM_CONTEXT*);
	switch(psBMHeap->sDevArena.DevMemHeapType)
	{
		case DEVICE_MEMORY_HEAP_SHARED:
		case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
		{
			/* insert the heap into the device's MMU page directory/table */
			psDeviceNode->pfnMMUInsertHeap(pBMContext->psMMUContext, psBMHeap->pMMUHeap);
			break;
		}
		/* NOTE(review): other heap types are intentionally ignored here. */
	}
}

/*!
******************************************************************************

	@Function	BM_CreateContext

	@Description	Creates and initialises a buffer manager context. This function must be called
					before any other buffer manager functions.
+ + @Return valid BM context handle - Success + IMG_NULL - Failed + + *****************************************************************************/ +IMG_HANDLE +BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_PHYADDR *psPDDevPAddr, + PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_BOOL *pbCreated) +{ + BM_CONTEXT *pBMContext; +/* BM_HEAP *psBMHeap;*/ + DEVICE_MEMORY_INFO *psDevMemoryInfo; + IMG_BOOL bKernelContext; + PRESMAN_CONTEXT hResManContext; + + PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateContext")); + + if (psPerProc == IMG_NULL) + { + bKernelContext = IMG_TRUE; + hResManContext = psDeviceNode->hResManContext; + } + else + { + bKernelContext = IMG_FALSE; + hResManContext = psPerProc->hResManContext; + } + + if (pbCreated != IMG_NULL) + { + *pbCreated = IMG_FALSE; + } + + /* setup the device memory info. */ + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + + if (bKernelContext == IMG_FALSE) + { + IMG_HANDLE res = (IMG_HANDLE) List_BM_CONTEXT_Any_va(psDevMemoryInfo->pBMContext, + &BM_CreateContext_IncRefCount_AnyVaCb, + hResManContext); + if (res) + { + return res; + } + } + + /* allocate a BM context */ + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (struct _BM_CONTEXT_), + (IMG_PVOID *)&pBMContext, IMG_NULL, + "Buffer Manager Context") != PVRSRV_OK) + { + PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: Alloc failed")); + return IMG_NULL; + } + OSMemSet(pBMContext, 0, sizeof (BM_CONTEXT)); + + /* store the associated devicenode */ + pBMContext->psDeviceNode = psDeviceNode; + + /* This hash table is used to store BM_Wraps in a global way */ + /* INTEGRATION_POINT: 32 is an abitrary limit on the number of hashed BM_wraps */ + pBMContext->pBufferHash = HASH_Create(32); + if (pBMContext->pBufferHash==IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: HASH_Create failed")); + goto cleanup; + } + + if((IMG_NULL == psDeviceNode->pfnMMUInitialise) || (psDeviceNode->pfnMMUInitialise(psDeviceNode, + &pBMContext->psMMUContext, + psPDDevPAddr) != PVRSRV_OK)) 
+ { + PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: MMUInitialise failed")); + goto cleanup; + } + + if(bKernelContext) + { + /* just save the kernel context */ + PVR_ASSERT(psDevMemoryInfo->pBMKernelContext == IMG_NULL); + psDevMemoryInfo->pBMKernelContext = pBMContext; + } + else + { + /* + On the creation of each new context we must + insert the kernel context's 'shared' and 'shared_exported' + heaps into the new context + - check the kernel context and heaps exist + */ + PVR_ASSERT(psDevMemoryInfo->pBMKernelContext); + + if (psDevMemoryInfo->pBMKernelContext == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: psDevMemoryInfo->pBMKernelContext invalid")); + goto cleanup; + } + + PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap); + + /* + insert the kernel heaps structures into the new context's shared heap list + Note. this will include the kernel only heaps but these will not actually + be imported into the context nor returned to the client + */ + pBMContext->psBMSharedHeap = psDevMemoryInfo->pBMKernelContext->psBMHeap; + + /* + insert the shared heaps into the MMU page directory/table + for the new context + */ + List_BM_HEAP_ForEach_va(pBMContext->psBMSharedHeap, + &BM_CreateContext_InsertHeap_ForEachVaCb, + psDeviceNode, + pBMContext); + + /* Finally, insert the new context into the list of BM contexts */ + List_BM_CONTEXT_Insert(&psDevMemoryInfo->pBMContext, pBMContext); + } + + /* Increment the refcount, as creation is successful */ + pBMContext->ui32RefCount++; + + /* register with resman */ + pBMContext->hResItem = ResManRegisterRes(hResManContext, + RESMAN_TYPE_DEVICEMEM_CONTEXT, + pBMContext, + 0, + &BM_DestroyContextCallBack); + if (pBMContext->hResItem == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: ResManRegisterRes failed")); + goto cleanup; + } + + if (pbCreated != IMG_NULL) + { + *pbCreated = IMG_TRUE; + } + return (IMG_HANDLE)pBMContext; + +cleanup: + (IMG_VOID)BM_DestroyContextCallBack(pBMContext, 0, 
CLEANUP_WITH_POLL); + + return IMG_NULL; +} + + +static IMG_VOID *BM_CreateHeap_AnyVaCb(BM_HEAP *psBMHeap, va_list va) +{ + DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo; + psDevMemHeapInfo = va_arg(va, DEVICE_MEMORY_HEAP_INFO*); + if (psBMHeap->sDevArena.ui32HeapID == psDevMemHeapInfo->ui32HeapID) + { + /* Match - just return already created heap */ + return psBMHeap; + } + else + { + return IMG_NULL; + } +} + +/*! +****************************************************************************** + + @Function BM_CreateHeap + + @Description Creates and initialises a BM heap for a given BM context. + + @Return + valid heap handle - success + IMG_NULL - failure + + + *****************************************************************************/ +IMG_HANDLE +BM_CreateHeap (IMG_HANDLE hBMContext, + DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo) +{ + BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext; + PVRSRV_DEVICE_NODE *psDeviceNode; + BM_HEAP *psBMHeap; + + PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateHeap")); + + if(!pBMContext) + { + PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: BM_CONTEXT null")); + return IMG_NULL; + } + + psDeviceNode = pBMContext->psDeviceNode; + + /* + * Ensure that the heap size is a multiple of the data page size. + */ + PVR_ASSERT((psDevMemHeapInfo->ui32HeapSize & (psDevMemHeapInfo->ui32DataPageSize - 1)) == 0); + PVR_ASSERT(psDevMemHeapInfo->ui32HeapSize > 0); + + /* + We may be being asked to create a heap in a context which already has one. + Test for refcount > 0 because PVRSRVGetDeviceMemHeapInfoKM doesn't increment the refcount. 
+ This does mean that the first call to PVRSRVCreateDeviceMemContextKM will first try to find + heaps that we already know don't exist + */ + if(pBMContext->ui32RefCount > 0) + { + psBMHeap = (BM_HEAP*)List_BM_HEAP_Any_va(pBMContext->psBMHeap, + &BM_CreateHeap_AnyVaCb, + psDevMemHeapInfo); + + if (psBMHeap) + { + return psBMHeap; + } + } + + + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (BM_HEAP), + (IMG_PVOID *)&psBMHeap, IMG_NULL, + "Buffer Manager Heap") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed")); + return IMG_NULL; + } + + OSMemSet (psBMHeap, 0, sizeof (BM_HEAP)); + + psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID; + psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName; + psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase; + psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize; + psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType; + psBMHeap->sDevArena.ui32DataPageSize = psDevMemHeapInfo->ui32DataPageSize; + psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo; + psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs; +#if defined(SUPPORT_MEMORY_TILING) + psBMHeap->ui32XTileStride = psDevMemHeapInfo->ui32XTileStride; +#endif + + /* tie the heap to the context */ + psBMHeap->pBMContext = pBMContext; + + psBMHeap->pMMUHeap = psDeviceNode->pfnMMUCreate (pBMContext->psMMUContext, + &psBMHeap->sDevArena, + &psBMHeap->pVMArena, + &psBMHeap->psMMUAttrib); + if (!psBMHeap->pMMUHeap) + { + PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed")); + goto ErrorExit; + } + + /* memory is allocated from the OS as required */ + psBMHeap->pImportArena = RA_Create (psDevMemHeapInfo->pszBSName, + 0, 0, IMG_NULL, + MAX(HOST_PAGESIZE(), psBMHeap->sDevArena.ui32DataPageSize), + &BM_ImportMemory, + &BM_FreeMemory, + IMG_NULL, + psBMHeap); + if(psBMHeap->pImportArena == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed")); + goto ErrorExit; + } + + 
if(psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) + { + /* + memory comes from a device memory contiguous allocator (ra) + Note: these arenas are shared across the system so don't delete + as part of heap destroy + */ + psBMHeap->pLocalDevMemArena = psDevMemHeapInfo->psLocalDevMemArena; + if(psBMHeap->pLocalDevMemArena == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: LocalDevMemArena null")); + goto ErrorExit; + } + } + + /* insert heap into head of the heap list */ + List_BM_HEAP_Insert(&pBMContext->psBMHeap, psBMHeap); + + return (IMG_HANDLE)psBMHeap; + + /* handle error case */ +ErrorExit: + + /* Free up the MMU if we created one */ + if (psBMHeap->pMMUHeap != IMG_NULL) + { + psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap); + /* don't finalise psMMUContext as we don't own it */ + } + + /* Free the Heap memory */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL); + /*not nulling pointer, out of scope*/ + + return IMG_NULL; +} + +/*! +****************************************************************************** + + @Function BM_DestroyHeap + + @Description Destroys a BM heap + + @Return + valid heap handle - success + IMG_NULL - failure + + + *****************************************************************************/ +IMG_VOID +BM_DestroyHeap (IMG_HANDLE hDevMemHeap) +{ + BM_HEAP* psBMHeap = (BM_HEAP*)hDevMemHeap; + PVRSRV_DEVICE_NODE *psDeviceNode = psBMHeap->pBMContext->psDeviceNode; + + PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyHeap")); + + if(psBMHeap) + { + /* Free up the import arenas */ + if(psBMHeap->ui32Attribs + & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG + |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) + { + if (psBMHeap->pImportArena) + { + RA_Delete (psBMHeap->pImportArena); + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, "BM_DestroyHeap: backing store type unsupported")); + return; + } + + /* Free up the MMU Heap */ + psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap); + + /* remove from the heap list */ + 
List_BM_HEAP_Remove(psBMHeap); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL); + } + else + { + PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyHeap: invalid heap handle")); + } +} + + +/*! +****************************************************************************** + + @Function BM_Reinitialise + + @Description Reinitialise the buffer manager after a power down event. + + @Return IMG_TRUE - Success + IMG_FALSE - Failed + + *****************************************************************************/ +IMG_BOOL +BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode) +{ + + PVR_DPF((PVR_DBG_MESSAGE, "BM_Reinitialise")); + PVR_UNREFERENCED_PARAMETER(psDeviceNode); + + /* FIXME: Need to reenable all contexts + List_BM_CONTEXT_ForEach(psDeviceNode->sDevMemoryInfo.pBMContext, MMU_Enable); + */ + + return IMG_TRUE; +} + +/*! +****************************************************************************** + + @Function BM_Alloc + + @Description Allocate a buffer mapped into both cpu and device virtual + memory maps. + + @Input hDevMemHeap + @Input psDevVAddr - device virtual address specified by caller (optional) + @Input uSize - require size in bytes of the buffer. + @Input pui32Flags - bit mask of buffer property flags. + @Input uDevVAddrAlignment - required alignment in bytes, or 0. + @Input pvPrivData - opaque private data passed through to allocator + @Input ui32PrivDataLength - length of opaque private data + + @Output phBuf - receives buffer handle + @Output pui32Flags - bit mask of heap property flags. 

	@Return	IMG_TRUE - Success
			IMG_FALSE - Failure

 *****************************************************************************/
IMG_BOOL
BM_Alloc (  IMG_HANDLE			hDevMemHeap,
			IMG_DEV_VIRTADDR	*psDevVAddr,
			IMG_SIZE_T			uSize,
			IMG_UINT32			*pui32Flags,
			IMG_UINT32			uDevVAddrAlignment,
			IMG_PVOID			pvPrivData,
			IMG_UINT32			ui32PrivDataLength,
			IMG_UINT32			ui32ChunkSize,
			IMG_UINT32			ui32NumVirtChunks,
			IMG_UINT32			ui32NumPhysChunks,
			IMG_BOOL			*pabMapChunk,
			BM_HANDLE			*phBuf)
{
	BM_BUF *pBuf;
	BM_CONTEXT *pBMContext;
	BM_HEAP *psBMHeap;
	SYS_DATA *psSysData;
	IMG_UINT32 uFlags;

	/* pui32Flags is both an input (requested flags) and an output
	   (effective flags), so it must be valid. */
	if (pui32Flags == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: invalid parameter"));
		PVR_DBG_BREAK;
		return IMG_FALSE;
	}

	uFlags = *pui32Flags;

	PVR_DPF ((PVR_DBG_MESSAGE,
		  "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)",
			uSize, uFlags, uDevVAddrAlignment));

	SysAcquireData(&psSysData);

	psBMHeap = (BM_HEAP*)hDevMemHeap;
	pBMContext = psBMHeap->pBMContext;

	if(uDevVAddrAlignment == 0)
	{
		uDevVAddrAlignment = 1;
	}

	/*
	 * Allocate something in which to record the allocation's details.
	 */
	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
				   sizeof (BM_BUF),
				   (IMG_PVOID *)&pBuf, IMG_NULL,
				   "Buffer Manager buffer") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED"));
		return IMG_FALSE;
	}
	OSMemSet(pBuf, 0, sizeof (BM_BUF));

	/*
	 * Allocate the memory itself now.
	 */
	if (AllocMemory(pBMContext,
					psBMHeap,
					psDevVAddr,
					uSize,
					uFlags,
					uDevVAddrAlignment,
					pvPrivData,
					ui32PrivDataLength,
					ui32ChunkSize,
					ui32NumVirtChunks,
					ui32NumPhysChunks,
					pabMapChunk,
					pBuf) != IMG_TRUE)
	{
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
		/* not nulling pointer, out of scope */
		PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED"));
		return IMG_FALSE;
	}

	PVR_DPF ((PVR_DBG_MESSAGE,
		  "BM_Alloc (uSize=0x%x, uFlags=0x%x)",
		  uSize, uFlags));

	/*
	 * Assign the handle and return.
	 */
	pBuf->ui32RefCount = 1;
	*phBuf = (BM_HANDLE)pBuf;
	*pui32Flags = uFlags | psBMHeap->ui32Attribs;

	/*
	 * If the user has specified heap CACHETYPE flags themselves,
	 * override any CACHETYPE flags inherited from the heap.
	 */
	if(uFlags & PVRSRV_HAP_CACHETYPE_MASK)
	{
		*pui32Flags &= ~PVRSRV_HAP_CACHETYPE_MASK;
		*pui32Flags |= (uFlags & PVRSRV_HAP_CACHETYPE_MASK);
	}

	return IMG_TRUE;
}



#if defined(PVR_LMA)
/*!
******************************************************************************

	@Function	ValidSysPAddrArrayForDev

	@Description	Verify the array of system addresses is accessible
					by the given device.
+ + @Input psDeviceNode + @Input psSysPAddr - system address array + @Input ui32PageSize - size of address array + + @Return IMG_BOOL + + *****************************************************************************/ +static IMG_BOOL +ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSysPAddr, IMG_UINT32 ui32PageCount, IMG_SIZE_T ui32PageSize) +{ + IMG_UINT32 i; + + for (i = 0; i < ui32PageCount; i++) + { + IMG_SYS_PHYADDR sStartSysPAddr = psSysPAddr[i]; + IMG_SYS_PHYADDR sEndSysPAddr; + + if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr)) + { + return IMG_FALSE; + } + + sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32PageSize; + + if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr)) + { + return IMG_FALSE; + } + } + + return IMG_TRUE; +} + +/*! +****************************************************************************** + + @Function ValidSysPAddrRangeForDev + + @Description Verify a system address range is accessible + by the given device. 

	@Input	psDeviceNode
	@Input	sStartSysPAddr - starting system address
	@Input	ui32Range - length of address range

	@Return	IMG_BOOL

 *****************************************************************************/
static IMG_BOOL
ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStartSysPAddr, IMG_SIZE_T ui32Range)
{
	IMG_SYS_PHYADDR sEndSysPAddr;

	/* Both the start and the end of the range must translate to a valid
	   device physical address. */
	if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr))
	{
		return IMG_FALSE;
	}

	sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32Range;

	if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
	{
		return IMG_FALSE;
	}

	return IMG_TRUE;
}

/* Size of the host-page-aligned mapping needed to cover ui32ByteSize bytes
   starting ui32PageOffset bytes into a page. */
#define WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) HOST_PAGEALIGN((ui32ByteSize) + (ui32PageOffset))

/* Number of host pages covered by such a mapping. */
#define WRAP_PAGE_COUNT(ui32ByteSize, ui32PageOffset, ui32HostPageSize)	(WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) / (ui32HostPageSize))

#endif


/*!
******************************************************************************

	@Function	BM_Wrap

	@Description	Create a buffer which wraps user provided system physical
					memory.
					The wrapped memory must be page aligned. BM_Wrap will
					roundup the size to a multiple of cpu pages.

	@Input	ui32Size - size of memory to wrap.
	@Input	ui32Offset - Offset into page of memory to wrap.
	@Input	bPhysContig - Is the wrap physically contiguous.
	@Input	psSysAddr - list of system physical page addresses of memory to wrap.
	@Input	pvCPUVAddr - optional CPU kernel virtual address (Page aligned) of memory to wrap.
	@Input	uFlags - bit mask of buffer property flags.
	@Output	phBuf - receives the buffer handle.

	@Return	IMG_TRUE - Success.
			IMG_FALSE - Failed

 *****************************************************************************/
IMG_BOOL
BM_Wrap (	IMG_HANDLE hDevMemHeap,
			IMG_SIZE_T ui32Size,
			IMG_SIZE_T ui32Offset,
			IMG_BOOL bPhysContig,
			IMG_SYS_PHYADDR *psSysAddr,
			IMG_VOID *pvCPUVAddr,
			IMG_UINT32 *pui32Flags,
			BM_HANDLE *phBuf)
{
	BM_BUF *pBuf;
	BM_CONTEXT *psBMContext;
	BM_HEAP *psBMHeap;
	SYS_DATA *psSysData;
	IMG_SYS_PHYADDR sHashAddress;
	IMG_UINT32 uFlags;

	psBMHeap = (BM_HEAP*)hDevMemHeap;
	psBMContext = psBMHeap->pBMContext;

	/* Start from the heap attributes, then let the caller override the
	   cache type if one was explicitly requested. */
	uFlags = psBMHeap->ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK | PVRSRV_HAP_MAPPING_CTRL_MASK);

	if ((pui32Flags != IMG_NULL) && ((*pui32Flags & PVRSRV_HAP_CACHETYPE_MASK) != 0))
	{
		uFlags &= ~PVRSRV_HAP_CACHETYPE_MASK;
		uFlags |= *pui32Flags & PVRSRV_HAP_CACHETYPE_MASK;
	}

	PVR_DPF ((PVR_DBG_MESSAGE,
		  "BM_Wrap (uSize=0x%x, uOffset=0x%x, bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
			ui32Size, ui32Offset, bPhysContig, (IMG_UINTPTR_T)pvCPUVAddr, uFlags));

	SysAcquireData(&psSysData);

#if defined(PVR_LMA)
	if (bPhysContig)
	{
		if (!ValidSysPAddrRangeForDev(psBMContext->psDeviceNode, *psSysAddr, WRAP_MAPPING_SIZE(ui32Size, ui32Offset)))
		{
			PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: System address range invalid for device"));
			return IMG_FALSE;
		}
	}
	else
	{
		IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();

		if (!ValidSysPAddrArrayForDev(psBMContext->psDeviceNode, psSysAddr, WRAP_PAGE_COUNT(ui32Size, ui32Offset, ui32HostPageSize), ui32HostPageSize))
		{
			PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: Array of system addresses invalid for device"));
			return IMG_FALSE;
		}
	}
#endif
	/*
	 * Insert the System Physical Address of the first page into the hash so we can optimise multiple wraps of the
	 * same memory.
	 */
	sHashAddress = psSysAddr[0];

	/* Add the in-page offset to ensure a unique hash */
	sHashAddress.uiAddr += ui32Offset;

	/* See if this address has already been wrapped */
	pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, sHashAddress.uiAddr);

	if(pBuf)
	{
		IMG_SIZE_T ui32MappingSize = HOST_PAGEALIGN (ui32Size + ui32Offset);

		/* Check base address, size and contiguity type match */
		if(pBuf->pMapping->uSize == ui32MappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
												pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr))
		{
			PVR_DPF((PVR_DBG_MESSAGE,
					"BM_Wrap (Matched previous Wrap! uSize=0x%x, uOffset=0x%x, SysAddr=%08X)",
					ui32Size, ui32Offset, sHashAddress.uiAddr));

			PVRSRVBMBufIncRef(pBuf);
			*phBuf = (BM_HANDLE)pBuf;
			if(pui32Flags)
				*pui32Flags = uFlags;

			return IMG_TRUE;
		}
		else
		{
			/* Otherwise removed that item from the hash table
			   (a workaround for buffer device class) */
			HASH_Remove(psBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddress.uiAddr);
		}
	}

	/*
	 * Allocate something in which to record the allocation's details.
	 */
	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
						sizeof (BM_BUF),
						(IMG_PVOID *)&pBuf, IMG_NULL,
						"Buffer Manager buffer") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED"));
		return IMG_FALSE;
	}
	OSMemSet(pBuf, 0, sizeof (BM_BUF));

	/*
	 * Actually perform the memory wrap.
	 */
	if (WrapMemory (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr, uFlags, pBuf) != IMG_TRUE)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
		/*not nulling pointer, out of scope*/
		return IMG_FALSE;
	}

	/* Only insert the buffer in the hash table if it is contiguous - allows for optimisation of multiple wraps
	 * of the same contiguous buffer.
	 */
	if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
	{
		/* Have we calculated the right Hash key ? */
		PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr == pBuf->CpuPAddr.uiAddr);

		if (!HASH_Insert (psBMContext->pBufferHash, sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
		{
			FreeBuf (pBuf, uFlags, IMG_TRUE);
			PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED"));
			return IMG_FALSE;
		}
	}

	PVR_DPF ((PVR_DBG_MESSAGE,
			"BM_Wrap (uSize=0x%x, uFlags=0x%x, devVAddr=%08X)",
			ui32Size, uFlags, pBuf->DevVAddr.uiAddr));

	/*
	 * Assign the handle and return.
	 */
	pBuf->ui32RefCount = 1;
	*phBuf = (BM_HANDLE)pBuf;
	if(pui32Flags)
	{
		/* need to override the heap attributes SINGLE PROC to MULT_PROC. */
		*pui32Flags = (uFlags & ~PVRSRV_HAP_MAPTYPE_MASK) | PVRSRV_HAP_MULTI_PROCESS;
	}

	return IMG_TRUE;
}

/*!
******************************************************************************

	@Function	BM_Export

	@Description	Export a buffer previously allocated via BM_Alloc.

	@Input	hBuf - buffer handle.

	@Return	None.

 *****************************************************************************/
IMG_VOID
BM_Export (BM_HANDLE hBuf)
{
	BM_BUF *pBuf = (BM_BUF *)hBuf;

	PVRSRVBMBufIncExport(pBuf);
}

/*!
******************************************************************************
	@Function	BM_FreeExport

	@Description	Drop an export reference previously taken via BM_Export
					and free the underlying buffer if appropriate.
					(NOTE(review): the original header here was a copy-paste
					of BM_Export's.)

	@Input	hBuf - buffer handle.
	@Input	ui32Flags - flags

	@Return	None.
**************************************************************************/
IMG_VOID
BM_FreeExport(BM_HANDLE hBuf,
		IMG_UINT32 ui32Flags)
{
	BM_BUF *pBuf = (BM_BUF *)hBuf;

	PVRSRVBMBufDecExport(pBuf);
	FreeBuf (pBuf, ui32Flags, IMG_FALSE);
}

/*!
******************************************************************************
	@Function	BM_Free

	@Description	Free a buffer previously allocated via BM_Alloc or wrapped
					via BM_Wrap. (NOTE(review): the original header here was
					mislabelled "BM_FreeExport".)

	@Input	hBuf - buffer handle.
	@Input	ui32Flags - flags

	@Return	None.
**************************************************************************/
IMG_VOID
BM_Free (BM_HANDLE hBuf,
		IMG_UINT32 ui32Flags)
{
	BM_BUF *pBuf = (BM_BUF *)hBuf;
	SYS_DATA *psSysData;
	IMG_SYS_PHYADDR sHashAddr;

	PVR_DPF ((PVR_DBG_MESSAGE, "BM_Free (h=0x%x)", (IMG_UINTPTR_T)hBuf));
	PVR_ASSERT (pBuf!=IMG_NULL);

	if (pBuf == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_Free: invalid parameter"));
		return;
	}

	SysAcquireData(&psSysData);

	PVRSRVBMBufDecRef(pBuf);
	if(pBuf->ui32RefCount == 0)
	{
		/* Wrapped contiguous buffers were registered in the wrap hash on
		   creation; remove the entry before freeing. */
		if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
		{
			sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);

			HASH_Remove (pBuf->pMapping->pBMHeap->pBMContext->pBufferHash,	(IMG_UINTPTR_T)sHashAddr.uiAddr);
		}
		FreeBuf (pBuf, ui32Flags, IMG_TRUE);
	}
}


/*!
******************************************************************************

	@Function	BM_HandleToCpuVaddr

	@Description	Retrieve the cpu virtual address associated with a buffer.

	@Input	hBuf - buffer handle.

	@Return	buffer's cpu virtual address, or NULL if none exists

 *****************************************************************************/
IMG_CPU_VIRTADDR
BM_HandleToCpuVaddr (BM_HANDLE hBuf)
{
	BM_BUF *pBuf = (BM_BUF *)hBuf;

	PVR_ASSERT (pBuf != IMG_NULL);
	if (pBuf == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_HandleToCpuVaddr: invalid parameter"));
		return IMG_NULL;
	}

	PVR_DPF ((PVR_DBG_MESSAGE,
				"BM_HandleToCpuVaddr(h=0x%x)=0x%x",
				(IMG_UINTPTR_T)hBuf, (IMG_UINTPTR_T)pBuf->CpuVAddr));
	return pBuf->CpuVAddr;
}


/*!
+****************************************************************************** + + @Function BM_HandleToDevVaddr + + @Description Retreive the device virtual address associated with a buffer. + + @Input hBuf - buffer handle. + + @Return buffers device virtual address. + + *****************************************************************************/ +IMG_DEV_VIRTADDR +BM_HandleToDevVaddr (BM_HANDLE hBuf) +{ + BM_BUF *pBuf = (BM_BUF *)hBuf; + + PVR_ASSERT (pBuf != IMG_NULL); + if (pBuf == IMG_NULL) + { + IMG_DEV_VIRTADDR DevVAddr = {0}; + PVR_DPF((PVR_DBG_ERROR, "BM_HandleToDevVaddr: invalid parameter")); + return DevVAddr; + } + + PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=0x%x)=%08X", (IMG_UINTPTR_T)hBuf, pBuf->DevVAddr.uiAddr)); + return pBuf->DevVAddr; +} + + +/*! +****************************************************************************** + + @Function BM_HandleToSysPaddr + + @Description Retreive the system physical address associated with a buffer. + + @Input hBuf - buffer handle. + + @Return buffers device virtual address. + + *****************************************************************************/ +IMG_SYS_PHYADDR +BM_HandleToSysPaddr (BM_HANDLE hBuf) +{ + BM_BUF *pBuf = (BM_BUF *)hBuf; + + PVR_ASSERT (pBuf != IMG_NULL); + + if (pBuf == IMG_NULL) + { + IMG_SYS_PHYADDR PhysAddr = {0}; + PVR_DPF((PVR_DBG_ERROR, "BM_HandleToSysPaddr: invalid parameter")); + return PhysAddr; + } + + PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=0x%x)=%08X", (IMG_UINTPTR_T)hBuf, pBuf->CpuPAddr.uiAddr)); + return SysCpuPAddrToSysPAddr (pBuf->CpuPAddr); +} + +/*! +****************************************************************************** + + @Function BM_HandleToMemOSHandle + + @Description Retreive the underlying memory handle associated with a buffer. + + @Input hBuf - buffer handle. + + @Return OS Specific memory handle. 
+ + *****************************************************************************/ +IMG_HANDLE +BM_HandleToOSMemHandle(BM_HANDLE hBuf) +{ + BM_BUF *pBuf = (BM_BUF *)hBuf; + + PVR_ASSERT (pBuf != IMG_NULL); + + if (pBuf == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_HandleToOSMemHandle: invalid parameter")); + return IMG_NULL; + } + + PVR_DPF ((PVR_DBG_MESSAGE, + "BM_HandleToOSMemHandle(h=0x%x)=0x%x", + (IMG_UINTPTR_T)hBuf, (IMG_UINTPTR_T)pBuf->hOSMemHandle)); + return pBuf->hOSMemHandle; +} + +/*---------------------------------------------------------------------------- +<function> + FUNCTION: BM_UnmapFromDev + + PURPOSE: Unmaps a buffer from GPU virtual address space, but otherwise + leaves buffer intact (ie. not changing any CPU virtual space + mappings, etc). This in conjunction with BM_RemapToDev() can + be used to migrate buffers in and out of GPU virtual address + space to deal with fragmentation and/or limited size of GPU + MMU. + + PARAMETERS: In: hBuf - buffer handle. + RETURNS: IMG_TRUE - Success + IMG_FALSE - Failure +</function> +-----------------------------------------------------------------------------*/ +IMG_INT32 +BM_UnmapFromDev(BM_HANDLE hBuf) +{ + BM_BUF *pBuf = (BM_BUF *)hBuf; + BM_MAPPING *pMapping; + IMG_INT32 result; + + PVR_ASSERT (pBuf != IMG_NULL); + + if (pBuf == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_UnmapFromDev: invalid parameter")); + return -(PVRSRV_ERROR_INVALID_PARAMS); + } + + pMapping = pBuf->pMapping; + + if ((pMapping->ui32Flags & PVRSRV_HAP_GPU_PAGEABLE) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "BM_UnmapFromDev: cannot unmap non-pageable buffer")); + return -(PVRSRV_ERROR_STILL_MAPPED); + } + + result = DevMemoryFree(pMapping); + + if(result == 0) + pBuf->DevVAddr.uiAddr = PVRSRV_BAD_DEVICE_ADDRESS; + + return result; +} + +/*---------------------------------------------------------------------------- +<function> + FUNCTION: BM_RemapToDev + + PURPOSE: Maps a buffer back into GPU virtual address space, after it + has been 
BM_UnmapFromDev()'d. After this operation, the GPU + virtual address may have changed, so BM_HandleToDevVaddr() + should be called to get the new address. + + PARAMETERS: In: hBuf - buffer handle. + RETURNS: IMG_TRUE - Success + IMG_FALSE - Failure +</function> +-----------------------------------------------------------------------------*/ +IMG_INT32 +BM_RemapToDev(BM_HANDLE hBuf) +{ + BM_BUF *pBuf = (BM_BUF *)hBuf; + BM_MAPPING *pMapping; + IMG_INT32 mapCount; + + PVR_ASSERT (pBuf != IMG_NULL); + + if (pBuf == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_RemapToDev: invalid parameter")); + return -PVRSRV_ERROR_INVALID_PARAMS; + } + + pMapping = pBuf->pMapping; + + if ((pMapping->ui32Flags & PVRSRV_HAP_GPU_PAGEABLE) == 0) + { + PVR_DPF((PVR_DBG_ERROR, "BM_RemapToDev: cannot remap non-pageable buffer")); + return -PVRSRV_ERROR_BAD_MAPPING; + } + + mapCount = DevMemoryAlloc(pMapping->pBMHeap->pBMContext, pMapping, IMG_NULL, + pMapping->ui32Flags, pMapping->ui32DevVAddrAlignment, &pBuf->DevVAddr); + + if(mapCount <= 0) + { + PVR_DPF((PVR_DBG_WARNING, "BM_RemapToDev: failed to allocate device memory")); + } + + return mapCount; +} + +/*! +****************************************************************************** + + @Function DevMemoryAlloc + + @Description Allocate device memory for a given physical/virtual memory + mapping. We handle the main cases where device MMU mappings + are required - these are the dynamic cases: all wrappings of + host OS memory and host OS imports for SYS_MMU_NORMAL mode. + + If no MMU support is required then we simply map device virtual + space as device physical space. + + @Input pBMContext - the pager to allocate from. + @Output pMapping - the mapping descriptor to be filled in for this + allocation. + @Output pActualSize - the actual size of the block allocated in + bytes. + @Input uFlags - allocation flags + @Input dev_vaddr_alignment - required device virtual address + alignment, or 0. 
+ @Output pDevVAddr - receives the device virtual base address of the + allocated block. + @Return IMG_INT32 - Reference count + -1 - Failed. + + *****************************************************************************/ +static IMG_INT32 +DevMemoryAlloc (BM_CONTEXT *pBMContext, + BM_MAPPING *pMapping, + IMG_SIZE_T *pActualSize, + IMG_UINT32 uFlags, + IMG_UINT32 dev_vaddr_alignment, + IMG_DEV_VIRTADDR *pDevVAddr) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; +#ifdef PDUMP + IMG_UINT32 ui32PDumpSize = (IMG_UINT32)pMapping->uSize; +#endif + + if(pMapping->ui32MappingCount > 0) + { + pMapping->ui32MappingCount++; + *pDevVAddr = pMapping->DevVAddr; + return pMapping->ui32MappingCount; + } + + psDeviceNode = pBMContext->psDeviceNode; + + pMapping->ui32DevVAddrAlignment = dev_vaddr_alignment; + + if(uFlags & PVRSRV_MEM_INTERLEAVED) + { + /* double the size */ + /* don't continue to alter the size each time a buffer is remapped.. + * we only want to do this the first time + */ + /* TODO: FIXME: There is something wrong with this logic */ + if (pMapping->ui32MappingCount == 0) + pMapping->uSize *= 2; + } + +#ifdef PDUMP + if(uFlags & PVRSRV_MEM_DUMMY) + { + /* only one page behind a dummy allocation */ + ui32PDumpSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize; + } +#endif + + /* Check we haven't fall through a gap */ + PVR_ASSERT(pMapping->uSizeVM != 0); + /* allocate device linear space */ + if (!psDeviceNode->pfnMMUAlloc (pMapping->pBMHeap->pMMUHeap, + pMapping->uSizeVM, + pActualSize, + 0, + dev_vaddr_alignment, + &(pMapping->DevVAddr))) + { + PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc")); + pDevVAddr->uiAddr = PVRSRV_BAD_DEVICE_ADDRESS; + return -(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY); + } + +#ifdef SUPPORT_SGX_MMU_BYPASS + EnableHostAccess(pBMContext->psMMUContext); +#endif + +#if defined(PDUMP) + /* pdump the memory allocate */ + PDUMPMALLOCPAGES(&psDeviceNode->sDevId, + pMapping->DevVAddr.uiAddr, + pMapping->CpuVAddr, + pMapping->hOSMemHandle, + 
ui32PDumpSize, + pMapping->pBMHeap->sDevArena.ui32DataPageSize, +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + psDeviceNode->pfnMMUIsHeapShared(pMapping->pBMHeap->pMMUHeap), +#else + IMG_FALSE, // unused +#endif /* SUPPORT_PDUMP_MULTI_PROCESS */ + (IMG_HANDLE)pMapping); +#endif + + switch (pMapping->eCpuMemoryOrigin) + { + case hm_wrapped: + case hm_wrapped_virtaddr: + case hm_contiguous: + { + if (uFlags & PVRSRV_MEM_SPARSE) + { + /* Check if this device supports sparse mappings */ + PVR_ASSERT(psDeviceNode->pfnMMUMapPagesSparse != IMG_NULL); + psDeviceNode->pfnMMUMapPagesSparse(pMapping->pBMHeap->pMMUHeap, + pMapping->DevVAddr, + SysCpuPAddrToSysPAddr (pMapping->CpuPAddr), + pMapping->ui32ChunkSize, + pMapping->ui32NumVirtChunks, + pMapping->ui32NumPhysChunks, + pMapping->pabMapChunk, + uFlags, + (IMG_HANDLE)pMapping); + } + else + { + psDeviceNode->pfnMMUMapPages ( pMapping->pBMHeap->pMMUHeap, + pMapping->DevVAddr, + SysCpuPAddrToSysPAddr (pMapping->CpuPAddr), + pMapping->uSize, + uFlags, + (IMG_HANDLE)pMapping); + } + *pDevVAddr = pMapping->DevVAddr; + break; + } + case hm_env: + { + if (uFlags & PVRSRV_MEM_SPARSE) + { + /* Check if this device supports sparse mappings */ + PVR_ASSERT(psDeviceNode->pfnMMUMapShadowSparse != IMG_NULL); + psDeviceNode->pfnMMUMapShadowSparse(pMapping->pBMHeap->pMMUHeap, + pMapping->DevVAddr, + pMapping->ui32ChunkSize, + pMapping->ui32NumVirtChunks, + pMapping->ui32NumPhysChunks, + pMapping->pabMapChunk, + pMapping->CpuVAddr, + pMapping->hOSMemHandle, + pDevVAddr, + uFlags, + (IMG_HANDLE)pMapping); + } + else + { + psDeviceNode->pfnMMUMapShadow ( pMapping->pBMHeap->pMMUHeap, + pMapping->DevVAddr, + pMapping->uSize, + pMapping->CpuVAddr, + pMapping->hOSMemHandle, + pDevVAddr, + uFlags, + (IMG_HANDLE)pMapping); + } + break; + } + case hm_wrapped_scatter: + case hm_wrapped_scatter_virtaddr: + { + psDeviceNode->pfnMMUMapScatter (pMapping->pBMHeap->pMMUHeap, + pMapping->DevVAddr, + pMapping->psSysAddr, + pMapping->uSize, + uFlags, + 
(IMG_HANDLE)pMapping); + + *pDevVAddr = pMapping->DevVAddr; + break; + } + default: + PVR_DPF((PVR_DBG_ERROR, + "Illegal value %d for pMapping->eCpuMemoryOrigin", + pMapping->eCpuMemoryOrigin)); + return -(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE); + } + +#ifdef SUPPORT_SGX_MMU_BYPASS + DisableHostAccess(pBMContext->psMMUContext); +#endif + + pMapping->ui32MappingCount = 1; + + return pMapping->ui32MappingCount; +} + +static IMG_INT32 +DevMemoryFree (BM_MAPPING *pMapping) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_DEV_PHYADDR sDevPAddr; +#ifdef PDUMP + IMG_UINT32 ui32PSize; +#endif + + if(pMapping->ui32MappingCount > 1) + { + pMapping->ui32MappingCount--; + + /* Nothing else to do for now */ + return pMapping->ui32MappingCount; + } + + if (pMapping->ui32MappingCount == 0) + { + /* already unmapped from GPU.. bail */ + return -(PVRSRV_ERROR_MAPPING_NOT_FOUND); + } + + /* Then pMapping->ui32MappingCount is 1 + * ready to release GPU mapping */ + + psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode; + sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr); + + if (sDevPAddr.uiAddr != 0) + { +#ifdef PDUMP + /* pdump the memory free */ + if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY) + { + /* physical memory size differs in the case of Dummy allocations */ + ui32PSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize; + } + else + { + ui32PSize = (IMG_UINT32)pMapping->uSize; + } + + PDUMPFREEPAGES(pMapping->pBMHeap, + pMapping->DevVAddr, + ui32PSize, + pMapping->pBMHeap->sDevArena.ui32DataPageSize, + (IMG_HANDLE)pMapping, + (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) ? IMG_TRUE : IMG_FALSE, + (pMapping->ui32Flags & PVRSRV_MEM_SPARSE) ? 
IMG_TRUE : IMG_FALSE); +#endif + } + PVR_ASSERT(pMapping->uSizeVM != 0); + psDeviceNode->pfnMMUFree (pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr, IMG_CAST_TO_DEVVADDR_UINT(pMapping->uSizeVM)); + + pMapping->ui32MappingCount = 0; + + return pMapping->ui32MappingCount; +} + +/* If this array grows larger, it might be preferable to use a hashtable rather than an array. */ +#ifndef XPROC_WORKAROUND_NUM_SHAREABLES +#define XPROC_WORKAROUND_NUM_SHAREABLES 500 +#endif + +#define XPROC_WORKAROUND_BAD_SHAREINDEX 0773407734 + +#define XPROC_WORKAROUND_UNKNOWN 0 +#define XPROC_WORKAROUND_ALLOC 1 +#define XPROC_WORKAROUND_MAP 2 + +static IMG_UINT32 gXProcWorkaroundShareIndex = XPROC_WORKAROUND_BAD_SHAREINDEX; +static IMG_UINT32 gXProcWorkaroundState = XPROC_WORKAROUND_UNKNOWN; + +/* PRQA S 0686 10 */ /* force compiler to init structure */ +XPROC_DATA gXProcWorkaroundShareData[XPROC_WORKAROUND_NUM_SHAREABLES] = {{0}}; + +IMG_INT32 BM_XProcGetShareDataRefCount(IMG_UINT32 ui32Index) +{ + if(ui32Index >= XPROC_WORKAROUND_NUM_SHAREABLES) + return -1; + + return gXProcWorkaroundShareData[ui32Index].ui32RefCount; +} + +PVRSRV_ERROR BM_XProcWorkaroundSetShareIndex(IMG_UINT32 ui32Index) +{ + /* if you fail this assertion - did you acquire the mutex? + did you call "set" exactly once? + did you call "unset" exactly once per set? + */ + if (gXProcWorkaroundShareIndex != XPROC_WORKAROUND_BAD_SHAREINDEX) + { + PVR_DPF((PVR_DBG_ERROR, "No, it's already set!")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + gXProcWorkaroundShareIndex = ui32Index; + gXProcWorkaroundState = XPROC_WORKAROUND_MAP; + + return PVRSRV_OK; +} + +PVRSRV_ERROR BM_XProcWorkaroundUnsetShareIndex(IMG_UINT32 ui32Index) +{ + /* if you fail this assertion - did you acquire the mutex? + did you call "set" exactly once? + did you call "unset" exactly once per set? + */ + if (gXProcWorkaroundShareIndex == XPROC_WORKAROUND_BAD_SHAREINDEX) + { + PVR_DPF((PVR_DBG_ERROR, "huh? 
how can it be bad??")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + if (gXProcWorkaroundShareIndex != ui32Index) + { + PVR_DPF((PVR_DBG_ERROR, "gXProcWorkaroundShareIndex == 0x%08x != 0x%08x == ui32Index", gXProcWorkaroundShareIndex, ui32Index)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + gXProcWorkaroundShareIndex = XPROC_WORKAROUND_BAD_SHAREINDEX; + gXProcWorkaroundState = XPROC_WORKAROUND_UNKNOWN; + + return PVRSRV_OK; +} + +PVRSRV_ERROR BM_XProcWorkaroundFindNewBufferAndSetShareIndex(IMG_UINT32 *pui32Index) +{ + /* if you fail this assertion - did you acquire the mutex? + did you call "set" exactly once? + did you call "unset" exactly once per set? + */ + if (gXProcWorkaroundShareIndex != XPROC_WORKAROUND_BAD_SHAREINDEX) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + for (*pui32Index = 0; *pui32Index < XPROC_WORKAROUND_NUM_SHAREABLES; (*pui32Index)++) + { + if (gXProcWorkaroundShareData[*pui32Index].ui32RefCount == 0) + { + gXProcWorkaroundShareIndex = *pui32Index; + gXProcWorkaroundState = XPROC_WORKAROUND_ALLOC; + return PVRSRV_OK; + } + } + + PVR_DPF((PVR_DBG_ERROR, "ran out of shared buffers")); + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + +static PVRSRV_ERROR +XProcWorkaroundAllocShareable(RA_ARENA *psArena, + IMG_UINT32 ui32AllocFlags, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PageSize, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_VOID **ppvCpuVAddr, + IMG_HANDLE *phOSMemHandle) +{ + if ((ui32AllocFlags & PVRSRV_MEM_XPROC) == 0) + { + PVR_DPF((PVR_DBG_VERBOSE, "XProcWorkaroundAllocShareable: bad flags")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32RefCount > 0) + { + PVR_DPF((PVR_DBG_VERBOSE, + "XProcWorkaroundAllocShareable: re-using previously allocated pages")); + + ui32AllocFlags &= ~PVRSRV_HAP_MAPTYPE_MASK; + ui32AllocFlags |= PVRSRV_HAP_SINGLE_PROCESS; + + if (ui32AllocFlags != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags) + { + 
PVR_DPF((PVR_DBG_ERROR, + "%s ERROR: Flags don't match (Shared 0x%08x, Requested 0x%08x)!", + __FUNCTION__, + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags, + ui32AllocFlags)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32Size != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size) + { + PVR_DPF((PVR_DBG_ERROR, + "%s ERROR: Size doesn't match (Shared %d, Requested %d) with flags 0x%08x - 0x%08x!", + __FUNCTION__, + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size, + ui32Size, + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags, + ui32AllocFlags)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32PageSize != gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize) + { + PVR_DPF((PVR_DBG_ERROR, + "%s ERROR: Page Size doesn't match (Shared %d, Requested %d) with flags 0x%08x - 0x%08x!", + __FUNCTION__, + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize, + ui32PageSize, + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags, + ui32AllocFlags)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + *ppvCpuVAddr = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr; + *phOSMemHandle = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle; + + BM_XProcIndexAcquire(gXProcWorkaroundShareIndex); + + return PVRSRV_OK; + } + else + { + if (gXProcWorkaroundState != XPROC_WORKAROUND_ALLOC) + { + PVR_DPF((PVR_DBG_ERROR, + "XPROC workaround in bad state! About to allocate memory from non-alloc state! 
(%d)", + gXProcWorkaroundState)); + } + PVR_ASSERT(gXProcWorkaroundState == XPROC_WORKAROUND_ALLOC); + + if (psArena != IMG_NULL) + { + IMG_CPU_PHYADDR sCpuPAddr; + IMG_SYS_PHYADDR sSysPAddr; + + PVR_DPF((PVR_DBG_VERBOSE, + "XProcWorkaroundAllocShareable: making a NEW allocation from local mem")); + + if (!RA_Alloc (psArena, + ui32Size, + IMG_NULL, + IMG_NULL, + 0, + ui32PageSize, + 0, + pvPrivData, + ui32PrivDataLength, + (IMG_UINTPTR_T *)&sSysPAddr.uiAddr)) + { + PVR_DPF((PVR_DBG_ERROR, "XProcWorkaroundAllocShareable: RA_Alloc(0x%x) FAILED", ui32Size)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + if(OSReservePhys(sCpuPAddr, + ui32Size, + ui32AllocFlags, + IMG_NULL, + (IMG_VOID **)&gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr, + &gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "XProcWorkaroundAllocShareable: OSReservePhys failed")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].sSysPAddr = sSysPAddr; + } + else + { + PVR_DPF((PVR_DBG_VERBOSE, + "XProcWorkaroundAllocShareable: making a NEW allocation from OS")); + + ui32AllocFlags &= ~PVRSRV_HAP_MAPTYPE_MASK; + ui32AllocFlags |= PVRSRV_HAP_SINGLE_PROCESS; + + /* allocate pages from the OS RAM */ + if (OSAllocPages(ui32AllocFlags, + ui32Size, + ui32PageSize, + pvPrivData, + ui32PrivDataLength, + IMG_NULL, /* FIXME: to support cross process sparse allocations */ + (IMG_VOID **)&gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr, + &gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "XProcWorkaroundAllocShareable: OSAllocPages(0x%x) failed", + ui32PageSize)); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } + + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].psArena = psArena; + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32AllocFlags = 
ui32AllocFlags; + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32Size = ui32Size; + gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].ui32PageSize = ui32PageSize; + + *ppvCpuVAddr = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].pvCpuVAddr; + *phOSMemHandle = gXProcWorkaroundShareData[gXProcWorkaroundShareIndex].hOSMemHandle; + + BM_XProcIndexAcquire(gXProcWorkaroundShareIndex); + + return PVRSRV_OK; + } +} + +static PVRSRV_ERROR XProcWorkaroundHandleToSI(IMG_HANDLE hOSMemHandle, IMG_UINT32 *pui32SI) +{ + IMG_UINT32 ui32SI; + IMG_BOOL bFound; + IMG_BOOL bErrorDups; + + bFound = IMG_FALSE; + bErrorDups = IMG_FALSE; + + for (ui32SI = 0; ui32SI < XPROC_WORKAROUND_NUM_SHAREABLES; ui32SI++) + { + if (gXProcWorkaroundShareData[ui32SI].ui32RefCount>0 && gXProcWorkaroundShareData[ui32SI].hOSMemHandle == hOSMemHandle) + { + if (bFound) + { + bErrorDups = IMG_TRUE; + } + else + { + *pui32SI = ui32SI; + bFound = IMG_TRUE; + } + } + } + + if (bErrorDups || !bFound) + { + return PVRSRV_ERROR_BM_BAD_SHAREMEM_HANDLE; + } + + return PVRSRV_OK; +} + +#if defined(PVRSRV_REFCOUNT_DEBUG) +IMG_VOID _BM_XProcIndexAcquireDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index) +#else +IMG_VOID _BM_XProcIndexAcquire(IMG_UINT32 ui32Index) +#endif +{ +#if defined(PVRSRV_REFCOUNT_DEBUG) + PVRSRVBMXProcIncRef2(pszFile, iLine, ui32Index); +#else + PVRSRVBMXProcIncRef(ui32Index); +#endif +} + +#if defined(PVRSRV_REFCOUNT_DEBUG) +IMG_VOID _BM_XProcIndexReleaseDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index) +#else +IMG_VOID _BM_XProcIndexRelease(IMG_UINT32 ui32Index) +#endif +{ +#if defined(PVRSRV_REFCOUNT_DEBUG) + PVRSRVBMXProcDecRef2(pszFile, iLine, ui32Index); +#else + PVRSRVBMXProcDecRef(ui32Index); +#endif + + PVR_DPF((PVR_DBG_VERBOSE, "Reduced refcount of SI[%d] from %d to %d", + ui32Index, gXProcWorkaroundShareData[ui32Index].ui32RefCount+1, gXProcWorkaroundShareData[ui32Index].ui32RefCount)); + + if 
(gXProcWorkaroundShareData[ui32Index].ui32RefCount == 0) + { + if (gXProcWorkaroundShareData[ui32Index].psArena != IMG_NULL) + { + IMG_SYS_PHYADDR sSysPAddr; + + if (gXProcWorkaroundShareData[ui32Index].pvCpuVAddr != IMG_NULL) + { + OSUnReservePhys(gXProcWorkaroundShareData[ui32Index].pvCpuVAddr, + gXProcWorkaroundShareData[ui32Index].ui32Size, + gXProcWorkaroundShareData[ui32Index].ui32AllocFlags, + gXProcWorkaroundShareData[ui32Index].hOSMemHandle); + } + sSysPAddr = gXProcWorkaroundShareData[ui32Index].sSysPAddr; + RA_Free (gXProcWorkaroundShareData[ui32Index].psArena, + sSysPAddr.uiAddr, + IMG_FALSE); + } + else + { + PVR_DPF((PVR_DBG_VERBOSE, "freeing OS memory")); + OSFreePages(gXProcWorkaroundShareData[ui32Index].ui32AllocFlags, + gXProcWorkaroundShareData[ui32Index].ui32PageSize, + gXProcWorkaroundShareData[ui32Index].pvCpuVAddr, + gXProcWorkaroundShareData[ui32Index].hOSMemHandle); + } + } +} + +static IMG_VOID XProcWorkaroundFreeShareable(IMG_HANDLE hOSMemHandle) +{ + IMG_UINT32 ui32SI = (IMG_UINT32)((IMG_UINTPTR_T)hOSMemHandle & 0xffffU); + PVRSRV_ERROR eError; + + eError = XProcWorkaroundHandleToSI(hOSMemHandle, &ui32SI); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "bad handle")); + return; + } + + BM_XProcIndexRelease(ui32SI); +} + + +/*! +****************************************************************************** + + @Function BM_ImportMemory + + @Description Provide a resource allocator with a source of pages of memory + from the Host OS's own allocation. Allocates a block of pages + larger than requested, allowing the resource allocator to + operate a small cache of pre allocated pages. + + @Input pH - buffer manager handle, not the void type is dictated + by the generic nature of the resource allocator interface. 
+ @Input uRequestSize - requested size in bytes + @Output pActualSize - receives the actual size allocated in bytes + which may be >= requested size + @Output ppsMapping - receives the arbitrary user reference + associated with the underlying storage. + @Input uFlags - bit mask of allocation flags + @Input pvPrivData - opaque private data passed through to allocator + @Input ui32PrivDataLength - length of opaque private data + @Output pBase - receives a pointer to the allocated storage. + + @Return IMG_TRUE - success + IMG_FALSE - failed + + *****************************************************************************/ +static IMG_BOOL +BM_ImportMemory (IMG_VOID *pH, + IMG_SIZE_T uRequestSize, + IMG_SIZE_T *pActualSize, + BM_MAPPING **ppsMapping, + IMG_UINT32 uFlags, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINTPTR_T *pBase) +{ + BM_MAPPING *pMapping; + BM_HEAP *pBMHeap = pH; + BM_CONTEXT *pBMContext = pBMHeap->pBMContext; + IMG_INT32 uResult; + IMG_SIZE_T uSize; + IMG_SIZE_T uPSize; + IMG_SIZE_T uDevVAddrAlignment = 0; /* ? 
*/ + + PVR_DPF ((PVR_DBG_MESSAGE, + "BM_ImportMemory (pBMContext=0x%x, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)", + (IMG_UINTPTR_T)pBMContext, uRequestSize, uFlags, uDevVAddrAlignment)); + + PVR_ASSERT (ppsMapping != IMG_NULL); + PVR_ASSERT (pBMContext != IMG_NULL); + + if (ppsMapping == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter")); + goto fail_exit; + } + + uSize = HOST_PAGEALIGN (uRequestSize); + PVR_ASSERT (uSize >= uRequestSize); + + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (BM_MAPPING), + (IMG_PVOID *)&pMapping, IMG_NULL, + "Buffer Manager Mapping") != PVRSRV_OK) + { + PVR_DPF ((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc")); + goto fail_exit; + } + + pMapping->hOSMemHandle = 0; + pMapping->CpuVAddr = 0; + pMapping->DevVAddr.uiAddr = 0; + pMapping->ui32MappingCount = 0; + pMapping->CpuPAddr.uiAddr = 0; + pMapping->uSize = uSize; + if ((uFlags & PVRSRV_MEM_SPARSE) == 0) + { + pMapping->uSizeVM = uSize; + } + pMapping->pBMHeap = pBMHeap; + pMapping->ui32Flags = uFlags; + + /* + * If anyone want's to know, pass back the actual size of our allocation. + * There could be up to an extra page's worth of memory which will be marked + * as free in the RA. 
+ */ + if (pActualSize) + { + *pActualSize = uSize; + } + + /* if it's a dummy allocation only use one physical page */ + if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY) + { + uPSize = pBMHeap->sDevArena.ui32DataPageSize; + } + else + { + uPSize = pMapping->uSize; + } + + if (uFlags & PVRSRV_MEM_XPROC) + { + IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs | PVRSRV_MEM_XPROC; + IMG_BOOL bBadBackingStoreType; + + if(uFlags & PVRSRV_MEM_ION) + { + ui32Attribs |= PVRSRV_MEM_ION; + } + + bBadBackingStoreType = IMG_TRUE; + + if ((ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) != 0) + { + uDevVAddrAlignment = MAX(pBMHeap->sDevArena.ui32DataPageSize, HOST_PAGESIZE()); + + + if (uPSize % uDevVAddrAlignment != 0) + { + PVR_DPF((PVR_DBG_ERROR, "Cannot use use this memory sharing workaround with allocations that might be suballocated")); + goto fail_mapping_alloc; + } + uDevVAddrAlignment = 0; /* FIXME: find out why it doesn't work if alignment is specified */ + + /* If the user has specified heap CACHETYPE flags, use them to + * override the flags inherited from the heap. + */ + if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK) + { + ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK; + ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK); + } + + /* allocate "shared" pages. 
*/ + if (XProcWorkaroundAllocShareable(IMG_NULL, + ui32Attribs, + (IMG_UINT32)uPSize, + pBMHeap->sDevArena.ui32DataPageSize, + pvPrivData, + ui32PrivDataLength, + (IMG_VOID **)&pMapping->CpuVAddr, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%x) failed", + uPSize)); + goto fail_mapping_alloc; + } + + /* specify how page addresses are derived */ + /* it works just like "env" now - no need to record + it as shareable, as we use the actual hOSMemHandle + and only divert to our wrapper layer based on Attribs */ + pMapping->eCpuMemoryOrigin = hm_env; + bBadBackingStoreType = IMG_FALSE; + } + + if ((ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) != 0) + { + uDevVAddrAlignment = pBMHeap->sDevArena.ui32DataPageSize; + + if (uPSize % uDevVAddrAlignment != 0) + { + PVR_DPF((PVR_DBG_ERROR, "Cannot use use this memory sharing workaround with allocations that might be suballocated")); + goto fail_mapping_alloc; + } + uDevVAddrAlignment = 0; /* FIXME: find out why it doesn't work if alignment is specified */ + + /* If the user has specified heap CACHETYPE flags, use them to + * override the flags inherited from the heap. + */ + if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK) + { + ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK; + ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK); + } + + /* allocate "shared" pages. 
*/ + if (XProcWorkaroundAllocShareable(pBMHeap->pLocalDevMemArena, + ui32Attribs, + (IMG_UINT32)uPSize, + pBMHeap->sDevArena.ui32DataPageSize, + pvPrivData, + ui32PrivDataLength, + (IMG_VOID **)&pMapping->CpuVAddr, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "BM_ImportMemory: XProcWorkaroundAllocShareable(0x%x) failed", + uPSize)); + goto fail_mapping_alloc; + } + + /* specify how page addresses are derived */ + /* it works just like "env" now - no need to record + it as shareable, as we use the actual hOSMemHandle + and only divert to our wrapper layer based on Attribs */ + pMapping->eCpuMemoryOrigin = hm_env; + bBadBackingStoreType = IMG_FALSE; + } + + if (bBadBackingStoreType) + { + PVR_DPF((PVR_DBG_ERROR, "Cannot use this memory sharing workaround with this type of backing store")); + goto fail_mapping_alloc; + } + } + else + + /* + What type of backing store do we have? + */ + if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) + { + IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs; + + /* The allocation code needs to know this is a sparse mapping */ + if (pMapping->ui32Flags & PVRSRV_MEM_SPARSE) + { + ui32Attribs |= PVRSRV_MEM_SPARSE; + } + + /* If the user has specified heap CACHETYPE flags, use them to + * override the flags inherited from the heap. 
+ */ + if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK) + { + ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK; + ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK); + } + + if (pMapping->ui32Flags & PVRSRV_MEM_ALLOCATENONCACHEDMEM) + { + ui32Attribs &= ~PVRSRV_MEM_ALLOCATENONCACHEDMEM; + ui32Attribs |= (pMapping->ui32Flags & PVRSRV_MEM_ALLOCATENONCACHEDMEM); + } + + /* allocate pages from the OS RAM */ + if (OSAllocPages(ui32Attribs, + uPSize, + pBMHeap->sDevArena.ui32DataPageSize, + pvPrivData, + ui32PrivDataLength, + pMapping, + (IMG_VOID **)&pMapping->CpuVAddr, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "BM_ImportMemory: OSAllocPages(0x%x) failed", + uPSize)); + goto fail_mapping_alloc; + } + + /* specify how page addresses are derived */ + pMapping->eCpuMemoryOrigin = hm_env; + } + else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) + { + IMG_SYS_PHYADDR sSysPAddr; + IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs; + + /* The allocation code needs to know this is a sparse mapping */ + if (pMapping->ui32Flags & PVRSRV_MEM_SPARSE) + { + ui32Attribs |= PVRSRV_MEM_SPARSE; + } + + /* allocate pages from the local device memory allocator */ + PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL); + + /* If the user has specified heap CACHETYPE flags, use them to + * override the flags inherited from the heap. 
+ */ + if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK) + { + ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK; + ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK); + } + + if (!RA_Alloc (pBMHeap->pLocalDevMemArena, + uPSize, + IMG_NULL, + IMG_NULL, + 0, + pBMHeap->sDevArena.ui32DataPageSize, + 0, + pvPrivData, + ui32PrivDataLength, + (IMG_UINTPTR_T *)&sSysPAddr.uiAddr)) + { + PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize)); + goto fail_mapping_alloc; + } + + /* derive the CPU virtual address */ + pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + if(OSReservePhys(pMapping->CpuPAddr, + uPSize, + ui32Attribs, + pMapping, + &pMapping->CpuVAddr, + &pMapping->hOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: OSReservePhys failed")); + goto fail_dev_mem_alloc; + } + + /* specify how page addresses are derived */ + pMapping->eCpuMemoryOrigin = hm_contiguous; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: Invalid backing store type")); + goto fail_mapping_alloc; + } + + if(uFlags & PVRSRV_MEM_ION) + { + IMG_UINT32 ui32AddressOffsets[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES]; + IMG_UINT32 ui32NumAddrOffsets = PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES; + + IMG_INT32 retSize = OSGetMemMultiPlaneInfo(pMapping->hOSMemHandle, + ui32AddressOffsets, &ui32NumAddrOffsets); + + if(retSize > 0 && pActualSize) + { + *pActualSize = pMapping->uSize = retSize; + } + } + + /* + * Allocate some device memory for what we just allocated. + */ + /* + * Do not allocate GPU mapping if NO_GPU_VIRTUAL_ON_ALLOC is requested. + * In the case where CBI is enabled, this allows for late + * GPU mapping. 
This flag is, otherwise, used in cases where only + * the memory management feature of the driver is utilized, without + * a need for GPU rendering + */ + if ((uFlags & (PVRSRV_MEM_SPARSE | PVRSRV_HAP_NO_GPU_VIRTUAL_ON_ALLOC)) == 0) + { + uResult = DevMemoryAlloc (pBMContext, + pMapping, + IMG_NULL, + uFlags, + (IMG_UINT32)uDevVAddrAlignment, + &pMapping->DevVAddr); + if (uResult <= 0) + { + PVR_DPF((PVR_DBG_ERROR, + "BM_ImportMemory: DevMemoryAlloc(0x%x) failed", + pMapping->uSize)); + goto fail_dev_mem_alloc; + } + + /* uDevVAddrAlignment is currently set to zero so QAC generates warning which we override */ + /* PRQA S 3356,3358 1 */ + PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1); + PVR_ASSERT(pBase); + } + + if(pBase) + *pBase = pMapping->DevVAddr.uiAddr; + *ppsMapping = pMapping; + + PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE")); + return IMG_TRUE; + +fail_dev_mem_alloc: + if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle)) + { + /* the size is double the actual size for interleaved allocations */ + if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) + { + pMapping->uSize /= 2; + } + + if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY) + { + uPSize = pBMHeap->sDevArena.ui32DataPageSize; + } + else + { + uPSize = pMapping->uSize; + } + + if (uFlags & PVRSRV_MEM_XPROC) + { + XProcWorkaroundFreeShareable(pMapping->hOSMemHandle); + } + else + if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) + { + OSFreePages(pBMHeap->ui32Attribs, + uPSize, + (IMG_VOID *)pMapping->CpuVAddr, + pMapping->hOSMemHandle); + } + else + { + IMG_SYS_PHYADDR sSysPAddr; + + if(pMapping->CpuVAddr) + { + OSUnReservePhys(pMapping->CpuVAddr, + uPSize, + pBMHeap->ui32Attribs, + pMapping->hOSMemHandle); + } + sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr); + RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE); + } + } +fail_mapping_alloc: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, 
IMG_NULL); + /*not nulling pointer, out of scope*/ +fail_exit: + return IMG_FALSE; +} + + +/*! +****************************************************************************** + + @Function BM_FreeMemory + + @Description Free a block of pages previously allocated via + BM_ImportMemory. + + @Input h - buffer manager handle, not the void type as dictated by + the generic nature of the resource allocator interface. + @Input _base - base address of blocks to free. + @Input psMapping - arbitrary user reference associated with the + underlying storage provided by BM_ImportMemory + @Return None + + *****************************************************************************/ +static IMG_VOID +BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping) +{ + BM_HEAP *pBMHeap = h; + IMG_SIZE_T uPSize; + + PVR_UNREFERENCED_PARAMETER (_base); + + PVR_DPF ((PVR_DBG_MESSAGE, + "BM_FreeMemory (h=0x%x, base=0x%x, psMapping=0x%x)", + (IMG_UINTPTR_T)h, _base, (IMG_UINTPTR_T)psMapping)); + + PVR_ASSERT (psMapping != IMG_NULL); + + if (psMapping == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter")); + return; + } + + /* + Only free the virtual memory if we got as far a allocating it. 
+ This NULL check should be safe as we always have a guard page + at virtual address 0x00000000 + */ + if (psMapping->DevVAddr.uiAddr) + { + DevMemoryFree (psMapping); + } + + /* the size is double the actual for interleaved */ + if((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0) + { + psMapping->uSize /= 2; + } + + if(psMapping->ui32Flags & PVRSRV_MEM_DUMMY) + { + uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize; + } + else + { + uPSize = psMapping->uSize; + } + + if (psMapping->ui32Flags & PVRSRV_MEM_XPROC) + { + XProcWorkaroundFreeShareable(psMapping->hOSMemHandle); + } + else + if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) + { + OSFreePages(pBMHeap->ui32Attribs, + uPSize, + (IMG_VOID *) psMapping->CpuVAddr, + psMapping->hOSMemHandle); + } + else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) + { + IMG_SYS_PHYADDR sSysPAddr; + + OSUnReservePhys(psMapping->CpuVAddr, uPSize, pBMHeap->ui32Attribs, psMapping->hOSMemHandle); + + sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr); + + RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: Invalid backing store type")); + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + PVR_DPF((PVR_DBG_MESSAGE, + "..BM_FreeMemory (h=0x%x, base=0x%x)", + (IMG_UINTPTR_T)h, _base)); +} + +/*! 
+****************************************************************************** + + @Function BM_GetPhysPageAddr + + @Description + + @Input psMemInfo + + @Input sDevVPageAddr + + @Output psDevPAddr + + @Return IMG_VOID + +******************************************************************************/ + +IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_DEV_VIRTADDR sDevVPageAddr, + IMG_DEV_PHYADDR *psDevPAddr) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + + PVR_DPF((PVR_DBG_MESSAGE, "BM_GetPhysPageAddr")); + + PVR_ASSERT(psMemInfo && psDevPAddr); + + /* check it's a page address */ + PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0); + + /* PRQA S 0505 4 */ /* PVR_ASSERT should catch NULL ptrs */ + psDeviceNode = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pBMContext->psDeviceNode; + + *psDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pMMUHeap, + sDevVPageAddr); +} + + +/*! +****************************************************************************** + @Function BM_GetMMUContext + + @Description utility function to return the MMU context + + @Input hDevMemHeap - the Dev mem heap handle + + @Return MMU context, else NULL +**************************************************************************/ +MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap) +{ + BM_HEAP *pBMHeap = (BM_HEAP*)hDevMemHeap; + + PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContext")); + + return pBMHeap->pBMContext->psMMUContext; +} + +/*! 
+****************************************************************************** + @Function BM_GetMMUContextFromMemContext + + @Description utility function to return the MMU context + + @Input hDevMemContext - the Dev mem context handle + + @Return MMU context, else NULL +**************************************************************************/ +MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext) +{ + BM_CONTEXT *pBMContext = (BM_CONTEXT*)hDevMemContext; + + PVR_DPF ((PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext")); + + return pBMContext->psMMUContext; +} + +/*! +****************************************************************************** + @Function BM_GetMMUHeap + + @Description utility function to return the MMU heap handle + + @Input hDevMemHeap - the Dev mem heap handle + + @Return MMU heap handle, else NULL +**************************************************************************/ +IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap) +{ + PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUHeap")); + + return (IMG_HANDLE)((BM_HEAP*)hDevMemHeap)->pMMUHeap; +} + + +/*! +****************************************************************************** + @Function BM_GetDeviceNode + + @Description utility function to return the devicenode from the BM Context + + @Input hDevMemContext - the Dev Mem Context + + @Return MMU heap handle, else NULL +**************************************************************************/ +PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext) +{ + PVR_DPF((PVR_DBG_VERBOSE, "BM_GetDeviceNode")); + + return ((BM_CONTEXT*)hDevMemContext)->psDeviceNode; +} + + +/*! 
+****************************************************************************** + @Function BM_GetMappingHandle + + @Description utility function to return the mapping handle from a meminfo + + @Input psMemInfo - kernel meminfo + + @Return mapping handle, else NULL +**************************************************************************/ +IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMappingHandle")); + + return ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle; +} + +/*! +****************************************************************************** + @Function BM_MappingHandleFromBuffer + + @Description utility function to get the BM mapping handle from a BM buffer + + @Input hBuffer - Handle to BM buffer + + @Return BM mapping handle +**************************************************************************/ +IMG_HANDLE BM_MappingHandleFromBuffer(IMG_HANDLE hBuffer) +{ + BM_BUF *psBuffer; + + PVR_ASSERT(hBuffer != IMG_NULL); + psBuffer = hBuffer; + return psBuffer->pMapping; +} + +/*! +****************************************************************************** + @Function BM_GetVirtualSize + + @Description utility function to get the VM size of a BM mapping + + @Input hBMHandle - Handle to BM mapping + + @Return VM size of mapping +**************************************************************************/ +IMG_UINT32 BM_GetVirtualSize(IMG_HANDLE hBMHandle) +{ + BM_MAPPING *psMapping; + + PVR_ASSERT(hBMHandle != IMG_NULL); + psMapping = hBMHandle; + return psMapping->ui32ChunkSize * psMapping->ui32NumVirtChunks; +} + +/*! 
+****************************************************************************** + @Function BM_MapPageAtOffset + + @Description utility function check if the specificed offset in a BM mapping + is a page that needs tp be mapped + + @Input hBMHandle - Handle to BM mapping + + @Input ui32Offset - Offset into allocation + + @Return IMG_TRUE if the page should be mapped +**************************************************************************/ +IMG_BOOL BM_MapPageAtOffset(IMG_HANDLE hBMHandle, IMG_UINT32 ui32Offset) +{ + BM_MAPPING *psMapping; + IMG_UINT32 ui32ChunkIndex; + + PVR_ASSERT(hBMHandle != IMG_NULL); + psMapping = hBMHandle; + + ui32ChunkIndex = ui32Offset / psMapping->ui32ChunkSize; + /* Check for overrun */ + PVR_ASSERT(ui32ChunkIndex <= psMapping->ui32NumVirtChunks); + return psMapping->pabMapChunk[ui32ChunkIndex]; +} + +/*! +****************************************************************************** + @Function BM_VirtOffsetToPhyscial + + @Description utility function find of physical offset of a sparse allocation + from it's virtual offset. 
+ + @Input hBMHandle - Handle to BM mapping + + @Input ui32VirtOffset - Virtual offset into allocation + + @Output pui32PhysOffset - Physical offset + + @Return IMG_TRUE if the virtual offset is physically backed +**************************************************************************/ +IMG_BOOL BM_VirtOffsetToPhysical(IMG_HANDLE hBMHandle, + IMG_UINT32 ui32VirtOffset, + IMG_UINT32 *pui32PhysOffset) +{ + BM_MAPPING *psMapping; + IMG_UINT32 ui32ChunkOffset; + IMG_UINT32 ui32PhysOffset = 0; + IMG_UINT32 i; + + PVR_ASSERT(hBMHandle != IMG_NULL); + psMapping = hBMHandle; + + ui32ChunkOffset = ui32VirtOffset / psMapping->ui32ChunkSize; + if (!psMapping->pabMapChunk[ui32ChunkOffset]) + { + return IMG_FALSE; + } + + for (i=0;i<ui32ChunkOffset;i++) + { + if (psMapping->pabMapChunk[i]) + { + ui32PhysOffset += psMapping->ui32ChunkSize; + } + } + *pui32PhysOffset = ui32PhysOffset; + + return IMG_TRUE; +} +/****************************************************************************** + End of file (buffer_manager.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/common/deviceclass.c b/pvr-source/services4/srvkm/common/deviceclass.c new file mode 100644 index 0000000..d047c78 --- /dev/null +++ b/pvr-source/services4/srvkm/common/deviceclass.c @@ -0,0 +1,2863 @@ +/*************************************************************************/ /*! +@Title Device class services functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Kernel services functions for device class devices +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "services_headers.h" +#include "buffer_manager.h" +#include "kernelbuffer.h" +#include "kerneldisplay.h" +#include "pvr_bridge_km.h" +#include "pdump_km.h" +#include "deviceid.h" + +#include "lists.h" +#if defined(CONFIG_GCBV) +#include "gc_bvmapping.h" +#endif + +PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID); +PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID); + +#if defined(SUPPORT_MISR_IN_THREAD) +void OSVSyncMISR(IMG_HANDLE, IMG_BOOL); +#endif + +#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS) +IMG_VOID PVRSRVFreeCommandCompletePacketKM(IMG_HANDLE hCmdCookie, + IMG_BOOL bScheduleMISR); +#endif +/*********************************************************************** + Local Display Class Structures +************************************************************************/ +typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG *PPVRSRV_DC_SRV2DISP_KMJTABLE; + +/* + Display Class Buffer Info +*/ +typedef struct PVRSRV_DC_BUFFER_TAG +{ + /* BC/DC common details - THIS MUST BE THE FIRST MEMBER */ + PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer; + + struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo; + struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain; +} PVRSRV_DC_BUFFER; + +/* + Display Device Class kernel swapchain information structure +*/ +typedef struct PVRSRV_DC_SWAPCHAIN_TAG +{ + IMG_HANDLE hExtSwapChain; + IMG_UINT32 ui32SwapChainID; + IMG_UINT32 ui32RefCount; + IMG_UINT32 ui32Flags; + PVRSRV_QUEUE_INFO *psQueue; + PVRSRV_DC_BUFFER asBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS]; + IMG_UINT32 ui32BufferCount; + PVRSRV_DC_BUFFER *psLastFlipBuffer; + IMG_UINT32 ui32MinSwapInterval; + IMG_UINT32 ui32MaxSwapInterval; +#if !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) + PVRSRV_KERNEL_SYNC_INFO **ppsLastSyncInfos; + IMG_UINT32 ui32LastNumSyncInfos; +#endif /* !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) */ + struct 
PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo; + struct PVRSRV_DC_SWAPCHAIN_TAG *psNext; +} PVRSRV_DC_SWAPCHAIN; + + +/* + Display Device Class kernel swapchain referecne structure +*/ +typedef struct PVRSRV_DC_SWAPCHAIN_REF_TAG +{ + struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain; + IMG_HANDLE hResItem; +} PVRSRV_DC_SWAPCHAIN_REF; + + +/* + Display Device Class kernel services information structure +*/ +typedef struct PVRSRV_DISPLAYCLASS_INFO_TAG +{ + IMG_UINT32 ui32RefCount; + IMG_UINT32 ui32DeviceID; + IMG_HANDLE hExtDevice; + PPVRSRV_DC_SRV2DISP_KMJTABLE psFuncTable; + IMG_HANDLE hDevMemContext; + PVRSRV_DC_BUFFER sSystemBuffer; + struct PVRSRV_DC_SWAPCHAIN_TAG *psDCSwapChainShared; +} PVRSRV_DISPLAYCLASS_INFO; + + +/* + Per-context Display Device Class kernel services information structure +*/ +typedef struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO_TAG +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PRESMAN_ITEM hResItem; +} PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO; + + +/*********************************************************************** + Local Buffer Class Structures +************************************************************************/ +typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG *PPVRSRV_BC_SRV2BUFFER_KMJTABLE; + +/* + Buffer Class Buffer Info +*/ +typedef struct PVRSRV_BC_BUFFER_TAG +{ + /* BC/DC common details - THIS MUST BE THE FIRST MEMBER */ + PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer; + + struct PVRSRV_BUFFERCLASS_INFO_TAG *psBCInfo; +} PVRSRV_BC_BUFFER; + + +/* + Buffer Device Class kernel services information structure +*/ +typedef struct PVRSRV_BUFFERCLASS_INFO_TAG +{ + IMG_UINT32 ui32RefCount; + IMG_UINT32 ui32DeviceID; + IMG_HANDLE hExtDevice; + PPVRSRV_BC_SRV2BUFFER_KMJTABLE psFuncTable; + IMG_HANDLE hDevMemContext; + /* buffer info returned from 3rd party driver */ + IMG_UINT32 ui32BufferCount; + PVRSRV_BC_BUFFER *psBuffer; + +} PVRSRV_BUFFERCLASS_INFO; + + +/* + Per-context Buffer Device Class kernel services information structure +*/ +typedef struct 
PVRSRV_BUFFERCLASS_PERCONTEXT_INFO_TAG +{ + PVRSRV_BUFFERCLASS_INFO *psBCInfo; + IMG_HANDLE hResItem; +} PVRSRV_BUFFERCLASS_PERCONTEXT_INFO; + + +/*! +****************************************************************************** + @Function DCDeviceHandleToDCInfo + + @Description + + Convert a client-visible 3rd party device class handle to an internal + PVRSRV_DISPLAYCLASS_INFO pointer. + + @Input hDeviceKM - handle to display class device, returned from OpenDCDevice + + @Return + success: pointer to PVRSRV_DISPLAYCLASS_INFO + failure: IMG_NULL +******************************************************************************/ +static PVRSRV_DISPLAYCLASS_INFO* DCDeviceHandleToDCInfo (IMG_HANDLE hDeviceKM) +{ + PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo; + + psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM; + + return psDCPerContextInfo->psDCInfo; +} + + +/*! +****************************************************************************** + @Function BCDeviceHandleToBCInfo + + @Description + + Convert a client-visible 3rd party buffer class handle to an internal + PVRSRV_BUFFERCLASS_INFO pointer. + + @Input hDeviceKM - handle to buffer class device, returned from OpenBCDevice + + @Return + success: pointer to PVRSRV_BUFFERCLASS_INFO + failure: IMG_NULL +******************************************************************************/ +static PVRSRV_BUFFERCLASS_INFO* BCDeviceHandleToBCInfo (IMG_HANDLE hDeviceKM) +{ + PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo; + + psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM; + + return psBCPerContextInfo->psBCInfo; +} + +/*! +****************************************************************************** + @Function PVRSRVEnumerateDCKM_ForEachVaCb + + @Description + + Enumerates the device node (if is of the same class as given). 
+ + @Input psDeviceNode - The device node to be enumerated + va - variable arguments list, with: + pui32DevCount - The device count pointer (to be increased) + ppui32DevID - The pointer to the device IDs pointer (to be updated and increased) + peDeviceClass - The pointer to the device class of the psDeviceNode's to be enumerated. +******************************************************************************/ +static IMG_VOID PVRSRVEnumerateDCKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va) +{ + IMG_UINT *pui32DevCount; + IMG_UINT32 **ppui32DevID; + PVRSRV_DEVICE_CLASS peDeviceClass; + + pui32DevCount = va_arg(va, IMG_UINT*); + ppui32DevID = va_arg(va, IMG_UINT32**); + peDeviceClass = va_arg(va, PVRSRV_DEVICE_CLASS); + + if ((psDeviceNode->sDevId.eDeviceClass == peDeviceClass) + && (psDeviceNode->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_EXT)) + { + (*pui32DevCount)++; + if(*ppui32DevID) + { + *(*ppui32DevID)++ = psDeviceNode->sDevId.ui32DeviceIndex; + } + } +} + + +/*! +****************************************************************************** + + @Function PVRSRVEnumerateDCKM + + @Description + + Enumerates devices available in a given class. 
+ On first call, pass valid ptr for pui32DevCount and IMG_NULL for pui32DevID, + On second call, pass same ptr for pui32DevCount and client allocated ptr + for pui32DevID device id list + + @Input hServices - handle for services connection + @Input ui32DevClass - device class identifier + @Output pui32DevCount - number of devices available in class + @Output pui32DevID - list of device ids in the device class + + @Return + success: handle to matching display class device + failure: IMG_NULL + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVEnumerateDCKM (PVRSRV_DEVICE_CLASS DeviceClass, + IMG_UINT32 *pui32DevCount, + IMG_UINT32 *pui32DevID ) +{ + /*PVRSRV_DEVICE_NODE *psDeviceNode;*/ + IMG_UINT ui32DevCount = 0; + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + /* search devonode list for devices in specified class and return the device ids */ + List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList, + &PVRSRVEnumerateDCKM_ForEachVaCb, + &ui32DevCount, + &pui32DevID, + DeviceClass); + + if(pui32DevCount) + { + *pui32DevCount = ui32DevCount; + } + else if(pui32DevID == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDCKM: Invalid parameters")); + return (PVRSRV_ERROR_INVALID_PARAMS); + } + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVRegisterDCDeviceKM + + @Description + + registers an external device with the system + + @Input psFuncTable : device function table + + @Output pui32DeviceID : unique device key (for case of multiple identical devices) + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVRegisterDCDeviceKM (PVRSRV_DC_SRV2DISP_KMJTABLE *psFuncTable, + IMG_UINT32 *pui32DeviceID) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo = IMG_NULL; + PVRSRV_DEVICE_NODE *psDeviceNode; + SYS_DATA *psSysData; + + /* + IN: + - name of client side ext. device driver library for subsequent loading + - predefined list of callbacks into kernel ext. device driver (based on class type) + + FUNCTION TASKS: + - allocate display device class info structure + - hang ext.device kernel callbacks on this structure (pfnKSwapToSystem) + + OUT: + - DEVICE_ID + - pass back devinfo? 
no + + Q&A: + - DEVICE_ID passed in or allocated - assume allocate + */ + + SysAcquireData(&psSysData); + + /* + If we got this far we're doing dynamic enumeration + or first time static registration + */ + + /* Allocate device control block */ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(*psDCInfo), + (IMG_VOID **)&psDCInfo, IMG_NULL, + "Display Class Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDCInfo alloc")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet (psDCInfo, 0, sizeof(*psDCInfo)); + + /* setup the display device information structure */ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), + (IMG_VOID **)&psDCInfo->psFuncTable, IMG_NULL, + "Function table for SRVKM->DISPLAY") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psFuncTable alloc")); + goto ErrorExit; + } + OSMemSet (psDCInfo->psFuncTable, 0, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE)); + + /* copy the jump table */ + *psDCInfo->psFuncTable = *psFuncTable; + + /* Allocate device node */ + if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_DEVICE_NODE), + (IMG_VOID **)&psDeviceNode, IMG_NULL, + "Device Node") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDeviceNode alloc")); + goto ErrorExit; + } + OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE)); + + psDeviceNode->pvDevice = (IMG_VOID*)psDCInfo; + psDeviceNode->ui32pvDeviceSize = sizeof(*psDCInfo); + psDeviceNode->ui32RefCount = 1; + psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT; + psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_DISPLAY; + psDeviceNode->psSysData = psSysData; + + /* allocate a unique device id */ + if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed to allocate Device ID")); + goto ErrorExit; + } + psDCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex; + if 
(pui32DeviceID) + { + *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex; + } + + /* Register the device with the system */ + SysRegisterExternalDevice(psDeviceNode); + + /* and finally insert the device into the dev-list */ + List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode); + + return PVRSRV_OK; + +ErrorExit: + + if(psDCInfo->psFuncTable) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), psDCInfo->psFuncTable, IMG_NULL); + psDCInfo->psFuncTable = IMG_NULL; + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo, IMG_NULL); + /*not nulling pointer, out of scope*/ + + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + +/*! +****************************************************************************** + + @Function PVRSRVRemoveDCDeviceKM + + @Description + + Removes external device from services system record + + @Input ui32DeviceIndex : unique device key (for case of multiple identical devices) + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static PVRSRV_ERROR PVRSRVRemoveDCDeviceKM(IMG_UINT32 ui32DevIndex) +{ + SYS_DATA *psSysData; + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + + SysAcquireData(&psSysData); + + /*search the node matching the devindex and display class*/ + psDeviceNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DevIndex, + IMG_FALSE, + PVRSRV_DEVICE_CLASS_DISPLAY); + if (!psDeviceNode) + { + /*device not found*/ + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: requested device %d not present", ui32DevIndex)); + return PVRSRV_ERROR_NO_DEVICENODE_FOUND; + } + + /* setup DCInfo ptr */ + psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice; + + /* + The device can only be removed if there are + no open connections in the Services interface + */ + if(psDCInfo->ui32RefCount == 0) + { + /* + Remove from 
the device list. + */ + List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode); + + /* Unregister the device with the system */ + SysRemoveExternalDevice(psDeviceNode); + + /* + OK found a device with a matching devindex + remove registration information + */ + PVR_ASSERT(psDCInfo->ui32RefCount == 0); + (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex); + (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), psDCInfo->psFuncTable, IMG_NULL); + psDCInfo->psFuncTable = IMG_NULL; + (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo, IMG_NULL); + /*not nulling original pointer, overwritten*/ + (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL); + /*not nulling pointer, out of scope*/ + } + else + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: failed as %d Services DC API connections are still open", psDCInfo->ui32RefCount)); + return PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE; + } + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function PVRSRVRegisterBCDeviceKM + + @Description + + registers an external device with the system + + @Input psFuncTable : device function table + @Input ui32DeviceIndex : unique device key (for case of multiple identical devices) + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVRegisterBCDeviceKM (PVRSRV_BC_SRV2BUFFER_KMJTABLE *psFuncTable, + IMG_UINT32 *pui32DeviceID) +{ + PVRSRV_BUFFERCLASS_INFO *psBCInfo = IMG_NULL; + PVRSRV_DEVICE_NODE *psDeviceNode; + SYS_DATA *psSysData; + /* + IN: + - name of client side ext. device driver library for subsequent loading + - predefined list of callbacks into kernel ext. device driver (based on class type) + + FUNCTION TASKS: + - allocate buffer device class info structure + + OUT: + - DEVICE_ID + - pass back devinfo? 
no + + Q&A: + - DEVICE_ID passed in or allocated - assume allcoate + */ + + SysAcquireData(&psSysData); + + /* + If we got this far we're doing dynamic enumeration + or first time static registration + */ + + /* Allocate device control block */ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(*psBCInfo), + (IMG_VOID **)&psBCInfo, IMG_NULL, + "Buffer Class Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psBCInfo alloc")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet (psBCInfo, 0, sizeof(*psBCInfo)); + + /* setup the buffer device information structure */ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), + (IMG_VOID **)&psBCInfo->psFuncTable, IMG_NULL, + "Function table for SRVKM->BUFFER") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psFuncTable alloc")); + goto ErrorExit; + } + OSMemSet (psBCInfo->psFuncTable, 0, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE)); + + /* copy the jump table */ + *psBCInfo->psFuncTable = *psFuncTable; + + /* Allocate device node */ + if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_DEVICE_NODE), + (IMG_VOID **)&psDeviceNode, IMG_NULL, + "Device Node") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psDeviceNode alloc")); + goto ErrorExit; + } + OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE)); + + psDeviceNode->pvDevice = (IMG_VOID*)psBCInfo; + psDeviceNode->ui32pvDeviceSize = sizeof(*psBCInfo); + psDeviceNode->ui32RefCount = 1; + psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT; + psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_BUFFER; + psDeviceNode->psSysData = psSysData; + + /* allocate a unique device id */ + if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed to allocate Device ID")); + goto ErrorExit; + } + psBCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex; + if 
(pui32DeviceID) + { + *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex; + } + + /* and finally insert the device into the dev-list */ + List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode); + + return PVRSRV_OK; + +ErrorExit: + + if(psBCInfo->psFuncTable) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PPVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, IMG_NULL); + psBCInfo->psFuncTable = IMG_NULL; + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo, IMG_NULL); + /*not nulling shared pointer, wasn't allocated to this point*/ + + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + + +/*! +****************************************************************************** + + @Function PVRSRVRemoveBCDeviceKM + + @Description + + Removes external device from services system record + + @Input ui32DeviceIndex : unique device key (for case of multiple identical devices) + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static PVRSRV_ERROR PVRSRVRemoveBCDeviceKM(IMG_UINT32 ui32DevIndex) +{ + SYS_DATA *psSysData; + PVRSRV_DEVICE_NODE *psDevNode; + PVRSRV_BUFFERCLASS_INFO *psBCInfo; + + SysAcquireData(&psSysData); + + /*search the device node with the devindex and buffer class*/ + psDevNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DevIndex, + IMG_FALSE, + PVRSRV_DEVICE_CLASS_BUFFER); + + if (!psDevNode) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: requested device %d not present", ui32DevIndex)); + return PVRSRV_ERROR_NO_DEVICENODE_FOUND; + } + + /* set-up devnode ptr */ +/* psDevNode = *(ppsDevNode); */ + /* setup BCInfo ptr */ + psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDevNode->pvDevice; + + /* + The device can only be removed if there are + no open connections in the Services interface + */ + if(psBCInfo->ui32RefCount == 0) + { + /* + Remove from the device list. 
+ */ + List_PVRSRV_DEVICE_NODE_Remove(psDevNode); + + /* + OK found a device with a matching devindex + remove registration information + */ + (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex); + (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, IMG_NULL); + psBCInfo->psFuncTable = IMG_NULL; + (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo, IMG_NULL); + /*not nulling pointer, copy on stack*/ + (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDevNode, IMG_NULL); + /*not nulling pointer, out of scope*/ + } + else + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: failed as %d Services BC API connections are still open", psBCInfo->ui32RefCount)); + return PVRSRV_ERROR_UNABLE_TO_REMOVE_DEVICE; + } + + return PVRSRV_OK; +} + + + +/*! +****************************************************************************** + + @Function PVRSRVCloseDCDeviceKM + + @Description + + Closes a connection to the Display Class device + + @Input hDeviceKM : device handle + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVCloseDCDeviceKM (IMG_HANDLE hDeviceKM) +{ + PVRSRV_ERROR eError; + PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo; + + psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM; + + /* Remove the item from the resman list and trigger the callback. 
*/ + eError = ResManFreeResByPtr(psDCPerContextInfo->hResItem, CLEANUP_WITH_POLL); + + return eError; +} + + +static PVRSRV_ERROR CloseDCDeviceCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)pvParam; + psDCInfo = psDCPerContextInfo->psDCInfo; + + if(psDCInfo->sSystemBuffer.sDeviceClassBuffer.ui32MemMapRefCount != 0) + { + PVR_DPF((PVR_DBG_MESSAGE,"CloseDCDeviceCallBack: system buffer (0x%p) still mapped (refcount = %d)", + &psDCInfo->sSystemBuffer.sDeviceClassBuffer, + psDCInfo->sSystemBuffer.sDeviceClassBuffer.ui32MemMapRefCount)); + } + + psDCInfo->ui32RefCount--; + if(psDCInfo->ui32RefCount == 0) + { + /* close the external device */ + psDCInfo->psFuncTable->pfnCloseDCDevice(psDCInfo->hExtDevice); + + PVRSRVKernelSyncInfoDecRef(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL); + + psDCInfo->hDevMemContext = IMG_NULL; + psDCInfo->hExtDevice = IMG_NULL; + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO), psDCPerContextInfo, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVOpenDCDeviceKM + + @Description + + Opens a connection to the Display Class device, associating the connection + with a Device Memory Context for a services managed device + + @Input psPerProc : Per-process data + @Input ui32DeviceID : unique device index + @Input hDevCookie : devcookie used to derive the Device Memory + Context into BC surfaces will be mapped into + @Outut phDeviceKM : handle to the DC device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVOpenDCDeviceKM (PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_UINT32 ui32DeviceID, + IMG_HANDLE hDevCookie, + IMG_HANDLE *phDeviceKM) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + + if(!phDeviceKM || !hDevCookie) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Invalid params")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + SysAcquireData(&psSysData); + + /* find the matching devicenode */ + psDeviceNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DeviceID, + IMG_FALSE, + PVRSRV_DEVICE_CLASS_DISPLAY); + if (!psDeviceNode) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: no devnode matching index %d", ui32DeviceID)); + return PVRSRV_ERROR_NO_DEVICENODE_FOUND; + } + psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice; + + /* + Allocate the per-context DC Info before calling the external device, + to make error handling easier. 
+ */ + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(*psDCPerContextInfo), + (IMG_VOID **)&psDCPerContextInfo, IMG_NULL, + "Display Class per Context Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed psDCPerContextInfo alloc")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet(psDCPerContextInfo, 0, sizeof(*psDCPerContextInfo)); + + if(psDCInfo->ui32RefCount++ == 0) + { + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie; + + /* store the device kernel context to map into */ + psDCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext; + + /* create a syncinfo for the device's system surface */ + eError = PVRSRVAllocSyncInfoKM(IMG_NULL, + (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext, + &psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed sync info alloc")); + psDCInfo->ui32RefCount--; + return eError; + } + + /* open the external device */ + eError = psDCInfo->psFuncTable->pfnOpenDCDevice(ui32DeviceID, + &psDCInfo->hExtDevice, + (PVRSRV_SYNC_DATA*)psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to open external DC device")); + psDCInfo->ui32RefCount--; + PVRSRVKernelSyncInfoDecRef(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL); + return eError; + } + + psDCPerContextInfo->psDCInfo = psDCInfo; + eError = PVRSRVGetDCSystemBufferKM(psDCPerContextInfo, IMG_NULL); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to get system buffer")); + psDCInfo->ui32RefCount--; + PVRSRVKernelSyncInfoDecRef(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL); + return eError; + } + psDCInfo->sSystemBuffer.sDeviceClassBuffer.ui32MemMapRefCount = 0; + } + else + { + psDCPerContextInfo->psDCInfo = psDCInfo; + } + 
+ psDCPerContextInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_DISPLAYCLASS_DEVICE, + psDCPerContextInfo, + 0, + &CloseDCDeviceCallBack); + + /* return a reference to the DCPerContextInfo */ + *phDeviceKM = (IMG_HANDLE)psDCPerContextInfo; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function PVRSRVEnumDCFormatsKM + + @Description + + Enumerates the devices pixel formats + + @Input hDeviceKM : device handle + @Output pui32Count : number of pixel formats + @Output psFormat : format list + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVEnumDCFormatsKM (IMG_HANDLE hDeviceKM, + IMG_UINT32 *pui32Count, + DISPLAY_FORMAT *psFormat) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + + if(!hDeviceKM || !pui32Count || !psFormat) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCFormatsKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + /* call into the display device driver to get info */ + return psDCInfo->psFuncTable->pfnEnumDCFormats(psDCInfo->hExtDevice, pui32Count, psFormat); +} + + + +/*! 
+****************************************************************************** + + @Function PVRSRVEnumDCDimsKM + + @Description + + Enumerates the devices mode dimensions for a given pixel format + + @Input hDeviceKM : device handle + @Input psFormat : pixel format + @Output pui32Count : number of dimensions + @Output psDim : dimensions list + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVEnumDCDimsKM (IMG_HANDLE hDeviceKM, + DISPLAY_FORMAT *psFormat, + IMG_UINT32 *pui32Count, + DISPLAY_DIMS *psDim) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + + if(!hDeviceKM || !pui32Count || !psFormat) // psDim==NULL to query number of dims + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCDimsKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + /* call into the display device driver to get info */ + return psDCInfo->psFuncTable->pfnEnumDCDims(psDCInfo->hExtDevice, psFormat, pui32Count, psDim); +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVGetDCSystemBufferKM + + @Description + + Get the primary surface and optionally return its buffer handle + + @Input hDeviceKM : device handle + @Output phBuffer : Optional buffer handle + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVGetDCSystemBufferKM (IMG_HANDLE hDeviceKM, + IMG_HANDLE *phBuffer) +{ + PVRSRV_ERROR eError; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + IMG_HANDLE hExtBuffer; + + if(!hDeviceKM) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + /* call into the display device driver to get info */ + eError = psDCInfo->psFuncTable->pfnGetDCSystemBuffer(psDCInfo->hExtDevice, &hExtBuffer); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Failed to get valid buffer handle from external driver")); + return eError; + } + + /* save the new info */ + psDCInfo->sSystemBuffer.sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr; + psDCInfo->sSystemBuffer.sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext; + psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice; + psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer = hExtBuffer; + + psDCInfo->sSystemBuffer.psDCInfo = psDCInfo; + + /* return handle */ + if (phBuffer) + { + *phBuffer = (IMG_HANDLE)&(psDCInfo->sSystemBuffer); + } + + return PVRSRV_OK; +} + + +/****************************************************************************** + + @Function PVRSRVGetDCInfoKM + + @Description + + Gets Display Class device Info + + @Input hDeviceKM : device handle + @Output psDisplayInfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR 
PVRSRVGetDCInfoKM (IMG_HANDLE hDeviceKM, + DISPLAY_INFO *psDisplayInfo) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_ERROR eError; + + if(!hDeviceKM || !psDisplayInfo) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCInfoKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + /* call into the display device driver to get info */ + eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, psDisplayInfo); + if (eError != PVRSRV_OK) + { + return eError; + } + + if (psDisplayInfo->ui32MaxSwapChainBuffers > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) + { + psDisplayInfo->ui32MaxSwapChainBuffers = PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS; + } + + return PVRSRV_OK; +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChainRef) +{ + PVRSRV_ERROR eError; + PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef; + + if(!hSwapChainRef) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyDCSwapChainKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psSwapChainRef = hSwapChainRef; + + eError = ResManFreeResByPtr(psSwapChainRef->hResItem, CLEANUP_WITH_POLL); + + return eError; +} + + +static PVRSRV_ERROR DestroyDCSwapChain(PVRSRV_DC_SWAPCHAIN *psSwapChain) +{ + PVRSRV_ERROR eError; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo = psSwapChain->psDCInfo; + IMG_UINT32 i; + + /* Update shared swapchains list */ + if( psDCInfo->psDCSwapChainShared ) + { + if( psDCInfo->psDCSwapChainShared == psSwapChain ) + { + psDCInfo->psDCSwapChainShared = psSwapChain->psNext; + } + else + { + PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain; + psCurrentSwapChain = psDCInfo->psDCSwapChainShared; + while( psCurrentSwapChain->psNext ) + { + if( psCurrentSwapChain->psNext != psSwapChain ) + { + psCurrentSwapChain = psCurrentSwapChain->psNext; + continue; + } + psCurrentSwapChain->psNext = psSwapChain->psNext; + break; + } + } + } + + /* Destroy command queue before swapchain - it may use the swapchain when commands are flushed. 
*/ + PVRSRVDestroyCommandQueueKM(psSwapChain->psQueue); + + /* call into the display device driver to destroy a swapchain */ + eError = psDCInfo->psFuncTable->pfnDestroyDCSwapChain(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DestroyDCSwapChainCallBack: Failed to destroy DC swap chain")); + return eError; + } + + /* free the resources */ + for(i=0; i<psSwapChain->ui32BufferCount; i++) + { + if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo) + { + PVRSRVKernelSyncInfoDecRef(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL); + } + } + +#if !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) + if (psSwapChain->ppsLastSyncInfos) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_SYNC_INFO *) * psSwapChain->ui32LastNumSyncInfos, + psSwapChain->ppsLastSyncInfos, IMG_NULL); + } +#endif /* !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) */ + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), psSwapChain, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return eError; +} + + +static PVRSRV_ERROR DestroyDCSwapChainRefCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = (PVRSRV_DC_SWAPCHAIN_REF *) pvParam; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 i; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + for (i = 0; i < psSwapChainRef->psSwapChain->ui32BufferCount; i++) + { + if (psSwapChainRef->psSwapChain->asBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount != 0) + { + PVR_DPF((PVR_DBG_ERROR, "DestroyDCSwapChainRefCallBack: swapchain (0x%p) still mapped (ui32MemMapRefCount = %d)", + &psSwapChainRef->psSwapChain->asBuffer[i].sDeviceClassBuffer, + psSwapChainRef->psSwapChain->asBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount)); + } + } + + if(--psSwapChainRef->psSwapChain->ui32RefCount == 0) + { + eError = 
DestroyDCSwapChain(psSwapChainRef->psSwapChain); + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN_REF), psSwapChainRef, IMG_NULL); + return eError; +} + +static PVRSRV_DC_SWAPCHAIN* PVRSRVFindSharedDCSwapChainKM(PVRSRV_DISPLAYCLASS_INFO *psDCInfo, + IMG_UINT32 ui32SwapChainID) +{ + PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain; + + for(psCurrentSwapChain = psDCInfo->psDCSwapChainShared; + psCurrentSwapChain; + psCurrentSwapChain = psCurrentSwapChain->psNext) + { + if(psCurrentSwapChain->ui32SwapChainID == ui32SwapChainID) + return psCurrentSwapChain; + } + return IMG_NULL; +} + +static PVRSRV_ERROR PVRSRVCreateDCSwapChainRefKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + PVRSRV_DC_SWAPCHAIN *psSwapChain, + PVRSRV_DC_SWAPCHAIN_REF **ppsSwapChainRef) +{ + PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = IMG_NULL; + + /* Allocate swapchain reference structre*/ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_DC_SWAPCHAIN_REF), + (IMG_VOID **)&psSwapChainRef, IMG_NULL, + "Display Class Swapchain Reference") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainRefKM: Failed psSwapChainRef alloc")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet (psSwapChainRef, 0, sizeof(PVRSRV_DC_SWAPCHAIN_REF)); + + /* Bump refcount */ + psSwapChain->ui32RefCount++; + + /* Create reference resource */ + psSwapChainRef->psSwapChain = psSwapChain; + psSwapChainRef->hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF, + psSwapChainRef, + 0, + &DestroyDCSwapChainRefCallBack); + *ppsSwapChainRef = psSwapChainRef; + + return PVRSRV_OK; +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVCreateDCSwapChainKM (PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDeviceKM, + IMG_UINT32 ui32Flags, + DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib, + DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib, + IMG_UINT32 ui32BufferCount, + IMG_UINT32 ui32OEMFlags, + IMG_HANDLE *phSwapChainRef, + IMG_UINT32 *pui32SwapChainID) +{ + PVRSRV_DISPLAYCLASS_INFO 
*psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain = IMG_NULL; + PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = IMG_NULL; + PVRSRV_SYNC_DATA *apsSyncData[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS]; + PVRSRV_QUEUE_INFO *psQueue = IMG_NULL; + PVRSRV_ERROR eError; + IMG_UINT32 i; + DISPLAY_INFO sDisplayInfo; + + + if(!hDeviceKM + || !psDstSurfAttrib + || !psSrcSurfAttrib + || !phSwapChainRef + || !pui32SwapChainID) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32BufferCount > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too many buffers")); + return PVRSRV_ERROR_TOOMANYBUFFERS; + } + + if (ui32BufferCount < 2) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too few buffers")); + return PVRSRV_ERROR_TOO_FEW_BUFFERS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + if( ui32Flags & PVRSRV_CREATE_SWAPCHAIN_QUERY ) + { + /* Query - use pui32SwapChainID as input */ + psSwapChain = PVRSRVFindSharedDCSwapChainKM(psDCInfo, *pui32SwapChainID ); + if( psSwapChain ) + { + /* Create new reference */ + eError = PVRSRVCreateDCSwapChainRefKM(psPerProc, + psSwapChain, + &psSwapChainRef); + if( eError != PVRSRV_OK ) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference")); + return eError; + } + + *phSwapChainRef = (IMG_HANDLE)psSwapChainRef; + return PVRSRV_OK; + } + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: No shared SwapChain found for query")); + return PVRSRV_ERROR_FLIP_CHAIN_EXISTS; + } + + /* Allocate swapchain control structure for srvkm */ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_DC_SWAPCHAIN), + (IMG_VOID **)&psSwapChain, IMG_NULL, + "Display Class Swapchain") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed psSwapChain alloc")); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorExit; + } + OSMemSet (psSwapChain, 0, sizeof(PVRSRV_DC_SWAPCHAIN)); + + /* 
Create a command queue for the swapchain */ + eError = PVRSRVCreateCommandQueueKM(1024, &psQueue); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create CmdQueue")); + goto ErrorExit; + } + + /* store the Queue */ + psSwapChain->psQueue = psQueue; + + /* Create a Sync Object for each surface in the swapchain */ + for(i=0; i<ui32BufferCount; i++) + { + eError = PVRSRVAllocSyncInfoKM(IMG_NULL, + psDCInfo->hDevMemContext, + &psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to alloc syninfo for psSwapChain")); + goto ErrorExit; + } + + /* setup common device class info */ + psSwapChain->asBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr; + psSwapChain->asBuffer[i].sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext; + psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice; + + /* save off useful ptrs */ + psSwapChain->asBuffer[i].psDCInfo = psDCInfo; + psSwapChain->asBuffer[i].psSwapChain = psSwapChain; + + /* syncinfos must be passed as array of syncdata ptrs to the 3rd party driver */ + apsSyncData[i] = (PVRSRV_SYNC_DATA*)psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM; + } + + psSwapChain->ui32BufferCount = ui32BufferCount; + psSwapChain->psDCInfo = psDCInfo; + +#if defined(PDUMP) + PDUMPCOMMENT("Allocate DC swap chain (SwapChainID == %u, BufferCount == %u)", + *pui32SwapChainID, + ui32BufferCount); + PDUMPCOMMENT(" Src surface dimensions == %u x %u", + psSrcSurfAttrib->sDims.ui32Width, + psSrcSurfAttrib->sDims.ui32Height); + PDUMPCOMMENT(" Dst surface dimensions == %u x %u", + psDstSurfAttrib->sDims.ui32Width, + psDstSurfAttrib->sDims.ui32Height); +#endif + + eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, &sDisplayInfo); + if (eError != PVRSRV_OK) + { + 
PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to get DC info")); + return eError; + } + + psSwapChain->ui32MinSwapInterval = sDisplayInfo.ui32MinSwapInterval; + psSwapChain->ui32MaxSwapInterval = sDisplayInfo.ui32MaxSwapInterval; + + /* call into the display device driver to create a swapchain */ + eError = psDCInfo->psFuncTable->pfnCreateDCSwapChain(psDCInfo->hExtDevice, + ui32Flags, + psDstSurfAttrib, + psSrcSurfAttrib, + ui32BufferCount, + apsSyncData, + ui32OEMFlags, + &psSwapChain->hExtSwapChain, + &psSwapChain->ui32SwapChainID); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create 3rd party SwapChain")); + PDUMPCOMMENT("Swapchain allocation failed."); + goto ErrorExit; + } + + /* Create new reference */ + eError = PVRSRVCreateDCSwapChainRefKM(psPerProc, + psSwapChain, + &psSwapChainRef); + if( eError != PVRSRV_OK ) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference")); + PDUMPCOMMENT("Swapchain allocation failed."); + goto ErrorExit; + } + + psSwapChain->ui32RefCount = 1; + psSwapChain->ui32Flags = ui32Flags; + + /* Save pointer in DC structure if ti's shared struct */ + if( ui32Flags & PVRSRV_CREATE_SWAPCHAIN_SHARED ) + { + if(! 
psDCInfo->psDCSwapChainShared ) + { + psDCInfo->psDCSwapChainShared = psSwapChain; + } + else + { + PVRSRV_DC_SWAPCHAIN *psOldHead = psDCInfo->psDCSwapChainShared; + psDCInfo->psDCSwapChainShared = psSwapChain; + psSwapChain->psNext = psOldHead; + } + } + + /* We create swapchain - pui32SwapChainID is output */ + *pui32SwapChainID = psSwapChain->ui32SwapChainID; + + /* return the swapchain reference handle */ + *phSwapChainRef= (IMG_HANDLE)psSwapChainRef; + + return eError; + +ErrorExit: + + for(i=0; i<ui32BufferCount; i++) + { + if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo) + { + PVRSRVKernelSyncInfoDecRef(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL); + } + } + + if(psQueue) + { + PVRSRVDestroyCommandQueueKM(psQueue); + } + + if(psSwapChain) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), psSwapChain, IMG_NULL); + /*not nulling pointer, out of scope*/ + } + + return eError; +} + + + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChainRef, + IMG_RECT *psRect) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + + if(!hDeviceKM || !hSwapChainRef) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstRectKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain; + + return psDCInfo->psFuncTable->pfnSetDCDstRect(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain, + psRect); +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChainRef, + IMG_RECT *psRect) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + + if(!hDeviceKM || !hSwapChainRef) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcRectKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + psSwapChain = 
((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain; + + return psDCInfo->psFuncTable->pfnSetDCSrcRect(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain, + psRect); +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChainRef, + IMG_UINT32 ui32CKColour) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + + if(!hDeviceKM || !hSwapChainRef) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstColourKeyKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain; + + return psDCInfo->psFuncTable->pfnSetDCDstColourKey(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain, + ui32CKColour); +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChainRef, + IMG_UINT32 ui32CKColour) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + + if(!hDeviceKM || !hSwapChainRef) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcColourKeyKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain; + + return psDCInfo->psFuncTable->pfnSetDCSrcColourKey(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain, + ui32CKColour); +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChainRef, + IMG_UINT32 *pui32BufferCount, + IMG_HANDLE *phBuffer, + IMG_SYS_PHYADDR *psPhyAddr) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + IMG_HANDLE ahExtBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS]; + PVRSRV_ERROR eError; + IMG_UINT32 i; + + if(!hDeviceKM || !hSwapChainRef || !phBuffer || !psPhyAddr) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCBuffersKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = 
DCDeviceHandleToDCInfo(hDeviceKM); + psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain; + + /* call into the display device driver to get info */ + eError = psDCInfo->psFuncTable->pfnGetDCBuffers(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain, + pui32BufferCount, + ahExtBuffer); + + PVR_ASSERT(*pui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS); + + /* + populate the srvkm's buffer structure with the 3rd party buffer handles + and return the services buffer handles + */ + for(i=0; i<*pui32BufferCount; i++) + { + psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtBuffer = ahExtBuffer[i]; + phBuffer[i] = (IMG_HANDLE)&psSwapChain->asBuffer[i]; + } + +#if defined(SUPPORT_GET_DC_BUFFERS_SYS_PHYADDRS) + for(i = 0; i < *pui32BufferCount; i++) + { + IMG_UINT32 ui32ByteSize, ui32TilingStride; + IMG_SYS_PHYADDR *pPhyAddr; + IMG_BOOL bIsContiguous; + IMG_HANDLE hOSMapInfo; + IMG_VOID *pvVAddr; + + eError = psDCInfo->psFuncTable->pfnGetBufferAddr(psDCInfo->hExtDevice, + ahExtBuffer[i], + &pPhyAddr, + &ui32ByteSize, + &pvVAddr, + &hOSMapInfo, + &bIsContiguous, + &ui32TilingStride); + if(eError != PVRSRV_OK) + { + break; + } + + psPhyAddr[i] = *pPhyAddr; + } +#endif /* defined(SUPPORT_GET_DC_BUFFERS_SYS_PHYADDRS) */ + + return eError; +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hBuffer, + IMG_UINT32 ui32SwapInterval, + IMG_HANDLE hPrivateTag, + IMG_UINT32 ui32ClipRectCount, + IMG_RECT *psClipRect) +{ + PVRSRV_ERROR eError; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_BUFFER *psBuffer; + PVRSRV_QUEUE_INFO *psQueue; + DISPLAYCLASS_FLIP_COMMAND *psFlipCmd; + IMG_UINT32 i; + IMG_BOOL bAddReferenceToLast = IMG_TRUE; + IMG_UINT16 ui16SwapCommandID = DC_FLIP_COMMAND; + IMG_UINT32 ui32NumSrcSyncs = 1; + PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2]; + PVRSRV_COMMAND *psCommand; + SYS_DATA *psSysData; + + if(!hDeviceKM || !hBuffer || !psClipRect) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Invalid parameters")); 
+ return PVRSRV_ERROR_INVALID_PARAMS; + } + + psBuffer = (PVRSRV_DC_BUFFER*)hBuffer; + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + /* Validate swap interval against limits */ + if(ui32SwapInterval < psBuffer->psSwapChain->ui32MinSwapInterval || + ui32SwapInterval > psBuffer->psSwapChain->ui32MaxSwapInterval) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Invalid swap interval. Requested %u, Allowed range %u-%u", + ui32SwapInterval, psBuffer->psSwapChain->ui32MinSwapInterval, psBuffer->psSwapChain->ui32MaxSwapInterval)); + return PVRSRV_ERROR_INVALID_SWAPINTERVAL; + } + +#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS) + + if(psDCInfo->psFuncTable->pfnQuerySwapCommandID != IMG_NULL) + { + psDCInfo->psFuncTable->pfnQuerySwapCommandID(psDCInfo->hExtDevice, + psBuffer->psSwapChain->hExtSwapChain, + psBuffer->sDeviceClassBuffer.hExtBuffer, + hPrivateTag, + &ui16SwapCommandID, + &bAddReferenceToLast); + + } + +#endif + + /* get the queue from the buffer structure */ + psQueue = psBuffer->psSwapChain->psQueue; + + /* specify the syncs */ + apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo; + if(bAddReferenceToLast && psBuffer->psSwapChain->psLastFlipBuffer && + psBuffer != psBuffer->psSwapChain->psLastFlipBuffer) + { + apsSrcSync[1] = psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo; + ui32NumSrcSyncs++; + } + + /* insert the command (header) */ + eError = PVRSRVInsertCommandKM (psQueue, + &psCommand, + psDCInfo->ui32DeviceID, + ui16SwapCommandID, + 0, + IMG_NULL, + ui32NumSrcSyncs, + apsSrcSync, + sizeof(DISPLAYCLASS_FLIP_COMMAND) + (sizeof(IMG_RECT) * ui32ClipRectCount), + IMG_NULL, + IMG_NULL); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to get space in queue")); + goto Exit; + } + + /* setup the flip command */ + psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData; + + /* Ext Device Handle */ + psFlipCmd->hExtDevice = psDCInfo->hExtDevice; + + /* Ext SwapChain Handle */ + 
psFlipCmd->hExtSwapChain = psBuffer->psSwapChain->hExtSwapChain; + + /* Ext Buffer Handle (Buffer to Flip to) */ + psFlipCmd->hExtBuffer = psBuffer->sDeviceClassBuffer.hExtBuffer; + + /* private tag */ + psFlipCmd->hPrivateTag = hPrivateTag; + + /* setup the clip rects */ + psFlipCmd->ui32ClipRectCount = ui32ClipRectCount; + /* cliprect memory appends the command structure */ + psFlipCmd->psClipRect = (IMG_RECT*)((IMG_UINT8*)psFlipCmd + sizeof(DISPLAYCLASS_FLIP_COMMAND)); // PRQA S 3305 + /* copy the clip rects */ + for(i=0; i<ui32ClipRectCount; i++) + { + psFlipCmd->psClipRect[i] = psClipRect[i]; + } + + /* number of vsyncs between successive flips */ + psFlipCmd->ui32SwapInterval = ui32SwapInterval; + + SysAcquireData(&psSysData); + + /* Because we might be composing just software surfaces, without + * any SGX renders since the last frame, we won't necessarily + * have cleaned/flushed the CPU caches before the buffers need + * to be displayed. + * + * Doing so now is safe because InsertCommand bumped ROP2 on the + * affected buffers (preventing more SW renders starting) but the + * display won't start to process the buffers until SubmitCommand. 
+ */ + { + if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH) + { + OSFlushCPUCacheKM(); + } + else if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN) + { + OSCleanCPUCacheKM(); + } + + psSysData->ePendingCacheOpType = PVRSRV_MISC_INFO_CPUCACHEOP_NONE; + } + + /* submit the command */ + eError = PVRSRVSubmitCommandKM (psQueue, psCommand); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to submit command")); + goto Exit; + } + + /* + Schedule an MISR to process it + */ + eError = OSScheduleMISR(psSysData); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to schedule MISR")); + goto Exit; + } + + /* update the last flip buffer */ + psBuffer->psSwapChain->psLastFlipBuffer = psBuffer; + +Exit: + + if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) + { + eError = PVRSRV_ERROR_RETRY; + } + + return eError; +} + +typedef struct _CALLBACK_DATA_ +{ + IMG_PVOID pvPrivData; + IMG_UINT32 ui32PrivDataLength; + IMG_PVOID ppvMemInfos; + IMG_UINT32 ui32NumMemInfos; +} CALLBACK_DATA; + +static IMG_VOID FreePrivateData(IMG_HANDLE hCallbackData) +{ + CALLBACK_DATA *psCallbackData = hCallbackData; + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psCallbackData->ui32PrivDataLength, + psCallbackData->pvPrivData, IMG_NULL); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(IMG_VOID *) * psCallbackData->ui32NumMemInfos, + psCallbackData->ppvMemInfos, IMG_NULL); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(CALLBACK_DATA), hCallbackData, IMG_NULL); +} + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSwapToDCBuffer2KM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChain, + IMG_UINT32 ui32SwapInterval, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfos, + PVRSRV_KERNEL_SYNC_INFO **ppsSyncInfos, + IMG_UINT32 ui32NumMemSyncInfos, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength) +{ + PVRSRV_KERNEL_SYNC_INFO **ppsCompiledSyncInfos; + IMG_UINT32 i, ui32NumCompiledSyncInfos; + DISPLAYCLASS_FLIP_COMMAND2 *psFlipCmd; + 
PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + PVRSRV_ERROR eError = PVRSRV_OK; + CALLBACK_DATA *psCallbackData; + PVRSRV_QUEUE_INFO *psQueue; + PVRSRV_COMMAND *psCommand; + IMG_PVOID *ppvMemInfos; + SYS_DATA *psSysData; + + if(!hDeviceKM || !hSwapChain || !ppsMemInfos || !ppsSyncInfos || ui32NumMemSyncInfos < 1) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChain)->psSwapChain; + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + + /* Validate swap interval against limits */ + if(ui32SwapInterval < psSwapChain->ui32MinSwapInterval || + ui32SwapInterval > psSwapChain->ui32MaxSwapInterval) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Invalid swap interval. Requested %u, Allowed range %u-%u", + ui32SwapInterval, psSwapChain->ui32MinSwapInterval, psSwapChain->ui32MaxSwapInterval)); + return PVRSRV_ERROR_INVALID_SWAPINTERVAL; + } + + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(CALLBACK_DATA), + (IMG_VOID **)&psCallbackData, IMG_NULL, + "PVRSRVSwapToDCBuffer2KM callback data"); + if (eError != PVRSRV_OK) + { + return eError; + } + + psCallbackData->pvPrivData = pvPrivData; + psCallbackData->ui32PrivDataLength = ui32PrivDataLength; + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(IMG_VOID *) * ui32NumMemSyncInfos, + (IMG_VOID **)&ppvMemInfos, IMG_NULL, + "Swap Command Meminfos") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to allocate space for meminfo list")); + psCallbackData->ppvMemInfos = IMG_NULL; + goto Exit; + } + + for(i = 0; i < ui32NumMemSyncInfos; i++) + { + ppvMemInfos[i] = ppsMemInfos[i]; + } + + psCallbackData->ppvMemInfos = ppvMemInfos; + psCallbackData->ui32NumMemInfos = ui32NumMemSyncInfos; + + /* get the queue from the buffer structure */ + psQueue = psSwapChain->psQueue; + +#if !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) + 
if(psSwapChain->ppsLastSyncInfos) + { + IMG_UINT32 ui32NumUniqueSyncInfos = psSwapChain->ui32LastNumSyncInfos; + IMG_UINT32 j; + + for(j = 0; j < psSwapChain->ui32LastNumSyncInfos; j++) + { + for(i = 0; i < ui32NumMemSyncInfos; i++) + { + if(psSwapChain->ppsLastSyncInfos[j] == ppsSyncInfos[i]) + { + psSwapChain->ppsLastSyncInfos[j] = IMG_NULL; + ui32NumUniqueSyncInfos--; + } + } + } + + ui32NumCompiledSyncInfos = ui32NumMemSyncInfos + ui32NumUniqueSyncInfos; + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumCompiledSyncInfos, + (IMG_VOID **)&ppsCompiledSyncInfos, IMG_NULL, + "Compiled syncinfos") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to allocate space for meminfo list")); + goto Exit; + } + + OSMemCopy(ppsCompiledSyncInfos, ppsSyncInfos, sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumMemSyncInfos); + for(j = 0, i = ui32NumMemSyncInfos; j < psSwapChain->ui32LastNumSyncInfos; j++) + { + if(psSwapChain->ppsLastSyncInfos[j]) + { + ppsCompiledSyncInfos[i] = psSwapChain->ppsLastSyncInfos[j]; + i++; + } + } + } + else +#endif /* !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) */ + { + ppsCompiledSyncInfos = ppsSyncInfos; + ui32NumCompiledSyncInfos = ui32NumMemSyncInfos; + } + + /* insert the command (header) */ + eError = PVRSRVInsertCommandKM (psQueue, + &psCommand, + psDCInfo->ui32DeviceID, + DC_FLIP_COMMAND, + 0, + IMG_NULL, + ui32NumCompiledSyncInfos, + ppsCompiledSyncInfos, + sizeof(DISPLAYCLASS_FLIP_COMMAND2), + FreePrivateData, + psCallbackData); + + if (ppsCompiledSyncInfos != ppsSyncInfos) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumCompiledSyncInfos, + (IMG_VOID *)ppsCompiledSyncInfos, + IMG_NULL); + } + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to get space in queue")); + goto Exit; + } + + /* setup the flip command */ + psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND2*)psCommand->pvData; + + /* Ext 
Device Handle */ + psFlipCmd->hExtDevice = psDCInfo->hExtDevice; + + /* Ext SwapChain Handle */ + psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain; + + /* number of vsyncs between successive flips */ + psFlipCmd->ui32SwapInterval = ui32SwapInterval; + + /* Opaque private data, if supplied */ + psFlipCmd->pvPrivData = pvPrivData; + psFlipCmd->ui32PrivDataLength = ui32PrivDataLength; + + psFlipCmd->ppsMemInfos = (PDC_MEM_INFO *)ppvMemInfos; + psFlipCmd->ui32NumMemInfos = ui32NumMemSyncInfos; + + /* Even though this is "unused", we have to initialize it, + * as the display controller might NULL-test it. + */ + psFlipCmd->hUnused = IMG_NULL; + + SysAcquireData(&psSysData); + + /* Because we might be composing just software surfaces, without + * any SGX renders since the last frame, we won't necessarily + * have cleaned/flushed the CPU caches before the buffers need + * to be displayed. + * + * Doing so now is safe because InsertCommand bumped ROP2 on the + * affected buffers (preventing more SW renders starting) but the + * display won't start to process the buffers until SubmitCommand. 
+ */ + { + if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH) + { + OSFlushCPUCacheKM(); + } + else if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN) + { + OSCleanCPUCacheKM(); + } + + psSysData->ePendingCacheOpType = PVRSRV_MISC_INFO_CPUCACHEOP_NONE; + } + + /* submit the command */ + eError = PVRSRVSubmitCommandKM (psQueue, psCommand); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to submit command")); + goto Exit; + } + + /* The command has been submitted and so psCallbackData will be freed by the callback */ + psCallbackData = IMG_NULL; + + /* + Schedule an MISR to process it + */ + eError = OSScheduleMISR(psSysData); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to schedule MISR")); + goto Exit; + } + +#if !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) + /* Reallocate the syncinfo list if it was too small */ + if (psSwapChain->ui32LastNumSyncInfos < ui32NumMemSyncInfos) + { + if (psSwapChain->ppsLastSyncInfos) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_SYNC_INFO *) * psSwapChain->ui32LastNumSyncInfos, + psSwapChain->ppsLastSyncInfos, IMG_NULL); + } + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_SYNC_INFO *) * ui32NumMemSyncInfos, + (IMG_VOID **)&psSwapChain->ppsLastSyncInfos, IMG_NULL, + "Last syncinfos") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBuffer2KM: Failed to allocate space for meminfo list")); + goto Exit; + } + } + + psSwapChain->ui32LastNumSyncInfos = ui32NumMemSyncInfos; + + for(i = 0; i < ui32NumMemSyncInfos; i++) + { + psSwapChain->ppsLastSyncInfos[i] = ppsSyncInfos[i]; + } +#endif /* !defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) */ + +Exit: + if (psCallbackData) + { + if(psCallbackData->ppvMemInfos) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(IMG_VOID *) * psCallbackData->ui32NumMemInfos, + psCallbackData->ppvMemInfos, IMG_NULL); + } + 
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(CALLBACK_DATA), psCallbackData, IMG_NULL); + } + if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) + { + eError = PVRSRV_ERROR_RETRY; + } + + return eError; +} + + +IMG_EXPORT +PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE hDeviceKM, + IMG_HANDLE hSwapChainRef) +{ + PVRSRV_ERROR eError; + PVRSRV_QUEUE_INFO *psQueue; + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DC_SWAPCHAIN *psSwapChain; + PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef; + DISPLAYCLASS_FLIP_COMMAND *psFlipCmd; + IMG_UINT32 ui32NumSrcSyncs = 1; + PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2]; + PVRSRV_COMMAND *psCommand; + IMG_BOOL bAddReferenceToLast = IMG_TRUE; + IMG_UINT16 ui16SwapCommandID = DC_FLIP_COMMAND; + SYS_DATA *psSysData; + + if(!hDeviceKM || !hSwapChainRef) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); + psSwapChainRef = (PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef; + psSwapChain = psSwapChainRef->psSwapChain; + + /* + If more then 1 reference to the swapchain exist then + ignore any request to swap to the system buffer + */ + if (psSwapChain->ui32RefCount > 1) + { + return PVRSRV_OK; + } + + /* get the queue from the buffer structure */ + psQueue = psSwapChain->psQueue; + +#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS) + + if(psDCInfo->psFuncTable->pfnQuerySwapCommandID != IMG_NULL) + { + psDCInfo->psFuncTable->pfnQuerySwapCommandID(psDCInfo->hExtDevice, + psSwapChain->hExtSwapChain, + psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer, + 0, + &ui16SwapCommandID, + &bAddReferenceToLast); + + } + +#endif + + /* specify the syncs */ + apsSrcSync[0] = psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo; + if(bAddReferenceToLast && psSwapChain->psLastFlipBuffer) + { + /* Make sure we don't make a double dependency on the same server */ + if (apsSrcSync[0] != psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo) + { + 
apsSrcSync[1] = psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo; + ui32NumSrcSyncs++; + } + } + + /* insert the command (header) */ + eError = PVRSRVInsertCommandKM (psQueue, + &psCommand, + psDCInfo->ui32DeviceID, + ui16SwapCommandID, + 0, + IMG_NULL, + ui32NumSrcSyncs, + apsSrcSync, + sizeof(DISPLAYCLASS_FLIP_COMMAND), + IMG_NULL, + IMG_NULL); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to get space in queue")); + goto Exit; + } + + /* setup the flip command */ + psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData; + + /* Ext Device Handle */ + psFlipCmd->hExtDevice = psDCInfo->hExtDevice; + + /* Ext SwapChain Handle */ + psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain; + + /* Ext Buffer Handle (Buffer to Flip to) */ + psFlipCmd->hExtBuffer = psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer; + + /* private tag */ + psFlipCmd->hPrivateTag = IMG_NULL; + + /* setup the clip rects */ + psFlipCmd->ui32ClipRectCount = 0; + + psFlipCmd->ui32SwapInterval = 1; + + /* submit the command */ + eError = PVRSRVSubmitCommandKM (psQueue, psCommand); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to submit command")); + goto Exit; + } + + /* Schedule an MISR to process it */ + SysAcquireData(&psSysData); + eError = OSScheduleMISR(psSysData); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to schedule MISR")); + goto Exit; + } + + /* update the last flip buffer */ + psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer; + + eError = PVRSRV_OK; + +Exit: + + if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) + { + eError = PVRSRV_ERROR_RETRY; + } + + return eError; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVRegisterSystemISRHandler + + @Description + + registers an external ISR to be called of the back of a system ISR + + @Input ppfnISRHandler : ISR pointer + + @Input hISRHandlerData : Callback data + + @Input ui32ISRSourceMask : ISR Mask + + @Input ui32DeviceID : unique device key + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVRegisterSystemISRHandler (PFN_ISR_HANDLER pfnISRHandler, + IMG_VOID *pvISRHandlerData, + IMG_UINT32 ui32ISRSourceMask, + IMG_UINT32 ui32DeviceID) +{ + SYS_DATA *psSysData; + PVRSRV_DEVICE_NODE *psDevNode; + + PVR_UNREFERENCED_PARAMETER(ui32ISRSourceMask); + + SysAcquireData(&psSysData); + + /* Find Dev Node (just using the device id, ignore the class) */ + psDevNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DeviceID, + IMG_TRUE); + + if (psDevNode == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterSystemISRHandler: Failed to get psDevNode")); + PVR_DBG_BREAK; + return PVRSRV_ERROR_NO_DEVICENODE_FOUND; + } + + /* set up data before enabling the ISR */ + psDevNode->pvISRData = (IMG_VOID*) pvISRHandlerData; + + /* enable the ISR */ + psDevNode->pfnDeviceISR = pfnISRHandler; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVSetDCState_ForEachVaCb + + @Description + + If the device node is a display, calls its set state function. + + @Input psDeviceNode - the device node + va - variable argument list with: + ui32State - the state to be set. 
+ +******************************************************************************/ +static +IMG_VOID PVRSRVSetDCState_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va) +{ + PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + IMG_UINT32 ui32State; + ui32State = va_arg(va, IMG_UINT32); + + if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY) + { + psDCInfo = (PVRSRV_DISPLAYCLASS_INFO *)psDeviceNode->pvDevice; + if (psDCInfo->psFuncTable->pfnSetDCState && psDCInfo->hExtDevice) + { + psDCInfo->psFuncTable->pfnSetDCState(psDCInfo->hExtDevice, ui32State); + } + } +} + + +/*! +****************************************************************************** + + @Function PVRSRVSetDCState + + @Description + + Calls the display driver(s) to put them into the specified state. + + @Input ui32State: new DC state - one of DC_STATE_* + +******************************************************************************/ +IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State) +{ +/* PVRSRV_DISPLAYCLASS_INFO *psDCInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; */ + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList, + &PVRSRVSetDCState_ForEachVaCb, + ui32State); +} + +static PVRSRV_ERROR +PVRSRVDCMemInfoGetCpuVAddr(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo, + IMG_CPU_VIRTADDR *pVAddr) +{ + *pVAddr = psKernelMemInfo->pvLinAddrKM; + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PVRSRVDCMemInfoGetCpuPAddr(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo, + IMG_SIZE_T uByteOffset, IMG_CPU_PHYADDR *pPAddr) +{ + *pPAddr = OSMemHandleToCpuPAddr(psKernelMemInfo->sMemBlk.hOSMemHandle, uByteOffset); + return PVRSRV_OK; +} + +static PVRSRV_ERROR +PVRSRVDCMemInfoGetByteSize(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo, + IMG_SIZE_T *uByteSize) +{ + *uByteSize = psKernelMemInfo->uAllocSize; + return PVRSRV_OK; +} + +static IMG_BOOL +PVRSRVDCMemInfoIsPhysContig(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + return 
OSMemHandleIsPhysContig(psKernelMemInfo->sMemBlk.hOSMemHandle); +} + +static PVRSRV_ERROR PVRSRVDCMemInfoGetBvHandle(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo, IMG_VOID **handle) +{ +#if !defined(CONFIG_GCBV) + *handle = NULL; + return PVRSRV_ERROR_NOT_SUPPORTED; +#else + *handle = gc_meminfo_to_hndl(psKernelMemInfo); + return PVRSRV_OK; +#endif +} + +/*! +****************************************************************************** + + @Function PVRSRVDCMemInfoGetCpuMultiPlanePAddr + + @Description returns physical addresses of a multi-plane buffer + + + @Input psKernelMemInfo - Pointer to Kernel Memory Info structure + puPlaneByteOffsets - requested offset inside the plane. + If the array is a NULL pointer, 0 requested offsets + are assumed for all planes; + pui32NumAddrOffsets - specifying the size of the user array. + If the array is smaller than the number of the planes + for this buffer, the correct size will be set and an + error returned back; + +@Output pPlanePAddrs - array of plane physical addresses of the returned size + in pui32NumAddrOffsets; + pui32NumAddrOffsets - contains the real number of planes for the buffer; + +@Return IMG_INT32 : size of the entire buffer or negative number on ERROR + +******************************************************************************/ +static IMG_INT32 +PVRSRVDCMemInfoGetCpuMultiPlanePAddr(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo, + IMG_SIZE_T* puPlaneByteOffsets, IMG_CPU_PHYADDR* pPlanePAddrs, + IMG_UINT32* pui32NumAddrOffsets) +{ + IMG_UINT32 aui32PlaneAddressOffsets[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES]; + IMG_INT32 i32Ret; + IMG_UINT32 i; + + i32Ret = OSGetMemMultiPlaneInfo(psKernelMemInfo->sMemBlk.hOSMemHandle, + aui32PlaneAddressOffsets, + pui32NumAddrOffsets); + + if((i32Ret < 0) || (pPlanePAddrs == IMG_NULL)) + return i32Ret; + + for (i = 0; i < *pui32NumAddrOffsets; i++) + { + IMG_SIZE_T uiReqByteOffsets = puPlaneByteOffsets ? 
puPlaneByteOffsets[i] : 0; + + uiReqByteOffsets += aui32PlaneAddressOffsets[i]; + + pPlanePAddrs[i] = OSMemHandleToCpuPAddr(psKernelMemInfo->sMemBlk.hOSMemHandle, uiReqByteOffsets); + } + + return i32Ret; +} + +/*! +****************************************************************************** + + @Function PVRGetDisplayClassJTable + + @Description + + Sets up function table for 3rd party Display Class Device to call through + + @Input psJTable : pointer to function pointer table memory + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable) +{ + psJTable->ui32TableSize = sizeof(PVRSRV_DC_DISP2SRV_KMJTABLE); + psJTable->pfnPVRSRVRegisterDCDevice = &PVRSRVRegisterDCDeviceKM; + psJTable->pfnPVRSRVRemoveDCDevice = &PVRSRVRemoveDCDeviceKM; + psJTable->pfnPVRSRVOEMFunction = &SysOEMFunction; + psJTable->pfnPVRSRVRegisterCmdProcList = &PVRSRVRegisterCmdProcListKM; + psJTable->pfnPVRSRVRemoveCmdProcList = &PVRSRVRemoveCmdProcListKM; +#if defined(SUPPORT_MISR_IN_THREAD) + psJTable->pfnPVRSRVCmdComplete = &OSVSyncMISR; +#else + psJTable->pfnPVRSRVCmdComplete = &PVRSRVCommandCompleteKM; +#endif + psJTable->pfnPVRSRVRegisterSystemISRHandler = &PVRSRVRegisterSystemISRHandler; + psJTable->pfnPVRSRVRegisterPowerDevice = &PVRSRVRegisterPowerDevice; +#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS) + psJTable->pfnPVRSRVFreeCmdCompletePacket = &PVRSRVFreeCommandCompletePacketKM; +#endif + psJTable->pfnPVRSRVDCMemInfoGetCpuVAddr = &PVRSRVDCMemInfoGetCpuVAddr; + psJTable->pfnPVRSRVDCMemInfoGetCpuPAddr = &PVRSRVDCMemInfoGetCpuPAddr; + psJTable->pfnPVRSRVDCMemInfoGetByteSize = &PVRSRVDCMemInfoGetByteSize; + psJTable->pfnPVRSRVDCMemInfoIsPhysContig = &PVRSRVDCMemInfoIsPhysContig; + psJTable->pfnPVRSRVDCMemInfoGetBvHandle = &PVRSRVDCMemInfoGetBvHandle; + psJTable->pfnPVRSRVDCMemInfoGetCpuMultiPlanePAddr = PVRSRVDCMemInfoGetCpuMultiPlanePAddr; + return 
IMG_TRUE; +} + + + +/****************************************************************************** + + @Function PVRSRVCloseBCDeviceKM + + @Description + + Closes a connection to the Buffer Class device + + @Input hDeviceKM : device handle + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVCloseBCDeviceKM (IMG_HANDLE hDeviceKM) +{ + PVRSRV_ERROR eError; + PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo; + + psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM; + + /* Remove the item from the resman list and trigger the callback. */ + eError = ResManFreeResByPtr(psBCPerContextInfo->hResItem, CLEANUP_WITH_POLL); + + return eError; +} + + +static PVRSRV_ERROR CloseBCDeviceCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo; + PVRSRV_BUFFERCLASS_INFO *psBCInfo; + IMG_UINT32 i; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)pvParam; + + psBCInfo = psBCPerContextInfo->psBCInfo; + + for (i = 0; i < psBCInfo->ui32BufferCount; i++) + { + if (psBCInfo->psBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount != 0) + { + PVR_DPF((PVR_DBG_ERROR, "CloseBCDeviceCallBack: buffer %d (0x%p) still mapped (ui32MemMapRefCount = %d)", + i, + &psBCInfo->psBuffer[i].sDeviceClassBuffer, + psBCInfo->psBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount)); + return PVRSRV_ERROR_STILL_MAPPED; + } + } + + psBCInfo->ui32RefCount--; + if(psBCInfo->ui32RefCount == 0) + { + /* close the external device */ + psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->ui32DeviceID, psBCInfo->hExtDevice); + + /* free syncinfos */ + for(i=0; i<psBCInfo->ui32BufferCount; i++) + { + if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo) + { + PVRSRVKernelSyncInfoDecRef(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo, 
IMG_NULL); + } + } + + /* free buffers */ + if(psBCInfo->psBuffer) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER) * psBCInfo->ui32BufferCount, psBCInfo->psBuffer, IMG_NULL); + psBCInfo->psBuffer = IMG_NULL; + } + } + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_PERCONTEXT_INFO), psBCPerContextInfo, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function PVRSRVOpenBCDeviceKM + + @Description + + Opens a connection to the Buffer Class device, associating the connection + with a Device Memory Context for a services managed device + + @Input psPerProc : Per-process data + @Input ui32DeviceID : unique device index + @Input hDevCookie : devcookie used to derive the Device Memory + Context into BC surfaces will be mapped into + @Outut phDeviceKM : handle to the DC device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVOpenBCDeviceKM (PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_UINT32 ui32DeviceID, + IMG_HANDLE hDevCookie, + IMG_HANDLE *phDeviceKM) +{ + PVRSRV_BUFFERCLASS_INFO *psBCInfo; + PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + SYS_DATA *psSysData; + IMG_UINT32 i; + PVRSRV_ERROR eError; + + if(!phDeviceKM || !hDevCookie) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Invalid params")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + SysAcquireData(&psSysData); + + /* find the matching devicenode */ + psDeviceNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DeviceID, + IMG_FALSE, + PVRSRV_DEVICE_CLASS_BUFFER); + if (!psDeviceNode) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: No devnode matching index %d", ui32DeviceID)); + return PVRSRV_ERROR_NO_DEVICENODE_FOUND; + } + psBCInfo = 
(PVRSRV_BUFFERCLASS_INFO*)psDeviceNode->pvDevice; + +/* +FoundDevice: +*/ + /* + Allocate the per-context BC Info before calling the external device, + to make error handling easier. + */ + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(*psBCPerContextInfo), + (IMG_VOID **)&psBCPerContextInfo, IMG_NULL, + "Buffer Class per Context Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed psBCPerContextInfo alloc")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet(psBCPerContextInfo, 0, sizeof(*psBCPerContextInfo)); + + if(psBCInfo->ui32RefCount++ == 0) + { + BUFFER_INFO sBufferInfo; + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie; + + /* store the device kernel context to map into */ + psBCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext; + + /* open the external device */ + eError = psBCInfo->psFuncTable->pfnOpenBCDevice(ui32DeviceID, &psBCInfo->hExtDevice); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to open external BC device")); + return eError; + } + + /* get information about the buffers */ + eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, &sBufferInfo); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM : Failed to get BC Info")); + return eError; + } + + /* interpret and store info */ + psBCInfo->ui32BufferCount = sBufferInfo.ui32BufferCount; + + /* allocate BC buffers */ + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount, + (IMG_VOID **)&psBCInfo->psBuffer, + IMG_NULL, + "Array of Buffer Class Buffer"); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to allocate BC buffers")); + return eError; + } + OSMemSet (psBCInfo->psBuffer, + 0, + sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount); + + for(i=0; i<psBCInfo->ui32BufferCount; i++) + { + /* create a syncinfo for the device's system surface */ + eError = 
PVRSRVAllocSyncInfoKM(IMG_NULL, + psBCInfo->hDevMemContext, + &psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed sync info alloc")); + goto ErrorExit; + } + + /* + get the buffers from the buffer class + drivers by index, passing-in the syncdata objects + */ + eError = psBCInfo->psFuncTable->pfnGetBCBuffer(psBCInfo->hExtDevice, + i, + psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncData, + &psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtBuffer); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to get BC buffers")); + goto ErrorExit; + } + + /* setup common device class info */ + psBCInfo->psBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psBCInfo->psFuncTable->pfnGetBufferAddr; + psBCInfo->psBuffer[i].sDeviceClassBuffer.hDevMemContext = psBCInfo->hDevMemContext; + psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtDevice = psBCInfo->hExtDevice; + psBCInfo->psBuffer[i].sDeviceClassBuffer.ui32MemMapRefCount = 0; + } + } + + psBCPerContextInfo->psBCInfo = psBCInfo; + psBCPerContextInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_BUFFERCLASS_DEVICE, + psBCPerContextInfo, + 0, + &CloseBCDeviceCallBack); + + /* return a reference to the BCPerContextInfo */ + *phDeviceKM = (IMG_HANDLE)psBCPerContextInfo; + + return PVRSRV_OK; + +ErrorExit: + + /* free syncinfos */ + for(i=0; i<psBCInfo->ui32BufferCount; i++) + { + if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo) + { + PVRSRVKernelSyncInfoDecRef(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo, IMG_NULL); + } + } + + /* free buffers */ + if(psBCInfo->psBuffer) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER), psBCInfo->psBuffer, IMG_NULL); + psBCInfo->psBuffer = IMG_NULL; + } + + return eError; +} + + + + +/****************************************************************************** + + @Function PVRSRVGetBCInfoKM + + 
@Description + + Gets Buffer Class device Info + + @Input hDeviceKM : device handle + @Output psBufferInfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVGetBCInfoKM (IMG_HANDLE hDeviceKM, + BUFFER_INFO *psBufferInfo) +{ + PVRSRV_BUFFERCLASS_INFO *psBCInfo; + PVRSRV_ERROR eError; + + if(!hDeviceKM || !psBufferInfo) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM); + + eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, psBufferInfo); + + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM : Failed to get BC Info")); + return eError; + } + + return PVRSRV_OK; +} + + +/****************************************************************************** + + @Function PVRSRVGetBCBufferKM + + @Description + + Gets Buffer Class Buffer Handle + + @Input hDeviceKM : device handle + @Output psBufferInfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVGetBCBufferKM (IMG_HANDLE hDeviceKM, + IMG_UINT32 ui32BufferIndex, + IMG_HANDLE *phBuffer) +{ + PVRSRV_BUFFERCLASS_INFO *psBCInfo; + + if(!hDeviceKM || !phBuffer) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM); + + if(ui32BufferIndex < psBCInfo->ui32BufferCount) + { + *phBuffer = (IMG_HANDLE)&psBCInfo->psBuffer[ui32BufferIndex]; + } + else + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Buffer index %d out of range (%d)", ui32BufferIndex,psBCInfo->ui32BufferCount)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function PVRGetBufferClassJTable + + @Description + + Sets up function table for 3rd party Buffer Class Device to call through + + @Input psJTable : pointer to function pointer table memory + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable) +{ + psJTable->ui32TableSize = sizeof(PVRSRV_BC_BUFFER2SRV_KMJTABLE); + + psJTable->pfnPVRSRVRegisterBCDevice = &PVRSRVRegisterBCDeviceKM; + psJTable->pfnPVRSRVScheduleDevices = &PVRSRVScheduleDevicesKM; + psJTable->pfnPVRSRVRemoveBCDevice = &PVRSRVRemoveBCDeviceKM; + + return IMG_TRUE; +} + +/****************************************************************************** + End of file (deviceclass.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/common/deviceid.h b/pvr-source/services4/srvkm/common/deviceid.h new file mode 100644 index 0000000..1cf9f0f --- /dev/null +++ b/pvr-source/services4/srvkm/common/deviceid.h @@ -0,0 +1,51 @@ +/*************************************************************************/ /*! +@Title Device ID helpers +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __DEVICEID_H__ +#define __DEVICEID_H__ + +#include "services.h" +#include "syscommon.h" + +PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID); +PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID); + +#endif /* __DEVICEID_H__ */ diff --git a/pvr-source/services4/srvkm/common/devicemem.c b/pvr-source/services4/srvkm/common/devicemem.c new file mode 100644 index 0000000..872c0ba --- /dev/null +++ b/pvr-source/services4/srvkm/common/devicemem.c @@ -0,0 +1,2580 @@ +/*************************************************************************/ /*! +@Title Device addressable memory functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device addressable memory APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include <stddef.h> + +#include "services_headers.h" +#include "buffer_manager.h" +#include "pdump_km.h" +#include "pvr_bridge_km.h" +#include "osfunc.h" +#if defined(CONFIG_GCBV) +#include "gc_bvmapping.h" +#endif + +#if defined(SUPPORT_ION) +#include "ion.h" +#include "env_perproc.h" +#endif + +/* local function prototypes */ +static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32Flags, + IMG_SIZE_T ui32Size, + IMG_SIZE_T ui32Alignment, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfo); + +/* local structures */ + +/* + structure stored in resman to store references + to the SRC and DST meminfo +*/ +typedef struct _RESMAN_MAP_DEVICE_MEM_DATA_ +{ + /* the DST meminfo created by the map */ + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + /* SRC meminfo */ + PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo; +} RESMAN_MAP_DEVICE_MEM_DATA; + +/* + map device class resman memory storage structure +*/ +typedef struct _PVRSRV_DC_MAPINFO_ +{ + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_UINT32 ui32RangeIndex; + IMG_UINT32 ui32TilingStride; + PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer; +} PVRSRV_DC_MAPINFO; + +static IMG_UINT32 g_ui32SyncUID = 0; + +/*! 
+****************************************************************************** + + @Function PVRSRVGetDeviceMemHeapsKM + + @Description + + Gets the device shared memory heaps + + @Input hDevCookie : + @Output phDevMemContext : ptr to handle to memory context + @Output psHeapInfo : ptr to array of heap info + + @Return PVRSRV_DEVICE_NODE, valid devnode or IMG_NULL + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie, +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_HEAP_INFO_KM *psHeapInfo) +#else + PVRSRV_HEAP_INFO *psHeapInfo) +#endif +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_UINT32 ui32HeapCount; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + IMG_UINT32 i; + + if (hDevCookie == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetDeviceMemHeapsKM: hDevCookie invalid")); + PVR_DBG_BREAK; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie; + + /* Setup useful pointers */ + ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount; + psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap; + + /* check we don't exceed the max number of heaps */ + PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS); + + /* retrieve heap information */ + for(i=0; i<ui32HeapCount; i++) + { + /* return information about the heap */ + psHeapInfo[i].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID; + psHeapInfo[i].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap; + psHeapInfo[i].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase; + psHeapInfo[i].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize; + psHeapInfo[i].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs; + /* (XTileStride > 0) denotes a tiled heap */ + psHeapInfo[i].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride; + } + + for(; i < PVRSRV_MAX_CLIENT_HEAPS; i++) + { + OSMemSet(psHeapInfo + i, 0, sizeof(*psHeapInfo)); + psHeapInfo[i].ui32HeapID = 
(IMG_UINT32)PVRSRV_UNDEFINED_HEAP_ID; + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVCreateDeviceMemContextKM + + @Description + + Creates a device memory context + + @Input hDevCookie : + @Input psPerProc : Per-process data + @Output phDevMemContext : ptr to handle to memory context + @Output pui32ClientHeapCount : ptr to heap count + @Output psHeapInfo : ptr to array of heap info + + @Return PVRSRV_DEVICE_NODE, valid devnode or IMG_NULL + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie, + PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE *phDevMemContext, + IMG_UINT32 *pui32ClientHeapCount, +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_HEAP_INFO_KM *psHeapInfo, +#else + PVRSRV_HEAP_INFO *psHeapInfo, +#endif + IMG_BOOL *pbCreated, + IMG_BOOL *pbShared) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + IMG_HANDLE hDevMemContext; + IMG_HANDLE hDevMemHeap; + IMG_DEV_PHYADDR sPDDevPAddr; + IMG_UINT32 i; + +#if !defined(PVR_SECURE_HANDLES) && !defined (SUPPORT_SID_INTERFACE) + PVR_UNREFERENCED_PARAMETER(pbShared); +#endif + + if (hDevCookie == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVCreateDeviceMemContextKM: hDevCookie invalid")); + PVR_DBG_BREAK; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie; + + /* + Setup useful pointers + */ + ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount; + psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap; + + /* + check we don't exceed the max number of heaps + */ + PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS); + + /* + Create a memory context for the caller + */ + hDevMemContext = BM_CreateContext(psDeviceNode, + &sPDDevPAddr, + psPerProc, + pbCreated); + 
if (hDevMemContext == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDeviceMemContextKM: Failed BM_CreateContext")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* create the per context heaps */ + for(i=0; i<ui32HeapCount; i++) + { + switch(psDeviceMemoryHeap[i].DevMemHeapType) + { + case DEVICE_MEMORY_HEAP_SHARED_EXPORTED: + { + /* return information about the heap */ + psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID; + psHeapInfo[ui32ClientHeapCount].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap; + psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase; + psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize; + psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs; + #if defined(SUPPORT_MEMORY_TILING) + psHeapInfo[ui32ClientHeapCount].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride; + #else + psHeapInfo[ui32ClientHeapCount].ui32XTileStride = 0; + #endif + +#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) + pbShared[ui32ClientHeapCount] = IMG_TRUE; +#endif + ui32ClientHeapCount++; + break; + } + case DEVICE_MEMORY_HEAP_PERCONTEXT: + { + if (psDeviceMemoryHeap[i].ui32HeapSize > 0) + { + hDevMemHeap = BM_CreateHeap(hDevMemContext, + &psDeviceMemoryHeap[i]); + if (hDevMemHeap == IMG_NULL) + { + BM_DestroyContext(hDevMemContext, IMG_NULL); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } + else + { + hDevMemHeap = IMG_NULL; + } + + /* return information about the heap */ + psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID; + psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap; + psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase; + psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize; + psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs; + #if defined(SUPPORT_MEMORY_TILING) + 
psHeapInfo[ui32ClientHeapCount].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride; + #else + psHeapInfo[ui32ClientHeapCount].ui32XTileStride = 0; + #endif +#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) + pbShared[ui32ClientHeapCount] = IMG_FALSE; +#endif + + ui32ClientHeapCount++; + break; + } + } + } + + /* return shared_exported and per context heap information to the caller */ + *pui32ClientHeapCount = ui32ClientHeapCount; + *phDevMemContext = hDevMemContext; + + return PVRSRV_OK; +} + +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + IMG_BOOL *pbDestroyed) +{ + PVR_UNREFERENCED_PARAMETER(hDevCookie); + + return BM_DestroyContext(hDevMemContext, pbDestroyed); +} + + + + +/*! +****************************************************************************** + + @Function PVRSRVGetDeviceMemHeapInfoKM + + @Description + + gets heap info + + @Input hDevCookie : + @Input hDevMemContext : ptr to handle to memory context + @Output pui32ClientHeapCount : ptr to heap count + @Output psHeapInfo : ptr to array of heap info + + @Return + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + IMG_UINT32 *pui32ClientHeapCount, +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_HEAP_INFO_KM *psHeapInfo, +#else + PVRSRV_HEAP_INFO *psHeapInfo, +#endif + IMG_BOOL *pbShared) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + IMG_HANDLE hDevMemHeap; + IMG_UINT32 i; + +#if !defined(PVR_SECURE_HANDLES) && !defined (SUPPORT_SID_INTERFACE) + PVR_UNREFERENCED_PARAMETER(pbShared); +#endif + + if (hDevCookie == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetDeviceMemHeapInfoKM: hDevCookie invalid")); + PVR_DBG_BREAK; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + 
psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie; + + /* + Setup useful pointers + */ + ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount; + psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap; + + /* + check we don't exceed the max number of heaps + */ + PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS); + + /* create the per context heaps */ + for(i=0; i<ui32HeapCount; i++) + { + switch(psDeviceMemoryHeap[i].DevMemHeapType) + { + case DEVICE_MEMORY_HEAP_SHARED_EXPORTED: + { + /* return information about the heap */ + psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID; + psHeapInfo[ui32ClientHeapCount].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap; + psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase; + psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize; + psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs; + psHeapInfo[ui32ClientHeapCount].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride; +#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) + pbShared[ui32ClientHeapCount] = IMG_TRUE; +#endif + ui32ClientHeapCount++; + break; + } + case DEVICE_MEMORY_HEAP_PERCONTEXT: + { + if (psDeviceMemoryHeap[i].ui32HeapSize > 0) + { + hDevMemHeap = BM_CreateHeap(hDevMemContext, + &psDeviceMemoryHeap[i]); + + if (hDevMemHeap == IMG_NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } + else + { + hDevMemHeap = IMG_NULL; + } + + /* return information about the heap */ + psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID; + psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap; + psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase; + psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize; + psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs; + 
psHeapInfo[ui32ClientHeapCount].ui32XTileStride = psDeviceMemoryHeap[i].ui32XTileStride; +#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) + pbShared[ui32ClientHeapCount] = IMG_FALSE; +#endif + + ui32ClientHeapCount++; + break; + } + } + } + + /* return shared_exported and per context heap information to the caller */ + *pui32ClientHeapCount = ui32ClientHeapCount; + + return PVRSRV_OK; +} + +static PVRSRV_ERROR UpdateDeviceMemoryPlaneOffsets(PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + if(psMemInfo->ui32Flags & PVRSRV_MEM_ION) + { + + PVRSRV_MEMBLK *psMemBlock = &(psMemInfo->sMemBlk); + IMG_UINT32 ui32AddressOffsets[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES]; + IMG_UINT32 ui32NumAddrOffsets = PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES; + + IMG_INT32 retSize = OSGetMemMultiPlaneInfo(psMemBlock->hOSMemHandle, + ui32AddressOffsets, &ui32NumAddrOffsets); + + if((retSize > 0) && ui32NumAddrOffsets) + { + int i; + for(i = 0; i < PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES; i++) + { + if(i < ui32NumAddrOffsets) + psMemInfo->planeOffsets[i] = ui32AddressOffsets[i]; + else + psMemInfo->planeOffsets[i] = (IMG_INT32)-1; + } + } + } + + return PVRSRV_OK; + +} + +/*! 
+****************************************************************************** + + @Function AllocDeviceMem + + @Description + + Allocates device memory + + @Input hDevCookie : + + @Input hDevMemHeap + + @Input ui32Flags : Some combination of PVRSRV_MEM_ flags + + @Input ui32Size : Number of bytes to allocate + + @Input ui32Alignment : Alignment of allocation + + @Input pvPrivData : Opaque private data passed through to allocator + + @Input ui32PrivDataLength : Length of opaque private data + + @Output **ppsMemInfo : On success, receives a pointer to the created MEM_INFO structure + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemHeap, + IMG_UINT32 ui32Flags, + IMG_SIZE_T ui32Size, + IMG_SIZE_T ui32Alignment, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfo) +{ + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + BM_HANDLE hBuffer; + /* Pointer to implementation details within the mem_info */ + PVRSRV_MEMBLK *psMemBlock; + IMG_BOOL bBMError; + + PVR_UNREFERENCED_PARAMETER(hDevCookie); + + *ppsMemInfo = IMG_NULL; + + if(OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_KERNEL_MEM_INFO), + (IMG_VOID **)&psMemInfo, IMG_NULL, + "Kernel Memory Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: Failed to alloc memory for block")); + return (PVRSRV_ERROR_OUT_OF_MEMORY); + } + + OSMemSet(psMemInfo, 0, sizeof(*psMemInfo)); + + psMemBlock = &(psMemInfo->sMemBlk); + + /* ION and DYNAMIC re-mapping + * require the PAGEABLE FLAG set + */ + if (ui32Flags & (PVRSRV_MEM_ION | + PVRSRV_HAP_NO_GPU_VIRTUAL_ON_ALLOC)) + { + ui32Flags |= PVRSRV_HAP_GPU_PAGEABLE; + } + + /* BM supplied Device Virtual Address with physical backing RAM */ + psMemInfo->ui32Flags = ui32Flags | 
PVRSRV_MEM_RAM_BACKED_ALLOCATION; + + bBMError = BM_Alloc (hDevMemHeap, + IMG_NULL, + ui32Size, + &psMemInfo->ui32Flags, + IMG_CAST_TO_DEVVADDR_UINT(ui32Alignment), + pvPrivData, + ui32PrivDataLength, + ui32ChunkSize, + ui32NumVirtChunks, + ui32NumPhysChunks, + pabMapChunk, + &hBuffer); + + if (!bBMError) + { + PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: BM_Alloc Failed")); + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); + /*not nulling pointer, out of scope*/ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Fill in "Implementation dependant" section of mem info */ + psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer); + psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer); + + /* Convert from BM_HANDLE to external IMG_HANDLE */ + psMemBlock->hBuffer = (IMG_HANDLE)hBuffer; + + /* Fill in the public fields of the MEM_INFO structure */ + + psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer); + + psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr; + + if (ui32Flags & PVRSRV_MEM_SPARSE) + { + psMemInfo->uAllocSize = ui32ChunkSize * ui32NumVirtChunks; + } + else + { + psMemInfo->uAllocSize = ui32Size; + } + + /* Clear the Backup buffer pointer as we do not have one at this point. We only allocate this as we are going up/down */ + psMemInfo->pvSysBackupBuffer = IMG_NULL; + + /* Update the Multimedia plane offsets */ + UpdateDeviceMemoryPlaneOffsets(psMemInfo); + + /* + * Setup the output. + */ + *ppsMemInfo = psMemInfo; + + /* + * And I think we're done for now.... 
+ */ + return (PVRSRV_OK); +} + +static PVRSRV_ERROR FreeDeviceMem2(PVRSRV_KERNEL_MEM_INFO *psMemInfo, PVRSRV_FREE_CALLBACK_ORIGIN eCallbackOrigin) +{ + BM_HANDLE hBuffer; + + if (!psMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + hBuffer = psMemInfo->sMemBlk.hBuffer; + + switch(eCallbackOrigin) + { + case PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR: + BM_Free(hBuffer, psMemInfo->ui32Flags); + break; + case PVRSRV_FREE_CALLBACK_ORIGIN_IMPORTER: + BM_FreeExport(hBuffer, psMemInfo->ui32Flags); + break; + default: + break; + } + + if (psMemInfo->pvSysBackupBuffer && + eCallbackOrigin == PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psMemInfo->uAllocSize, psMemInfo->pvSysBackupBuffer, IMG_NULL); + psMemInfo->pvSysBackupBuffer = IMG_NULL; + } + + if (psMemInfo->ui32RefCount == 0) + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); + + return(PVRSRV_OK); +} + +static PVRSRV_ERROR FreeDeviceMem(PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + BM_HANDLE hBuffer; + + if (!psMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + hBuffer = psMemInfo->sMemBlk.hBuffer; + + BM_Free(hBuffer, psMemInfo->ui32Flags); + + if(psMemInfo->pvSysBackupBuffer) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psMemInfo->uAllocSize, psMemInfo->pvSysBackupBuffer, IMG_NULL); + psMemInfo->pvSysBackupBuffer = IMG_NULL; + } + + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); + + return(PVRSRV_OK); +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVAllocSyncInfoKM + + @Description + + Allocates a sync info + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo) +{ + IMG_HANDLE hSyncDevMemHeap; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + BM_CONTEXT *pBMContext; + PVRSRV_ERROR eError; + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo; + PVRSRV_SYNC_DATA *psSyncData; + + eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_KERNEL_SYNC_INFO), + (IMG_VOID **)&psKernelSyncInfo, IMG_NULL, + "Kernel Synchronization Info"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + eError = OSAtomicAlloc(&psKernelSyncInfo->pvRefCount); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to allocate atomic")); + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + + /* Get the devnode from the devheap */ + pBMContext = (BM_CONTEXT*)hDevMemContext; + psDevMemoryInfo = &pBMContext->psDeviceNode->sDevMemoryInfo; + + /* and choose a heap for the syncinfo */ + hSyncDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->ui32SyncHeapID].hDevMemHeap; + + /* + Cache consistent flag would be unnecessary if the heap attributes were + changed to specify it. 
+ */ + eError = AllocDeviceMem(hDevCookie, + hSyncDevMemHeap, + PVRSRV_MEM_CACHE_CONSISTENT, + sizeof(PVRSRV_SYNC_DATA), + sizeof(IMG_UINT32), + IMG_NULL, + 0, + 0, 0, 0, IMG_NULL, /* Sparse mapping args, not required */ + &psKernelSyncInfo->psSyncDataMemInfoKM); + + if (eError != PVRSRV_OK) + { + + PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory")); + OSAtomicFree(psKernelSyncInfo->pvRefCount); + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL); + /*not nulling pointer, out of scope*/ + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* init sync data */ + psKernelSyncInfo->psSyncData = psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM; + psSyncData = psKernelSyncInfo->psSyncData; + + psSyncData->ui32WriteOpsPending = 0; + psSyncData->ui32WriteOpsComplete = 0; + psSyncData->ui32ReadOpsPending = 0; + psSyncData->ui32ReadOpsComplete = 0; + psSyncData->ui32ReadOps2Pending = 0; + psSyncData->ui32ReadOps2Complete = 0; + psSyncData->ui32LastOpDumpVal = 0; + psSyncData->ui32LastReadOpDumpVal = 0; + psSyncData->ui64LastWrite = 0; + +#if defined(PDUMP) + PDUMPCOMMENT("Allocating kernel sync object"); + PDUMPMEM(psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM, + psKernelSyncInfo->psSyncDataMemInfoKM, + 0, + (IMG_UINT32)psKernelSyncInfo->psSyncDataMemInfoKM->uAllocSize, + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM)); +#endif + + psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete); + psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete); + psKernelSyncInfo->sReadOps2CompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32ReadOps2Complete); + psKernelSyncInfo->ui32UID = g_ui32SyncUID++; + + /* syncinfo meminfo 
has no syncinfo! */ + psKernelSyncInfo->psSyncDataMemInfoKM->psKernelSyncInfo = IMG_NULL; + + OSAtomicInc(psKernelSyncInfo->pvRefCount); + + /* return result */ + *ppsKernelSyncInfo = psKernelSyncInfo; + + return PVRSRV_OK; +} + +IMG_EXPORT +IMG_VOID PVRSRVAcquireSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo) +{ + OSAtomicInc(psKernelSyncInfo->pvRefCount); +} + +/*! +****************************************************************************** + + @Function PVRSRVFreeSyncInfoKM + + @Description + + Frees a sync info + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +IMG_VOID IMG_CALLCONV PVRSRVReleaseSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo) +{ + if (OSAtomicDecAndTest(psKernelSyncInfo->pvRefCount)) + { + FreeDeviceMem(psKernelSyncInfo->psSyncDataMemInfoKM); + + /* Catch anyone who is trying to access the freed structure */ + psKernelSyncInfo->psSyncDataMemInfoKM = IMG_NULL; + psKernelSyncInfo->psSyncData = IMG_NULL; + OSAtomicFree(psKernelSyncInfo->pvRefCount); + (IMG_VOID)OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL); + /*not nulling pointer, copy on stack*/ + } +} + +/*! 
+****************************************************************************** + + @Function freeExternal + + @Description + + Code for freeing meminfo elements that are specific to external types memory + + @Input psMemInfo : Kernel meminfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ + +static IMG_VOID freeExternal(PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + IMG_HANDLE hOSWrapMem = psMemInfo->sMemBlk.hOSWrapMem; + + /* free the page addr array if req'd */ + if(psMemInfo->sMemBlk.psIntSysPAddr) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psMemInfo->sMemBlk.psIntSysPAddr, IMG_NULL); + psMemInfo->sMemBlk.psIntSysPAddr = IMG_NULL; + } + + /* Mem type dependent stuff */ + if (psMemInfo->memType == PVRSRV_MEMTYPE_WRAPPED) + { + if(hOSWrapMem) + { + OSReleasePhysPageAddr(hOSWrapMem); + } + } +#if defined(SUPPORT_ION) + else if (psMemInfo->memType == PVRSRV_MEMTYPE_ION) + { + if (hOSWrapMem) + { + IonUnimportBufferAndReleasePhysAddr(hOSWrapMem); + } + } +#endif +} + +/*! +****************************************************************************** + + @Function FreeMemCallBackCommon + + @Description + + Common code for freeing device mem (called for freeing, unwrapping and unmapping) + + @Input psMemInfo : Kernel meminfo + @Input ui32Param : packet size + @Input uibFromAllocatorParam : Are we being called by the original allocator? 
+ + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR FreeMemCallBackCommon(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Param, + PVRSRV_FREE_CALLBACK_ORIGIN eCallbackOrigin) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + + /* decrement the refcount */ + PVRSRVKernelMemInfoDecRef(psMemInfo); + + /* check no other processes has this meminfo mapped */ + if (psMemInfo->ui32RefCount == 0) + { + if((psMemInfo->ui32Flags & PVRSRV_MEM_EXPORTED) != 0) + { +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hMemInfo = 0; +#else + IMG_HANDLE hMemInfo = IMG_NULL; +#endif + + /* find the handle */ + eError = PVRSRVFindHandle(KERNEL_HANDLE_BASE, + &hMemInfo, + psMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FreeMemCallBackCommon: can't find exported meminfo in the global handle list")); + return eError; + } + + /* release the handle */ + eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE, + hMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FreeMemCallBackCommon: PVRSRVReleaseHandle failed for exported meminfo")); + return eError; + } + } + + switch(psMemInfo->memType) + { + /* Fall through: Free only what we should for each memory type */ + case PVRSRV_MEMTYPE_WRAPPED: + case PVRSRV_MEMTYPE_ION: + freeExternal(psMemInfo); + case PVRSRV_MEMTYPE_DEVICE: + case PVRSRV_MEMTYPE_DEVICECLASS: + if (psMemInfo->psKernelSyncInfo) + { + PVRSRVKernelSyncInfoDecRef(psMemInfo->psKernelSyncInfo, psMemInfo); + } + break; + default: + PVR_DPF((PVR_DBG_ERROR, "FreeMemCallBackCommon: Unknown memType")); + eError = PVRSRV_ERROR_INVALID_MEMINFO; + } + } + +#if defined(CONFIG_GCBV) + if (psMemInfo->ui32Flags & PVRSRV_MAP_GC_MMU) + gc_bvunmap_meminfo(psMemInfo); +#endif + + /* + * FreeDeviceMem2 will do the right thing, freeing + * the virtual memory info when the allocator calls + * 
but only releaseing the physical pages when everyone + * is done. + */ + + if (eError == PVRSRV_OK) + { + eError = FreeDeviceMem2(psMemInfo, eCallbackOrigin); + } + + return eError; +} + +/*! +****************************************************************************** + + @Function FreeDeviceMemCallBack + + @Description + + ResMan call back to free device memory + + @Input pvParam : data packet + @Input ui32Param : packet size + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static PVRSRV_ERROR FreeDeviceMemCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam; + + PVR_UNREFERENCED_PARAMETER(bDummy); + + return FreeMemCallBackCommon(psMemInfo, ui32Param, + PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR); +} + + +/*! +****************************************************************************** + + @Function PVRSRVFreeDeviceMemKM + + @Description + + Frees memory allocated with PVRAllocDeviceMem, including the mem_info structure + + @Input psMemInfo : + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE hDevCookie, + PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(hDevCookie); + + if (!psMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (psMemInfo->sMemBlk.hResItem != IMG_NULL) + { + eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL); + } + else + { + /* PVRSRV_MEM_NO_RESMAN */ + eError = FreeDeviceMemCallBack(psMemInfo, 0, CLEANUP_WITH_POLL); + } + + return eError; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVRemapToDevKM + + @Description + + Remaps buffer to GPU virtual address space + + @Input psMemInfo + + @Return PVRSRV_ERROR : 0 means the memory is still unmapped - ERROR, + * bigger than 0 (mapping reference count) - success mapping + * smaller than 0 - PVRSRV error +******************************************************************************/ +IMG_EXPORT +IMG_INT32 IMG_CALLCONV PVRSRVRemapToDevKM(IMG_HANDLE hDevCookie, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, IMG_DEV_VIRTADDR *psDevVAddr) +{ + PVRSRV_MEMBLK *psMemBlock; + IMG_INT32 result; + + PVR_UNREFERENCED_PARAMETER(hDevCookie); + + if (!psMemInfo) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemapToDevKM: invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psMemBlock = &(psMemInfo->sMemBlk); + + result = BM_RemapToDev(psMemBlock->hBuffer); + + if(result <= 0) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemapToDevKM: could not remap")); + } + + *psDevVAddr = psMemInfo->sDevVAddr = + psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(psMemBlock->hBuffer); + + UpdateDeviceMemoryPlaneOffsets(psMemInfo); + + return result; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVUnmapFromDevKM + + @Description + + Unmaps buffer from GPU virtual address space + + @Input psMemInfo + + @Return PVRSRV_ERROR : 0 means the memory is unmapped, + * bigger than 0 (mapping reference count) still mapped + * smaller than 0 - PVRSRV error +******************************************************************************/ +IMG_EXPORT +IMG_INT32 IMG_CALLCONV PVRSRVUnmapFromDevKM(IMG_HANDLE hDevCookie, + PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + PVRSRV_MEMBLK *psMemBlock; + IMG_INT32 result; + + PVR_UNREFERENCED_PARAMETER(hDevCookie); + + if (!psMemInfo) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVUnmapFromDevKM: invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psMemBlock = &(psMemInfo->sMemBlk); + + result = BM_UnmapFromDev(psMemBlock->hBuffer); + /* 0 means the memory is unmapped, + * bigger than 0 (mapping ref count) still mapped + * smaller than 0 PVRSRV error + */ + if(result < 0) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVUnmapFromDevKM: could not unmap")); + } + + psMemInfo->sDevVAddr = + psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(psMemBlock->hBuffer); + + return result; +} + + +/*! 
******************************************************************************

 @Function	PVRSRVAllocDeviceMemKM

 @Description

 Allocates device memory, optionally creating a sync object and registering
 the allocation with the resource manager.

 @Input	   hDevCookie : device node cookie
 @Input	   psPerProc : Per-process data
 @Input	   hDevMemHeap : heap to allocate from
 @Input	   ui32Flags : Some combination of PVRSRV_MEM_ flags
 @Input	   ui32Size : Number of bytes to allocate
 @Input	   ui32Alignment : required alignment of the allocation
 @Input	   pvPrivData / ui32PrivDataLength : opaque private data passed to the allocator
 @Input	   ui32ChunkSize / ui32NumVirtChunks / ui32NumPhysChunks / pabMapChunk :
           sparse-allocation description (only used with PVRSRV_MEM_SPARSE)
 @Output   **ppsMemInfo : On success, receives a pointer to the created MEM_INFO structure

 @Return   PVRSRV_ERROR :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV _PVRSRVAllocDeviceMemKM(IMG_HANDLE	hDevCookie,
								 PVRSRV_PER_PROCESS_DATA	*psPerProc,
								 IMG_HANDLE		hDevMemHeap,
								 IMG_UINT32		ui32Flags,
								 IMG_SIZE_T		ui32Size,
								 IMG_SIZE_T		ui32Alignment,
								 IMG_PVOID		pvPrivData,
								 IMG_UINT32		ui32PrivDataLength,
								 IMG_UINT32		ui32ChunkSize,
								 IMG_UINT32		ui32NumVirtChunks,
								 IMG_UINT32		ui32NumPhysChunks,
								 IMG_BOOL		*pabMapChunk,
								 PVRSRV_KERNEL_MEM_INFO	**ppsMemInfo)
{
	PVRSRV_KERNEL_MEM_INFO	*psMemInfo;
	PVRSRV_ERROR			eError;
	BM_HEAP					*psBMHeap;
	IMG_HANDLE				hDevMemContext;

	/* A zero size is only legal for sparse allocations; a sparse allocation
	 * in turn requires a complete chunk description. */
	if (!hDevMemHeap ||
		((ui32Size == 0) && ((ui32Flags & PVRSRV_MEM_SPARSE) == 0)) ||
		(((ui32ChunkSize == 0) || (ui32NumVirtChunks == 0) || (ui32NumPhysChunks == 0) ||
		(pabMapChunk == IMG_NULL )) && (ui32Flags & PVRSRV_MEM_SPARSE)))
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* Sparse alloc input validation */
	if (ui32Flags & PVRSRV_MEM_SPARSE)
	{
		IMG_UINT32 i;
		IMG_UINT32 ui32Check = 0;

		if (ui32NumVirtChunks < ui32NumPhysChunks)
		{
			return PVRSRV_ERROR_INVALID_PARAMS;
		}

		/* The number of chunks marked for mapping must match the declared
		 * physical chunk count. */
		for (i=0;i<ui32NumVirtChunks;i++)
		{
			if (pabMapChunk[i])
			{
				ui32Check++;
			}
		}
		if (ui32NumPhysChunks != ui32Check)
		{
			return PVRSRV_ERROR_INVALID_PARAMS;
		}
	}

	/* FIXME: At the moment we force CACHETYPE override allocations to
	 * be multiples of PAGE_SIZE and page aligned. If the RA/BM
	 * is fixed, this limitation can be removed.
	 *
	 * INTEGRATION_POINT: HOST_PAGESIZE() is not correct, should be device-specific.
	 */
	if (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
	{
		/* PRQA S 3415 1 */ /* order of evaluation is not important */
		if (((ui32Size % HOST_PAGESIZE()) != 0) ||
			((ui32Alignment % HOST_PAGESIZE()) != 0))
		{
			return PVRSRV_ERROR_INVALID_PARAMS;
		}
	}

	eError = AllocDeviceMem(hDevCookie,
							hDevMemHeap,
							ui32Flags,
							ui32Size,
							ui32Alignment,
							pvPrivData,
							ui32PrivDataLength,
							ui32ChunkSize,
							ui32NumVirtChunks,
							ui32NumPhysChunks,
							pabMapChunk,
							&psMemInfo);

	if (eError != PVRSRV_OK)
	{
		return eError;
	}

#if defined(CONFIG_GCBV)
	/* Let the GC320 BLT driver know about allocations it must map */
	if (ui32Flags & PVRSRV_MAP_GC_MMU)
		gc_bvmap_meminfo(psMemInfo);
#endif

	if (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
	{
		psMemInfo->psKernelSyncInfo = IMG_NULL;
	}
	else
	{
		/*
			allocate a syncinfo but don't register with resman
			because the holding devicemem will handle the syncinfo
		*/
		psBMHeap = (BM_HEAP*)hDevMemHeap;
		hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
		eError = PVRSRVAllocSyncInfoKM(hDevCookie,
									   hDevMemContext,
									   &psMemInfo->psKernelSyncInfo);
		if(eError != PVRSRV_OK)
		{
			goto free_mainalloc;
		}
	}

	/*
	 * Setup the output.
	 */
	*ppsMemInfo = psMemInfo;

	if (ui32Flags & PVRSRV_MEM_NO_RESMAN)
	{
		psMemInfo->sMemBlk.hResItem = IMG_NULL;
	}
	else
	{
		/* register with the resman */
		psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
														RESMAN_TYPE_DEVICEMEM_ALLOCATION,
														psMemInfo,
														0,
														&FreeDeviceMemCallBack);
		if (psMemInfo->sMemBlk.hResItem == IMG_NULL)
		{
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto free_mainalloc;
		}
	}

	/* increment the refcount */
	PVRSRVKernelMemInfoIncRef(psMemInfo);

	psMemInfo->memType = PVRSRV_MEMTYPE_DEVICE;

	/*
	 * And I think we're done for now....
	 */
	return (PVRSRV_OK);

free_mainalloc:
	/* NOTE(review): assumes AllocDeviceMem leaves psKernelSyncInfo zeroed
	 * on the PVRSRV_MEM_NO_SYNCOBJ path and on sync-alloc failure —
	 * TODO confirm against AllocDeviceMem. */
	if (psMemInfo->psKernelSyncInfo)
	{
		PVRSRVKernelSyncInfoDecRef(psMemInfo->psKernelSyncInfo, psMemInfo);
	}
	FreeDeviceMem(psMemInfo);

	return eError;
}

#if defined(SUPPORT_ION)
/* Resman callback used to tear down an ION mapping created by
 * PVRSRVMapIonHandleKM; forwards to the common free path. */
static PVRSRV_ERROR IonUnmapCallback(IMG_PVOID  pvParam,
									 IMG_UINT32 ui32Param,
									 IMG_BOOL   bDummy)
{
	PVRSRV_KERNEL_MEM_INFO	*psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;

	PVR_UNREFERENCED_PARAMETER(bDummy);

	return FreeMemCallBackCommon(psMemInfo, ui32Param, PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR);
}

/*!
******************************************************************************

 @Function	PVRSRVMapIonHandleKM

 @Description

 Map an ION buffer into the specified device memory context

 @Input    psPerProc : PerProcess data
 @Input    hDevCookie : Device node cookie
 @Input    hDevMemContext : Device memory context cookie
 @Input    hIon : Handle to ION buffer
 @Input    ui32Flags : Mapping flags
 @Input    ui32Size : Mapping size
 @Output   ppsKernelMemInfo: Output kernel meminfo if successful

 @Return   PVRSRV_ERROR :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR PVRSRVMapIonHandleKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
								  IMG_HANDLE hDevCookie,
								  IMG_HANDLE hDevMemContext,
								  IMG_HANDLE hIon,
								  IMG_UINT32 ui32Flags,
								  IMG_UINT32 ui32Size,
								  PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
{
	PVRSRV_ENV_PER_PROCESS_DATA *psPerProcEnv = PVRSRVProcessPrivateData(psPerProc);
	PVRSRV_DEVICE_NODE *psDeviceNode;
	PVRSRV_KERNEL_MEM_INFO *psNewKernelMemInfo;
	DEVICE_MEMORY_INFO *psDevMemoryInfo;
	DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
	IMG_SYS_PHYADDR *pasSysPhysAddr;
	PVRSRV_MEMBLK *psMemBlock;
	PVRSRV_ERROR eError;
	IMG_HANDLE hDevMemHeap = IMG_NULL;
	IMG_HANDLE hPriv;
	BM_HANDLE hBuffer;
	IMG_UINT32 ui32HeapCount;	/* NOTE(review): assigned below but never read */
	IMG_UINT32 ui32PageCount;
	IMG_UINT32 i;
	IMG_BOOL bAllocSync = (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)?IMG_FALSE:IMG_TRUE;

	if ((hDevCookie == IMG_NULL) || (ui32Size == 0)
	|| (hDevMemContext == IMG_NULL) || (ppsKernelMemInfo == IMG_NULL))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid params", __FUNCTION__));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;

	if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
				  sizeof(PVRSRV_KERNEL_MEM_INFO),
				  (IMG_VOID **)&psNewKernelMemInfo, IMG_NULL,
				  "Kernel Memory Info") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to alloc memory for block", __FUNCTION__));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}
	OSMemSet(psNewKernelMemInfo, 0, sizeof(PVRSRV_KERNEL_MEM_INFO));

	/* Choose the heap to map to: the heap whose ID matches the device's
	 * designated ION heap ID */
	ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
	psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
	psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
	for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
	{
		if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32IonHeapID)
		{
			if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
			{
				/* Per-context heaps are instantiated on demand.
				 * NOTE(review): a heap created here is not destroyed on the
				 * error paths below — verify whether BM_CreateHeap is
				 * reference counted elsewhere. */
				if (psDeviceMemoryHeap[i].ui32HeapSize > 0)
				{
					hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
				}
				else
				{
					hDevMemHeap = IMG_NULL;
				}
			}
			else
			{
				hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap;
			}
			break;
		}
	}

	if (hDevMemHeap == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get ION heap", __FUNCTION__));
		eError = PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO;
		goto exitFailedHeap;
	}

	/* Import the ION buffer into our ion_client and DMA map it */
	eError = IonImportBufferAndAquirePhysAddr(psPerProcEnv->psIONClient,
											  hIon,
											  &ui32PageCount,
											  &pasSysPhysAddr,
											  &psNewKernelMemInfo->pvLinAddrKM,
											  &hPriv);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get ion buffer/buffer phys addr", __FUNCTION__));
		goto exitFailedHeap;
	}

	/* Wrap the returned addresses into our memory context */
	if (!BM_Wrap(hDevMemHeap,
				 ui32Size,
				 0,
				 IMG_FALSE,
				 pasSysPhysAddr,
				 IMG_NULL,
				 &ui32Flags,	/* This function clobbers our bits in ui32Flags */
				 &hBuffer))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to wrap ion buffer", __FUNCTION__));
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto exitFailedWrap;
	}

	/* Fill in "Implementation dependent" section of mem info */
	psMemBlock = &psNewKernelMemInfo->sMemBlk;
	psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
	psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
	psMemBlock->hBuffer = (IMG_HANDLE) hBuffer;
	psMemBlock->hOSWrapMem = hPriv;	/* Saves creating a new element as we know hOSWrapMem will not be used */
	psMemBlock->psIntSysPAddr = pasSysPhysAddr;

	psNewKernelMemInfo->ui32Flags = ui32Flags;
	psNewKernelMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
	psNewKernelMemInfo->uAllocSize = ui32Size;
	psNewKernelMemInfo->memType = PVRSRV_MEMTYPE_ION;
	PVRSRVKernelMemInfoIncRef(psNewKernelMemInfo);

	/* Clear the Backup buffer pointer as we do not have one at this point.
	   We only allocate this as we are going up/down */
	psNewKernelMemInfo->pvSysBackupBuffer = IMG_NULL;

	if (!bAllocSync)
	{
		psNewKernelMemInfo->psKernelSyncInfo = IMG_NULL;
	}
	else
	{
		eError = PVRSRVAllocSyncInfoKM(hDevCookie,
									   hDevMemContext,
									   &psNewKernelMemInfo->psKernelSyncInfo);
		if(eError != PVRSRV_OK)
		{
			goto exitFailedSync;
		}
	}

	/* register with the resman */
	psNewKernelMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
															 RESMAN_TYPE_DEVICEMEM_ION,
															 psNewKernelMemInfo,
															 0,
															 &IonUnmapCallback);
	if (psNewKernelMemInfo->sMemBlk.hResItem == IMG_NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto exitFailedResman;
	}

	/* NOTE(review): memType was already set to PVRSRV_MEMTYPE_ION above;
	 * this second assignment is redundant but harmless. */
	psNewKernelMemInfo->memType = PVRSRV_MEMTYPE_ION;

	*ppsKernelMemInfo = psNewKernelMemInfo;
	return PVRSRV_OK;

exitFailedResman:
	if (psNewKernelMemInfo->psKernelSyncInfo)
	{
		PVRSRVKernelSyncInfoDecRef(psNewKernelMemInfo->psKernelSyncInfo, psNewKernelMemInfo);
	}
exitFailedSync:
	BM_Free(hBuffer, ui32Flags);
exitFailedWrap:
	IonUnimportBufferAndReleasePhysAddr(hPriv);
	OSFreeMem(PVRSRV_PAGEABLE_SELECT,
			  sizeof(IMG_SYS_PHYADDR) * ui32PageCount,
			  pasSysPhysAddr,
			  IMG_NULL);
exitFailedHeap:
	OSFreeMem(PVRSRV_PAGEABLE_SELECT,
			  sizeof(PVRSRV_KERNEL_MEM_INFO),
			  psNewKernelMemInfo,
			  IMG_NULL);

	return eError;
}

/*!
******************************************************************************

 @Function	PVRSRVUnmapIonHandleKM

 @Description

 Frees an ion buffer mapped with PVRSRVMapIonHandleKM, including the
 mem_info structure, via the resource manager (IonUnmapCallback).

 @Input    psMemInfo : meminfo returned by PVRSRVMapIonHandleKM

 @Return   PVRSRV_ERROR :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapIonHandleKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
{
	if (!psMemInfo)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL);
}
#endif	/* SUPPORT_ION */

/*!
+****************************************************************************** + + @Function PVRSRVDissociateDeviceMemKM + + @Description + + Dissociates memory from the process that allocates it. Intended for + transfering the ownership of device memory from a particular process + to the kernel. + + @Input psMemInfo : + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE hDevCookie, + PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevCookie; + + PVR_UNREFERENCED_PARAMETER(hDevCookie); + + if (!psMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = ResManDissociateRes(psMemInfo->sMemBlk.hResItem, psDeviceNode->hResManContext); + + PVR_ASSERT(eError == PVRSRV_OK); + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVGetFreeDeviceMemKM + + @Description + + Determines how much memory remains available in the system with the specified + capabilities. + + @Input ui32Flags : + + @Output pui32Total : + + @Output pui32Free : + + @Output pui32LargestBlock : + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags, + IMG_SIZE_T *pui32Total, + IMG_SIZE_T *pui32Free, + IMG_SIZE_T *pui32LargestBlock) +{ + /* TO BE IMPLEMENTED */ + + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(pui32Total); + PVR_UNREFERENCED_PARAMETER(pui32Free); + PVR_UNREFERENCED_PARAMETER(pui32LargestBlock); + + return PVRSRV_OK; +} + + + + +/*! 
******************************************************************************
 @Function	PVRSRVUnwrapExtMemoryKM

 @Description	On last unwrap of a given meminfo, unmaps physical pages from a
				wrapped allocation, and frees the associated device address space.
				Note: this can only unmap memory mapped by PVRSRVWrapExtMemory

 @Input		psMemInfo - mem info describing the wrapped allocation
 @Return	PVRSRV_ERROR
******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo)
{
	if (!psMemInfo)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* Delegate to resman; UnwrapExtMemoryCallBack does the real teardown */
	return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL);
}


/*!
******************************************************************************
 @Function	UnwrapExtMemoryCallBack

 @Description	Resman callback to unwrap memory; forwards to the common
				free path in allocator-origin mode.

 @Input		pvParam - opaque void ptr param (the PVRSRV_KERNEL_MEM_INFO)
 @Input		ui32Param - opaque unsigned long param
 @Return	PVRSRV_ERROR
******************************************************************************/
static PVRSRV_ERROR UnwrapExtMemoryCallBack(IMG_PVOID  pvParam,
											IMG_UINT32 ui32Param,
											IMG_BOOL   bDummy)
{
	PVRSRV_KERNEL_MEM_INFO	*psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;

	PVR_UNREFERENCED_PARAMETER(bDummy);

	return FreeMemCallBackCommon(psMemInfo, ui32Param,
								 PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR);
}


/*!
******************************************************************************
 @Function	PVRSRVWrapExtMemoryKM

 @Description	Allocates a Device Virtual Address in the shared mapping heap
				and maps physical pages into that allocation. Note, if the pages are
				already mapped into the heap, the existing allocation is returned.

 @Input		hDevCookie - Device cookie
 @Input		psPerProc - Per-process data
 @Input		hDevMemContext - device memory context
 @Input		uByteSize - Size of allocation
 @Input		uPageOffset - Offset into the first page of the memory to be wrapped
				(recomputed from pvLinAddr when pvLinAddr is supplied)
 @Input		bPhysContig - whether the underlying memory is physically contiguous
				(forced to IMG_FALSE when pvLinAddr is supplied)
 @Input		psExtSysPAddr - The list of Device Physical page addresses
 @Input		pvLinAddr - ptr to buffer to wrap
 @Input		ui32Flags - mapping flags
 @Output	ppsMemInfo - mem info describing the wrapped allocation
 @Return	PVRSRV_ERROR
******************************************************************************/

IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE				hDevCookie,
												PVRSRV_PER_PROCESS_DATA	*psPerProc,
												IMG_HANDLE				hDevMemContext,
												IMG_SIZE_T				uByteSize,
												IMG_SIZE_T				uPageOffset,
												IMG_BOOL				bPhysContig,
												IMG_SYS_PHYADDR			*psExtSysPAddr,
												IMG_VOID				*pvLinAddr,
												IMG_UINT32				ui32Flags,
												PVRSRV_KERNEL_MEM_INFO	**ppsMemInfo)
{
	PVRSRV_KERNEL_MEM_INFO	*psMemInfo = IMG_NULL;
	DEVICE_MEMORY_INFO		*psDevMemoryInfo;
	IMG_SIZE_T				ui32HostPageSize = HOST_PAGESIZE();
	IMG_HANDLE				hDevMemHeap = IMG_NULL;
	PVRSRV_DEVICE_NODE		*psDeviceNode;
	BM_HANDLE				hBuffer;
	PVRSRV_MEMBLK			*psMemBlock;
	IMG_BOOL				bBMError;
	BM_HEAP					*psBMHeap;
	PVRSRV_ERROR			eError;
	IMG_VOID				*pvPageAlignedCPUVAddr;
	IMG_SYS_PHYADDR			*psIntSysPAddr = IMG_NULL;
	IMG_HANDLE				hOSWrapMem = IMG_NULL;
	DEVICE_MEMORY_HEAP_INFO	*psDeviceMemoryHeap;
	IMG_UINT32				i;
	IMG_SIZE_T				uPageCount = 0;


	psDeviceNode = (PVRSRV_DEVICE_NODE*)hDevCookie;
	PVR_ASSERT(psDeviceNode != IMG_NULL);

	if (psDeviceNode == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: invalid parameter"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	if(pvLinAddr)
	{
		/* derive the page offset from the cpu ptr (in case it's not supplied) */
		uPageOffset = (IMG_UINTPTR_T)pvLinAddr & (ui32HostPageSize - 1);

		/* get the pagecount and the page aligned base ptr */
		uPageCount = HOST_PAGEALIGN(uByteSize + uPageOffset) / ui32HostPageSize;
		pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvLinAddr - uPageOffset);

		/* allocate array of SysPAddr to hold page addresses */
		if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
						uPageCount * sizeof(IMG_SYS_PHYADDR),
						(IMG_VOID **)&psIntSysPAddr, IMG_NULL,
						"Array of Page Addresses") != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
			return PVRSRV_ERROR_OUT_OF_MEMORY;
		}

		/* pin the user pages and collect their physical addresses */
		eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr,
										uPageCount * ui32HostPageSize,
										psIntSysPAddr,
										&hOSWrapMem);
		if(eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;//FIXME: need better error code
			goto ErrorExitPhase1;
		}

		/* replace the supplied page address list */
		psExtSysPAddr = psIntSysPAddr;

		/* assume memory is not physically contiguous;
		   we shouldn't trust what the user says here
		*/
		bPhysContig = IMG_FALSE;
	}

	/* Choose the heap to map to: the device's designated mapping heap */
	psDevMemoryInfo = &((BM_CONTEXT*)hDevMemContext)->psDeviceNode->sDevMemoryInfo;
	psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
	for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
	{
		if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32MappingHeapID)
		{
			if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
			{
				/* per-context heaps are instantiated on demand */
				if (psDeviceMemoryHeap[i].ui32HeapSize > 0)
				{
					hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
				}
				else
				{
					hDevMemHeap = IMG_NULL;
				}
			}
			else
			{
				hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap;
			}
			break;
		}
	}

	if(hDevMemHeap == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: unable to find mapping heap"));
		eError = PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP;
		goto ErrorExitPhase2;
	}

	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
					sizeof(PVRSRV_KERNEL_MEM_INFO),
					(IMG_VOID **)&psMemInfo, IMG_NULL,
					"Kernel Memory Info") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto ErrorExitPhase2;
	}

	OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
	psMemInfo->ui32Flags = ui32Flags;

	psMemBlock = &(psMemInfo->sMemBlk);

	bBMError = BM_Wrap(hDevMemHeap,
					   uByteSize,
					   uPageOffset,
					   bPhysContig,
					   psExtSysPAddr,
					   IMG_NULL,
					   &psMemInfo->ui32Flags,
					   &hBuffer);
	if (!bBMError)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: BM_Wrap Failed"));
		eError = PVRSRV_ERROR_BAD_MAPPING;
		goto ErrorExitPhase3;
	}

	/* Fill in "Implementation dependent" section of mem info */
	psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
	psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
	psMemBlock->hOSWrapMem = hOSWrapMem;
	psMemBlock->psIntSysPAddr = psIntSysPAddr;

	/* Convert from BM_HANDLE to external IMG_HANDLE */
	psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;

	/* Fill in the public fields of the MEM_INFO structure */
	psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
	psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
	psMemInfo->uAllocSize = uByteSize;

	/* Clear the Backup buffer pointer as we do not have one at this point.
	   We only allocate this as we are going up/down
	*/
	psMemInfo->pvSysBackupBuffer = IMG_NULL;

	/*
		allocate a syncinfo but don't register with resman
		because the holding devicemem will handle the syncinfo
	*/
	psBMHeap = (BM_HEAP*)hDevMemHeap;
	hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
	eError = PVRSRVAllocSyncInfoKM(hDevCookie,
									hDevMemContext,
									&psMemInfo->psKernelSyncInfo);
	if(eError != PVRSRV_OK)
	{
		goto ErrorExitPhase4;
	}

	/* increment the refcount */
	PVRSRVKernelMemInfoIncRef(psMemInfo);

	psMemInfo->memType = PVRSRV_MEMTYPE_WRAPPED;

	/* Register Resource.
	 * NOTE(review): the ResManRegisterRes return value is not checked here,
	 * unlike in _PVRSRVAllocDeviceMemKM — confirm whether a NULL hResItem
	 * can be tolerated by the unwrap path. */
	psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
													RESMAN_TYPE_DEVICEMEM_WRAP,
													psMemInfo,
													0,
													&UnwrapExtMemoryCallBack);

	/* return the meminfo */
	*ppsMemInfo = psMemInfo;

	return PVRSRV_OK;

	/* error handling: */

ErrorExitPhase4:
	if(psMemInfo)
	{
		FreeDeviceMem(psMemInfo);
		/*
			FreeDeviceMem will free the meminfo so set
			it to NULL to avoid double free below
		*/
		psMemInfo = IMG_NULL;
	}

ErrorExitPhase3:
	if(psMemInfo)
	{
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
		/*not nulling pointer, out of scope*/
	}

ErrorExitPhase2:
	if(psIntSysPAddr)
	{
		/* psIntSysPAddr non-NULL implies we pinned pages above */
		OSReleasePhysPageAddr(hOSWrapMem);
	}

ErrorExitPhase1:
	if(psIntSysPAddr)
	{
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, uPageCount * sizeof(IMG_SYS_PHYADDR), psIntSysPAddr, IMG_NULL);
		/*not nulling shared pointer, uninitialized to this point*/
	}

	return eError;
}


/*!
+****************************************************************************** + + @Function PVRSRVUnmapDeviceMemoryKM + + @Description + Unmaps an existing allocation previously mapped by PVRSRVMapDeviceMemory + + @Input psMemInfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + if (!psMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL); +} + + +/*! +****************************************************************************** + @Function UnmapDeviceMemoryCallBack + + @Description Resman callback to unmap memory memory previously mapped + from one allocation to another + + @Input pvParam - opaque void ptr param + @Input ui32Param - opaque unsigned long param + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR UnmapDeviceMemoryCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_ERROR eError; + RESMAN_MAP_DEVICE_MEM_DATA *psMapData = pvParam; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + if(psMapData->psMemInfo->sMemBlk.psIntSysPAddr) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psMapData->psMemInfo->sMemBlk.psIntSysPAddr, IMG_NULL); + psMapData->psMemInfo->sMemBlk.psIntSysPAddr = IMG_NULL; + } + + if( psMapData->psMemInfo->psKernelSyncInfo ) + { + PVRSRVKernelSyncInfoDecRef(psMapData->psMemInfo->psKernelSyncInfo, psMapData->psMemInfo); + } + + eError = FreeDeviceMem(psMapData->psMemInfo); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceMemoryCallBack: Failed to free DST meminfo")); + return eError; + } + + /* This will only free the src psMemInfo if we hold the last reference */ + eError = FreeMemCallBackCommon(psMapData->psSrcMemInfo, 0, + 
PVRSRV_FREE_CALLBACK_ORIGIN_IMPORTER); + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVMapDeviceMemoryKM + + @Description + Maps an existing allocation to a specific device address space and heap + Note: it's valid to map from one physical device to another + + @Input psPerProc : Per-process data + @Input psSrcMemInfo + @Input hDstDevMemHeap + @Input ppsDstMemInfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo, + IMG_HANDLE hDstDevMemHeap, + PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo) +{ + PVRSRV_ERROR eError; + IMG_UINT32 i; + IMG_SIZE_T uPageCount, uPageOffset; + IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE(); + IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL; + IMG_DEV_PHYADDR sDevPAddr; + BM_BUF *psBuf; + IMG_DEV_VIRTADDR sDevVAddr; + PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL; + BM_HANDLE hBuffer; + PVRSRV_MEMBLK *psMemBlock; + IMG_BOOL bBMError; + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_VOID *pvPageAlignedCPUVAddr; + RESMAN_MAP_DEVICE_MEM_DATA *psMapData = IMG_NULL; + + /* check params */ + if(!psSrcMemInfo || !hDstDevMemHeap || !ppsDstMemInfo) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* initialise the Dst Meminfo to NULL*/ + *ppsDstMemInfo = IMG_NULL; + + uPageOffset = psSrcMemInfo->sDevVAddr.uiAddr & (ui32HostPageSize - 1); + uPageCount = HOST_PAGEALIGN(psSrcMemInfo->uAllocSize + uPageOffset) / ui32HostPageSize; + pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)psSrcMemInfo->pvLinAddrKM - uPageOffset); + + /* + allocate array of SysPAddr to hold SRC allocation page 
addresses + */ + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + uPageCount*sizeof(IMG_SYS_PHYADDR), + (IMG_VOID **)&psSysPAddr, IMG_NULL, + "Array of Page Addresses") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psBuf = psSrcMemInfo->sMemBlk.hBuffer; + + /* get the device node */ + psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode; + + /* build a list of physical page addresses */ + sDevVAddr.uiAddr = psSrcMemInfo->sDevVAddr.uiAddr - IMG_CAST_TO_DEVVADDR_UINT(uPageOffset); + for(i=0; i<uPageCount; i++) + { + BM_GetPhysPageAddr(psSrcMemInfo, sDevVAddr, &sDevPAddr); + + /* save the address */ + psSysPAddr[i] = SysDevPAddrToSysPAddr (psDeviceNode->sDevId.eDeviceType, sDevPAddr); + + /* advance the DevVaddr one page */ + sDevVAddr.uiAddr += IMG_CAST_TO_DEVVADDR_UINT(ui32HostPageSize); + } + + /* allocate the resman map data */ + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(RESMAN_MAP_DEVICE_MEM_DATA), + (IMG_VOID **)&psMapData, IMG_NULL, + "Resource Manager Map Data") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc resman map data")); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorExit; + } + + if(OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_KERNEL_MEM_INFO), + (IMG_VOID **)&psMemInfo, IMG_NULL, + "Kernel Memory Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block")); + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ErrorExit; + } + + OSMemSet(psMemInfo, 0, sizeof(*psMemInfo)); + psMemInfo->ui32Flags = psSrcMemInfo->ui32Flags; + + psMemBlock = &(psMemInfo->sMemBlk); + + bBMError = BM_Wrap(hDstDevMemHeap, + psSrcMemInfo->uAllocSize, + uPageOffset, + IMG_FALSE, + psSysPAddr, + pvPageAlignedCPUVAddr, + &psMemInfo->ui32Flags, + &hBuffer); + + if (!bBMError) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: BM_Wrap Failed")); + eError = 
PVRSRV_ERROR_BAD_MAPPING; + goto ErrorExit; + } + + /* Fill in "Implementation dependant" section of mem info */ + psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer); + psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer); + + /* Convert from BM_HANDLE to external IMG_HANDLE */ + psMemBlock->hBuffer = (IMG_HANDLE)hBuffer; + + /* Store page list */ + psMemBlock->psIntSysPAddr = psSysPAddr; + + /* patch up the CPU VAddr into the meminfo */ + psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM; + + /* Fill in the public fields of the MEM_INFO structure */ + psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr; + psMemInfo->uAllocSize = psSrcMemInfo->uAllocSize; + psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo; + + /* reference the same ksi that the original meminfo referenced */ + if(psMemInfo->psKernelSyncInfo) + { + PVRSRVKernelSyncInfoIncRef(psMemInfo->psKernelSyncInfo, psMemInfo); + } + + /* Clear the Backup buffer pointer as we do not have one at this point. + We only allocate this as we are going up/down + */ + psMemInfo->pvSysBackupBuffer = IMG_NULL; + + /* increment our refcount */ + PVRSRVKernelMemInfoIncRef(psMemInfo); + + /* increment the src refcount */ + PVRSRVKernelMemInfoIncRef(psSrcMemInfo); + + /* Tell the buffer manager about the export */ + BM_Export(psSrcMemInfo->sMemBlk.hBuffer); + + psMemInfo->memType = PVRSRV_MEMTYPE_MAPPED; + + /* setup the resman map data */ + psMapData->psMemInfo = psMemInfo; + psMapData->psSrcMemInfo = psSrcMemInfo; + + /* Register Resource */ + psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_DEVICEMEM_MAPPING, + psMapData, + 0, + &UnmapDeviceMemoryCallBack); + + *ppsDstMemInfo = psMemInfo; + + return PVRSRV_OK; + + /* error handling: */ + +ErrorExit: + + if(psSysPAddr) + { + /* Free the page address list */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psSysPAddr, IMG_NULL); + /*not nulling shared pointer, holding structure could be not 
initialized*/ + } + + if(psMemInfo) + { + /* Free the page address list */ + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL); + /*not nulling shared pointer, holding structure could be not initialized*/ + } + + if(psMapData) + { + /* Free the resman map data */ + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL); + /*not nulling pointer, out of scope*/ + } + + return eError; +} + + +/*! +****************************************************************************** + @Function PVRSRVUnmapDeviceClassMemoryKM + + @Description unmaps physical pages from devices address space at a specified + Device Virtual Address. + Note: this can only unmap memory mapped by + PVRSRVMapDeviceClassMemoryKM + + @Input psMemInfo - mem info describing the device virtual address + to unmap RAM from + @Return None +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + if (!psMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL); +} + + +/*! 
+****************************************************************************** + @Function UnmapDeviceClassMemoryCallBack + + @Description Resman callback to unmap device class memory + + @Input pvParam - opaque void ptr param + @Input ui32Param - opaque unsigned long param + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR UnmapDeviceClassMemoryCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_DC_MAPINFO *psDCMapInfo = pvParam; + PVRSRV_KERNEL_MEM_INFO *psMemInfo; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + psMemInfo = psDCMapInfo->psMemInfo; + +#if defined(SUPPORT_MEMORY_TILING) + if(psDCMapInfo->ui32TilingStride > 0) + { + PVRSRV_DEVICE_NODE *psDeviceNode = psDCMapInfo->psDeviceNode; + + if (psDeviceNode->pfnFreeMemTilingRange(psDeviceNode, + psDCMapInfo->ui32RangeIndex) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceClassMemoryCallBack: FreeMemTilingRange failed")); + } + } +#endif + + (psDCMapInfo->psDeviceClassBuffer->ui32MemMapRefCount)--; + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_MAPINFO), psDCMapInfo, IMG_NULL); + + return FreeMemCallBackCommon(psMemInfo, ui32Param, + PVRSRV_FREE_CALLBACK_ORIGIN_ALLOCATOR); +} + + +/*! 
+****************************************************************************** + @Function PVRSRVMapDeviceClassMemoryKM + + @Description maps physical pages for DeviceClass buffers into a devices + address space at a specified and pre-allocated Device + Virtual Address + + @Input psPerProc - Per-process data + @Input hDevMemContext - Device memory context + @Input hDeviceClassBuffer - Device Class Buffer (Surface) handle + @Input hDevMemContext - device memory context to which mapping + is made + @Output ppsMemInfo - mem info describing the mapped memory + @Output phOSMapInfo - OS specific mapping information + @Return None +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevMemContext, + IMG_HANDLE hDeviceClassBuffer, + PVRSRV_KERNEL_MEM_INFO **ppsMemInfo, + IMG_HANDLE *phOSMapInfo) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE* psDeviceNode; + PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL; + PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer; + IMG_SYS_PHYADDR *psSysPAddr; + IMG_VOID *pvCPUVAddr, *pvPageAlignedCPUVAddr; + IMG_BOOL bPhysContig; + BM_CONTEXT *psBMContext; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + IMG_HANDLE hDevMemHeap = IMG_NULL; + IMG_SIZE_T uByteSize; + IMG_SIZE_T ui32Offset; + IMG_SIZE_T ui32PageSize = HOST_PAGESIZE(); + BM_HANDLE hBuffer; + PVRSRV_MEMBLK *psMemBlock; + IMG_BOOL bBMError; + IMG_UINT32 i; + PVRSRV_DC_MAPINFO *psDCMapInfo = IMG_NULL; + + if(!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo || !hDevMemContext) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* allocate resman storage structure */ + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_DC_MAPINFO), + (IMG_VOID **)&psDCMapInfo, IMG_NULL, + "PVRSRV_DC_MAPINFO") != PVRSRV_OK) + { + 
PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for psDCMapInfo")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet(psDCMapInfo, 0, sizeof(PVRSRV_DC_MAPINFO)); + + psDeviceClassBuffer = (PVRSRV_DEVICECLASS_BUFFER*)hDeviceClassBuffer; + + /* + call into external driver to get info so we can map a meminfo + Notes: + It's expected that third party displays will only support + physically contiguous display surfaces. However, it's possible + a given display may have an MMU and therefore support non-contig' + display surfaces. + + If surfaces are contiguous, ext driver should return: + - a CPU virtual address, or IMG_NULL where the surface is not mapped to CPU + - (optional) an OS Mapping handle for KM->UM surface mapping + - the size in bytes + - a single system physical address + + If surfaces are non-contiguous, ext driver should return: + - a CPU virtual address + - (optional) an OS Mapping handle for KM->UM surface mapping + - the size in bytes (must be multiple of 4kB) + - a list of system physical addresses (at 4kB intervals) + */ + eError = psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer->hExtDevice, + psDeviceClassBuffer->hExtBuffer, + &psSysPAddr, + &uByteSize, + &pvCPUVAddr, + phOSMapInfo, + &bPhysContig, + &psDCMapInfo->ui32TilingStride); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to get buffer address")); + goto ErrorExitPhase1; + } + + /* Choose the heap to map to */ + psBMContext = (BM_CONTEXT*)psDeviceClassBuffer->hDevMemContext; + psDeviceNode = psBMContext->psDeviceNode; + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; + for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++) + { + if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32MappingHeapID) + { + if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT) + { + if (psDeviceMemoryHeap[i].ui32HeapSize > 0) + { + hDevMemHeap = 
BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]); + } + else + { + hDevMemHeap = IMG_NULL; + } + } + else + { + hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap; + } + break; + } + } + + if(hDevMemHeap == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to find mapping heap")); + eError = PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE; + goto ErrorExitPhase1; + } + + /* Only need lower 12 bits of the cpu addr - don't care what size a void* is */ + ui32Offset = ((IMG_UINTPTR_T)pvCPUVAddr) & (ui32PageSize - 1); + pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvCPUVAddr - ui32Offset); + + eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_KERNEL_MEM_INFO), + (IMG_VOID **)&psMemInfo, IMG_NULL, + "Kernel Memory Info"); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block")); + goto ErrorExitPhase1; + } + + OSMemSet(psMemInfo, 0, sizeof(*psMemInfo)); + + psMemBlock = &(psMemInfo->sMemBlk); + + bBMError = BM_Wrap(hDevMemHeap, + uByteSize, + ui32Offset, + bPhysContig, + psSysPAddr, + pvPageAlignedCPUVAddr, + &psMemInfo->ui32Flags, + &hBuffer); + + if (!bBMError) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed")); + /*not nulling pointer, out of scope*/ + eError = PVRSRV_ERROR_BAD_MAPPING; + goto ErrorExitPhase2; + } + + /* Fill in "Implementation dependant" section of mem info */ + psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer); + psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer); + + /* Convert from BM_HANDLE to external IMG_HANDLE */ + psMemBlock->hBuffer = (IMG_HANDLE)hBuffer; + + /* patch up the CPU VAddr into the meminfo - use the address from the BM, not the one from the deviceclass + api, to ensure user mode mapping is possible + */ + psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer); + + /* Fill in the public fields of the MEM_INFO structure */ + psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr; + 
psMemInfo->uAllocSize = uByteSize; + psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo; + + PVR_ASSERT(psMemInfo->psKernelSyncInfo != IMG_NULL); + if (psMemInfo->psKernelSyncInfo) + { + PVRSRVKernelSyncInfoIncRef(psMemInfo->psKernelSyncInfo, psMemInfo); + } + + /* Clear the Backup buffer pointer as we do not have one at this point. + We only allocate this as we are going up/down + */ + psMemInfo->pvSysBackupBuffer = IMG_NULL; + + /* setup DCMapInfo */ + psDCMapInfo->psMemInfo = psMemInfo; + psDCMapInfo->psDeviceClassBuffer = psDeviceClassBuffer; + +#if defined(SUPPORT_MEMORY_TILING) + psDCMapInfo->psDeviceNode = psDeviceNode; + + if(psDCMapInfo->ui32TilingStride > 0) + { + /* try to acquire a tiling range on this device */ + eError = psDeviceNode->pfnAllocMemTilingRange(psDeviceNode, + psMemInfo, + psDCMapInfo->ui32TilingStride, + &psDCMapInfo->ui32RangeIndex); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: AllocMemTilingRange failed")); + goto ErrorExitPhase3; + } + } +#endif + + /* Register Resource */ + psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_DEVICECLASSMEM_MAPPING, + psDCMapInfo, + 0, + &UnmapDeviceClassMemoryCallBack); + + (psDeviceClassBuffer->ui32MemMapRefCount)++; + PVRSRVKernelMemInfoIncRef(psMemInfo); + + psMemInfo->memType = PVRSRV_MEMTYPE_DEVICECLASS; + + /* return the meminfo */ + *ppsMemInfo = psMemInfo; + +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* If the 3PDD supplies a kernel virtual address, we can PDUMP it */ + if(psMemInfo->pvLinAddrKM) + { + /* FIXME: + * Initialise the display surface here when it is mapped into Services. + * Otherwise there is a risk that pdump toolchain will assign previously + * used physical pages, leading to visual artefacts on the unrendered surface + * (e.g. during LLS rendering). 
+ *
+ * A better method is to pdump the allocation from the DC driver, so the
+ * BM_Wrap pdumps only the virtual memory which better represents the driver
+ * behaviour.
+ */
+ PDUMPCOMMENT("Dump display surface");
+ PDUMPMEM(IMG_NULL, psMemInfo, ui32Offset, psMemInfo->uAllocSize, PDUMP_FLAGS_CONTINUOUS, ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping);
+ }
+#endif
+ return PVRSRV_OK;
+
+#if defined(SUPPORT_MEMORY_TILING)
+ErrorExitPhase3:
+ if(psMemInfo)
+ {
+ if (psMemInfo->psKernelSyncInfo)
+ {
+ PVRSRVKernelSyncInfoDecRef(psMemInfo->psKernelSyncInfo, psMemInfo);
+ }
+
+ FreeDeviceMem(psMemInfo);
+ /*
+ FreeDeviceMem will free the meminfo so set
+ it to NULL to avoid double free below
+ */
+ psMemInfo = IMG_NULL;
+ }
+#endif
+
+ErrorExitPhase2:
+ if(psMemInfo)
+ {
+ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
+ }
+
+ErrorExitPhase1:
+ if(psDCMapInfo)
+ {
+ /* Free size must match the allocation above, which used
+ sizeof(PVRSRV_DC_MAPINFO), not sizeof(PVRSRV_KERNEL_MEM_INFO). */
+ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_MAPINFO), psDCMapInfo, IMG_NULL);
+ }
+
+ return eError;
+}
+
+
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVChangeDeviceMemoryAttributesKM(IMG_HANDLE hKernelMemInfo, IMG_UINT32 ui32Attribs)
+{
+ PVRSRV_KERNEL_MEM_INFO *psKMMemInfo;
+
+ if (hKernelMemInfo == IMG_NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psKMMemInfo = (PVRSRV_KERNEL_MEM_INFO *)hKernelMemInfo;
+
+ if (ui32Attribs & PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT)
+ {
+ psKMMemInfo->ui32Flags |= PVRSRV_MEM_CACHE_CONSISTENT;
+ }
+ else
+ {
+ psKMMemInfo->ui32Flags &= ~PVRSRV_MEM_CACHE_CONSISTENT;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+/******************************************************************************
+ End of file (devicemem.c)
+******************************************************************************/
+
diff --git a/pvr-source/services4/srvkm/common/handle.c b/pvr-source/services4/srvkm/common/handle.c
new file mode 100644
index 0000000..1e26047
--- /dev/null
+++ b/pvr-source/services4/srvkm/common/handle.c
@@ -0,0 +1,2689 @@
+/*************************************************************************/ /*! +@Title Resource Handle Manager +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide resource handle management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)
+/* See handle.h for a description of the handle API. */
+
+/*
+ * There is no locking here. It is assumed the code is used in a single
+ * threaded environment. In particular, it is assumed that the code will
+ * never be called from an interrupt handler.
+ *
+ * The implementation supports movable handle structures, allowing the address
+ * of a handle structure to change without having to fix up pointers in
+ * any of the handle structures. For example, the linked list mechanism
+ * used to link subhandles together uses handle array indices rather than
+ * pointers to the structures themselves.
+ */ + +#include <stddef.h> + +#include "services_headers.h" +#include "handle.h" + +#ifdef DEBUG +#define HANDLE_BLOCK_SHIFT 2 +#else +#define HANDLE_BLOCK_SHIFT 8 +#endif + +#define DIVIDE_BY_BLOCK_SIZE(i) (((IMG_UINT32)(i)) >> HANDLE_BLOCK_SHIFT) +#define MULTIPLY_BY_BLOCK_SIZE(i) (((IMG_UINT32)(i)) << HANDLE_BLOCK_SHIFT) + +#define HANDLE_BLOCK_SIZE MULTIPLY_BY_BLOCK_SIZE(1) +#define HANDLE_SUB_BLOCK_MASK (HANDLE_BLOCK_SIZE - 1) +#define HANDLE_BLOCK_MASK (~(HANDLE_SUB_BLOCK_MASK)) + +#define HANDLE_HASH_TAB_INIT_SIZE 32 + +#define INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount) + +/* Valid handles are never NULL, but handle array indices are based from 0 */ +#if defined (SUPPORT_SID_INTERFACE) +#define INDEX_TO_HANDLE(i) ((IMG_SID)((i) + 1)) +#define HANDLE_TO_INDEX(h) ((IMG_UINT32)(h) - 1) +#else +#define INDEX_TO_HANDLE(i) ((IMG_HANDLE)((IMG_UINTPTR_T)(i) + 1)) +#define HANDLE_TO_INDEX(h) ((IMG_UINT32)(IMG_UINTPTR_T)(h) - 1) + +#endif + +#define INDEX_TO_BLOCK_INDEX(i) DIVIDE_BY_BLOCK_SIZE(i) +#define BLOCK_INDEX_TO_INDEX(i) MULTIPLY_BY_BLOCK_SIZE(i) +#define INDEX_TO_SUB_BLOCK_INDEX(i) ((i) & HANDLE_SUB_BLOCK_MASK) + +#define INDEX_TO_INDEX_STRUCT_PTR(psArray, i) (&((psArray)[INDEX_TO_BLOCK_INDEX(i)])) +#define BASE_AND_INDEX_TO_INDEX_STRUCT_PTR(psBase, i) INDEX_TO_INDEX_STRUCT_PTR((psBase)->psHandleArray, i) + +#define INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, i) (BASE_AND_INDEX_TO_INDEX_STRUCT_PTR(psBase, i)->ui32FreeHandBlockCount) + +#define INDEX_TO_HANDLE_STRUCT_PTR(psBase, i) (BASE_AND_INDEX_TO_INDEX_STRUCT_PTR(psBase, i)->psHandle + INDEX_TO_SUB_BLOCK_INDEX(i)) + +#define HANDLE_TO_HANDLE_STRUCT_PTR(psBase, h) (INDEX_TO_HANDLE_STRUCT_PTR(psBase, HANDLE_TO_INDEX(h))) + +#define HANDLE_PTR_TO_INDEX(psHandle) ((psHandle)->ui32Index) +#define HANDLE_PTR_TO_HANDLE(psHandle) INDEX_TO_HANDLE(HANDLE_PTR_TO_INDEX(psHandle)) + +#define ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(a) (HANDLE_BLOCK_MASK & (a)) +#define ROUND_UP_TO_MULTIPLE_OF_BLOCK_SIZE(a) 
ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE((a) + HANDLE_BLOCK_SIZE - 1) + +#define DEFAULT_MAX_HANDLE 0x7fffffffu +#define DEFAULT_MAX_INDEX_PLUS_ONE ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(DEFAULT_MAX_HANDLE) + +#define HANDLES_BATCHED(psBase) ((psBase)->ui32HandBatchSize != 0) + +#define HANDLE_ARRAY_SIZE(handleCount) DIVIDE_BY_BLOCK_SIZE(ROUND_UP_TO_MULTIPLE_OF_BLOCK_SIZE(handleCount)) + +#define SET_FLAG(v, f) ((IMG_VOID)((v) |= (f))) +#define CLEAR_FLAG(v, f) ((IMG_VOID)((v) &= ~(f))) +#define TEST_FLAG(v, f) ((IMG_BOOL)(((v) & (f)) != 0)) + +#define TEST_ALLOC_FLAG(psHandle, f) TEST_FLAG((psHandle)->eFlag, f) + +#define SET_INTERNAL_FLAG(psHandle, f) SET_FLAG((psHandle)->eInternalFlag, f) +#define CLEAR_INTERNAL_FLAG(psHandle, f) CLEAR_FLAG((psHandle)->eInternalFlag, f) +#define TEST_INTERNAL_FLAG(psHandle, f) TEST_FLAG((psHandle)->eInternalFlag, f) + +#define BATCHED_HANDLE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED) + +#define SET_BATCHED_HANDLE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED) + +#define SET_UNBATCHED_HANDLE(psHandle) CLEAR_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED) + +#define BATCHED_HANDLE_PARTIALLY_FREE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE) + +#define SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE) + +#define HANDLE_STRUCT_IS_FREE(psHandle) ((psHandle)->eType == PVRSRV_HANDLE_TYPE_NONE && (psHandle)->eInternalFlag == INTERNAL_HANDLE_FLAG_NONE) + +#ifdef MIN +#undef MIN +#endif + +#define MIN(x, y) (((x) < (y)) ? (x) : (y)) + +/* + * Linked list structure. Used for both the list head and list items. + * Array indices, rather than pointers, are used to point to the next and + * previous items on the list. 
+ */ +struct sHandleList +{ + IMG_UINT32 ui32Prev; + IMG_UINT32 ui32Next; +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hParent; +#else + IMG_HANDLE hParent; +#endif +}; + +enum ePVRSRVInternalHandleFlag +{ + INTERNAL_HANDLE_FLAG_NONE = 0x00, + INTERNAL_HANDLE_FLAG_BATCHED = 0x01, + INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE = 0x02, +}; + +/* Handle structure */ +struct sHandle +{ + /* Handle type */ + PVRSRV_HANDLE_TYPE eType; + + /* Pointer to the data that the handle represents */ + IMG_VOID *pvData; + + /* + * When handles are on the free list, the value of the "next index + * plus one field" has the following meaning: + * zero - next handle is the one that follows this one, + * nonzero - the index of the next handle is the value minus one. + * This scheme means handle space can be initialised to all zeros. + * + * When this field is used to link together handles on a list + * other than the free list, zero indicates the end of the + * list, with nonzero the same as above. + */ + IMG_UINT32 ui32NextIndexPlusOne; + + /* Internal flags */ + enum ePVRSRVInternalHandleFlag eInternalFlag; + + /* Flags specified when the handle was allocated */ + PVRSRV_HANDLE_ALLOC_FLAG eFlag; + + /* Index of this handle in the handle array */ + IMG_UINT32 ui32Index; + + /* List head for subhandles of this handle */ + struct sHandleList sChildren; + + /* List entry for sibling subhandles */ + struct sHandleList sSiblings; +}; + +/* Handle array index structure. + * The handle array is an array of index structures, reallocated as the number of + * handles increases. + * NOTE: There is one index structure per block of handles. 
+ */ +struct sHandleIndex +{ + /* Pointer to first handle structure in the block */ + struct sHandle *psHandle; + + /* Block allocation cookie returned from OSAllocMem for the block of handles */ + IMG_HANDLE hBlockAlloc; + + /* Number of free handles in block */ + IMG_UINT32 ui32FreeHandBlockCount; +}; + +struct _PVRSRV_HANDLE_BASE_ +{ + /* Handle returned from OSAllocMem for handle base allocation */ + IMG_HANDLE hBaseBlockAlloc; + + /* Handle returned from OSAllocMem for handle array allocation */ + IMG_HANDLE hArrayBlockAlloc; + + /* Pointer to array of pointers to handle structures */ + struct sHandleIndex *psHandleArray; + + /* + * Pointer to handle hash table. + * The hash table is used to do reverse lookups, converting data + * pointers to handles. + */ + HASH_TABLE *psHashTab; + + /* Number of free handles */ + IMG_UINT32 ui32FreeHandCount; + + /* + * If purging is not enabled, this is the array index of first free + * handle. + * If purging is enabled, this is the index to start searching for + * a free handle from. In this case it is usually zero, unless + * the handle array size has been increased due to lack of + * handles. + */ + IMG_UINT32 ui32FirstFreeIndex; + + /* Maximum handle index, plus one */ + IMG_UINT32 ui32MaxIndexPlusOne; + + /* Total number of handles, free and allocated */ + IMG_UINT32 ui32TotalHandCount; + + /* + * Index of the last free index, plus one. Not used if purging + * is enabled. + */ + IMG_UINT32 ui32LastFreeIndexPlusOne; + + /* Size of current handle batch, or zero if batching not enabled */ + IMG_UINT32 ui32HandBatchSize; + + /* Number of handles prior to start of current batch */ + IMG_UINT32 ui32TotalHandCountPreBatch; + + /* Index of first handle in batch, plus one */ + IMG_UINT32 ui32FirstBatchIndexPlusOne; + + /* Number of handle allocation failures in batch */ + IMG_UINT32 ui32BatchHandAllocFailures; + + /* Purging enabled. 
+ * If purging is enabled, the size of the table can be reduced + * by removing free space at the end of the table. To make + * purging more likely to succeed, handles are allocated as + * far to the front of the table as possible. The first free + * handle is found by a linear search from the start of the table, + * and so no free handle list management is done. + */ + IMG_BOOL bPurgingEnabled; +}; + +/* + * The key for the handle hash table is an array of three elements, the + * pointer to the resource, the resource type, and the process ID. The + * eHandKey enumeration gives the array indices of the elements making + * up the key. + */ +enum eHandKey { + HAND_KEY_DATA = 0, + HAND_KEY_TYPE, + HAND_KEY_PARENT, + HAND_KEY_LEN /* Must be last item in list */ +}; + +/* + * Kernel handle base structure. For handles that are not allocated on + * behalf of a particular process + */ +PVRSRV_HANDLE_BASE *gpsKernelHandleBase = IMG_NULL; + +/* HAND_KEY is the type of the hash table key */ +typedef IMG_UINTPTR_T HAND_KEY[HAND_KEY_LEN]; + +/*! +****************************************************************************** + + @Function HandleListInit + + @Description Initialise a linked list structure embedded in a handle + structure. + + @Input ui32Index - index of handle in the handle array + psList - pointer to linked list structure + hParent - parent handle, or IMG_NULL + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListInit) +#endif +static INLINE +#if defined (SUPPORT_SID_INTERFACE) +IMG_VOID HandleListInit(IMG_UINT32 ui32Index, struct sHandleList *psList, IMG_SID hParent) +#else +IMG_VOID HandleListInit(IMG_UINT32 ui32Index, struct sHandleList *psList, IMG_HANDLE hParent) +#endif +{ + psList->ui32Next = ui32Index; + psList->ui32Prev = ui32Index; + psList->hParent = hParent; +} + +/*! 
+****************************************************************************** + + @Function InitParentList + + @Description Initialise the children list head in a handle structure. + The children are the subhandles of this handle. + + @Input psHandle - pointer to handle structure + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(InitParentList) +#endif +static INLINE +IMG_VOID InitParentList(struct sHandle *psHandle) +{ + IMG_UINT32 ui32Parent = HANDLE_PTR_TO_INDEX(psHandle); + + HandleListInit(ui32Parent, &psHandle->sChildren, INDEX_TO_HANDLE(ui32Parent)); +} + +/*! +****************************************************************************** + + @Function InitChildEntry + + @Description Initialise the child list entry in a handle structure. + The list entry is used to link together subhandles of + a given handle. + + @Input psHandle - pointer to handle structure + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(InitChildEntry) +#endif +static INLINE +IMG_VOID InitChildEntry(struct sHandle *psHandle) +{ + HandleListInit(HANDLE_PTR_TO_INDEX(psHandle), &psHandle->sSiblings, IMG_NULL); +} + +/*! +****************************************************************************** + + @Function HandleListIsEmpty + + @Description Determine whether a given linked list is empty. + + @Input ui32Index - index of the handle containing the list head + psList - pointer to the list head + + @Return IMG_TRUE if the list is empty, IMG_FALSE if it isn't. 
+ +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListIsEmpty) +#endif +static INLINE +IMG_BOOL HandleListIsEmpty(IMG_UINT32 ui32Index, struct sHandleList *psList) +{ + IMG_BOOL bIsEmpty; + + bIsEmpty = (IMG_BOOL)(psList->ui32Next == ui32Index); + +#ifdef DEBUG + { + IMG_BOOL bIsEmpty2; + + bIsEmpty2 = (IMG_BOOL)(psList->ui32Prev == ui32Index); + PVR_ASSERT(bIsEmpty == bIsEmpty2); + } +#endif + + return bIsEmpty; +} + +#ifdef DEBUG +/*! +****************************************************************************** + + @Function NoChildren + + @Description Determine whether a handle has any subhandles + + @Input psHandle - pointer to handle structure + + @Return IMG_TRUE if the handle has no subhandles, IMG_FALSE if it does. + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(NoChildren) +#endif +static INLINE +IMG_BOOL NoChildren(struct sHandle *psHandle) +{ + PVR_ASSERT(psHandle->sChildren.hParent == HANDLE_PTR_TO_HANDLE(psHandle)); + + return HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psHandle), &psHandle->sChildren); +} + +/*! +****************************************************************************** + + @Function NoParent + + @Description Determine whether a handle is a subhandle + + @Input psHandle - pointer to handle structure + + @Return IMG_TRUE if the handle is not a subhandle, IMG_FALSE if it is. + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(NoParent) +#endif +static INLINE +IMG_BOOL NoParent(struct sHandle *psHandle) +{ + if (HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psHandle), &psHandle->sSiblings)) + { + PVR_ASSERT(psHandle->sSiblings.hParent == IMG_NULL); + + return IMG_TRUE; + } + else + { + PVR_ASSERT(psHandle->sSiblings.hParent != IMG_NULL); + } + return IMG_FALSE; +} +#endif /*DEBUG*/ +/*! 
+****************************************************************************** + + @Function ParentHandle + + @Description Determine the parent of a handle + + @Input psHandle - pointer to handle structure + + @Return Parent handle, or IMG_NULL if the handle is not a subhandle. + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(ParentHandle) +#endif +static INLINE +#if defined (SUPPORT_SID_INTERFACE) +IMG_SID ParentHandle(struct sHandle *psHandle) +#else +IMG_HANDLE ParentHandle(struct sHandle *psHandle) +#endif +{ + return psHandle->sSiblings.hParent; +} + +/* + * The LIST_PTR_FROM_INDEX_AND_OFFSET macro is used to generate either a + * pointer to the subhandle list head, or a pointer to the linked list + * structure of an item on a subhandle list. + * The list head is itself on the list, but is at a different offset + * in the handle structure to the linked list structure for items on + * the list. The two linked list structures are differentiated by + * the third parameter, containing the parent index. The parent field + * in the list head structure references the handle structure that contains + * it. For items on the list, the parent field in the linked list structure + * references the parent handle, which will be different from the handle + * containing the linked list structure. + */ +#define LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, i, p, po, eo) \ + ((struct sHandleList *)((IMG_CHAR *)(INDEX_TO_HANDLE_STRUCT_PTR(psBase, i)) + (((i) == (p)) ? (po) : (eo)))) + +/*! +****************************************************************************** + + @Function HandleListInsertBefore + + @Description Insert a handle before a handle currently on the list. 
+ + @Input ui32InsIndex - index of handle to be inserted after + psIns - pointer to handle structure to be inserted after + uiParentOffset - offset to list head struct in handle structure + ui32EntryIndex - index of handle to be inserted + psEntry - pointer to handle structure of item to be inserted + uiEntryOffset - offset of list item struct in handle structure + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListInsertBefore) +#endif +static INLINE +IMG_VOID HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32InsIndex, struct sHandleList *psIns, IMG_SIZE_T uiParentOffset, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_UINT32 ui32ParentIndex) +{ + /* PRQA S 3305 7 */ /*override stricter alignment warning */ + struct sHandleList *psPrevIns = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psIns->ui32Prev, ui32ParentIndex, uiParentOffset, uiEntryOffset); + + PVR_ASSERT(psEntry->hParent == IMG_NULL); + PVR_ASSERT(ui32InsIndex == psPrevIns->ui32Next); + PVR_ASSERT(LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32ParentIndex, ui32ParentIndex, uiParentOffset, uiParentOffset)->hParent == INDEX_TO_HANDLE(ui32ParentIndex)); + + psEntry->ui32Prev = psIns->ui32Prev; + psIns->ui32Prev = ui32EntryIndex; + psEntry->ui32Next = ui32InsIndex; + psPrevIns->ui32Next = ui32EntryIndex; + + psEntry->hParent = INDEX_TO_HANDLE(ui32ParentIndex); +} + +/*! 
+****************************************************************************** + + @Function AdoptChild + + @Description Assign a subhandle to a handle + + @Input psParent - pointer to handle structure of parent handle + psChild - pointer to handle structure of child subhandle + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(AdoptChild) +#endif +static INLINE +IMG_VOID AdoptChild(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, struct sHandle *psChild) +{ + IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psParent->sChildren.hParent); + + PVR_ASSERT(ui32Parent == HANDLE_PTR_TO_INDEX(psParent)); + + HandleListInsertBefore(psBase, ui32Parent, &psParent->sChildren, offsetof(struct sHandle, sChildren), HANDLE_PTR_TO_INDEX(psChild), &psChild->sSiblings, offsetof(struct sHandle, sSiblings), ui32Parent); + +} + +/*! +****************************************************************************** + + @Function HandleListRemove + + @Description Remove a handle from a list + + @Input ui32EntryIndex - index of handle to be removed + psEntry - pointer to handle structure of item to be removed + uiEntryOffset - offset of list item struct in handle structure + uiParentOffset - offset to list head struct in handle structure + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListRemove) +#endif +static INLINE +IMG_VOID HandleListRemove(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_SIZE_T uiParentOffset) +{ + if (!HandleListIsEmpty(ui32EntryIndex, psEntry)) + { + /* PRQA S 3305 3 */ /*override stricter alignment warning */ + struct sHandleList *psPrev = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Prev, HANDLE_TO_INDEX(psEntry->hParent), uiParentOffset, uiEntryOffset); + struct sHandleList *psNext = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Next, 
HANDLE_TO_INDEX(psEntry->hParent), uiParentOffset, uiEntryOffset); + + /* + * The list head is on the list, and we don't want to + * remove it. + */ + PVR_ASSERT(psEntry->hParent != IMG_NULL); + + psPrev->ui32Next = psEntry->ui32Next; + psNext->ui32Prev = psEntry->ui32Prev; + + HandleListInit(ui32EntryIndex, psEntry, IMG_NULL); + } +} + +/*! +****************************************************************************** + + @Function UnlinkFromParent + + @Description Remove a subhandle from its parents list + + @Input psHandle - pointer to handle structure of child subhandle + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(UnlinkFromParent) +#endif +static INLINE +IMG_VOID UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle) +{ + HandleListRemove(psBase, HANDLE_PTR_TO_INDEX(psHandle), &psHandle->sSiblings, offsetof(struct sHandle, sSiblings), offsetof(struct sHandle, sChildren)); +} + +/*! +****************************************************************************** + + @Function HandleListIterate + + @Description Iterate over the items in a list + + @Input psHead - pointer to list head + uiParentOffset - offset to list head struct in handle structure + uiEntryOffset - offset of list item struct in handle structure + pfnIterFunc - function to be called for each handle in the list + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(HandleListIterate) +#endif +static INLINE +PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase, struct sHandleList *psHead, IMG_SIZE_T uiParentOffset, IMG_SIZE_T uiEntryOffset, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *)) +{ + IMG_UINT32 ui32Index; + IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psHead->hParent); + + PVR_ASSERT(psHead->hParent != IMG_NULL); + + /* + * Follow the next chain from the list head until we reach + * the list head again, 
which signifies the end of the list. + */ + for(ui32Index = psHead->ui32Next; ui32Index != ui32Parent; ) + { + struct sHandle *psHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32Index); + /* PRQA S 3305 2 */ /*override stricter alignment warning */ + struct sHandleList *psEntry = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32Index, ui32Parent, uiParentOffset, uiEntryOffset); + PVRSRV_ERROR eError; + + PVR_ASSERT(psEntry->hParent == psHead->hParent); + /* + * Get the next index now, in case the list item is + * modified by the iteration function. + */ + ui32Index = psEntry->ui32Next; + + eError = (*pfnIterFunc)(psBase, psHandle); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function IterateOverChildren + + @Description Iterate over the subhandles of a parent handle + + @Input psParent - pointer to parent handle structure + pfnIterFunc - function to be called for each subhandle + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(IterateOverChildren) +#endif +static INLINE +PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *)) +{ + return HandleListIterate(psBase, &psParent->sChildren, offsetof(struct sHandle, sChildren), offsetof(struct sHandle, sSiblings), pfnIterFunc); +} + +/*! +****************************************************************************** + + @Function GetHandleStructure + + @Description Get the handle structure for a given handle + + @Input psBase - pointer to handle base structure + ppsHandle - location to return pointer to handle structure + hHandle - handle from client + eType - handle type or PVRSRV_HANDLE_TYPE_NONE if the + handle type is not to be checked. 
+ + @Output ppsHandle - points to a pointer to the handle structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(GetHandleStructure) +#endif +static INLINE +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR GetHandleStructure(PVRSRV_HANDLE_BASE *psBase, struct sHandle **ppsHandle, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType) +#else +PVRSRV_ERROR GetHandleStructure(PVRSRV_HANDLE_BASE *psBase, struct sHandle **ppsHandle, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +#endif +{ + IMG_UINT32 ui32Index = HANDLE_TO_INDEX(hHandle); + struct sHandle *psHandle; + + /* Check handle index is in range */ + if (!INDEX_IS_VALID(psBase, ui32Index)) + { + PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle index out of range (%u >= %u)", ui32Index, psBase->ui32TotalHandCount)); +#if defined (SUPPORT_SID_INTERFACE) + PVR_DBG_BREAK +#endif + return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; + } + + psHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32Index); + if (psHandle->eType == PVRSRV_HANDLE_TYPE_NONE) + { + PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle not allocated (index: %u)", ui32Index)); +#if defined (SUPPORT_SID_INTERFACE) + PVR_DBG_BREAK +#endif + return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED; + } + + /* + * Unless PVRSRV_HANDLE_TYPE_NONE was passed in to this function, + * check handle is of the correct type. + */ + if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandle->eType) + { + PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle type mismatch (%d != %d)", eType, psHandle->eType)); +#if defined (SUPPORT_SID_INTERFACE) + PVR_DBG_BREAK +#endif + return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH; + } + + /* Return the handle structure */ + *ppsHandle = psHandle; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function ParentIfPrivate + + @Description Return the parent handle if the handle was allocated + with PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, else return + IMG_NULL + + @Input psHandle - pointer to handle + + @Return Parent handle, or IMG_NULL + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(ParentIfPrivate) +#endif +static INLINE +#if defined (SUPPORT_SID_INTERFACE) +IMG_SID ParentIfPrivate(struct sHandle *psHandle) +#else +IMG_HANDLE ParentIfPrivate(struct sHandle *psHandle) +#endif +{ + return TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? + ParentHandle(psHandle) : IMG_NULL; +} + +/*! +****************************************************************************** + + @Function InitKey + + @Description Initialise a hash table key for the current process + + @Input psBase - pointer to handle base structure + aKey - pointer to key + pvData - pointer to the resource the handle represents + eType - type of resource + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(InitKey) +#endif +static INLINE +#if defined (SUPPORT_SID_INTERFACE) +IMG_VOID InitKey(HAND_KEY aKey, PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_SID hParent) +#else +IMG_VOID InitKey(HAND_KEY aKey, PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent) +#endif +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + aKey[HAND_KEY_DATA] = (IMG_UINTPTR_T)pvData; + aKey[HAND_KEY_TYPE] = (IMG_UINTPTR_T)eType; + aKey[HAND_KEY_PARENT] = (IMG_UINTPTR_T)hParent; +} + +/*! +****************************************************************************** + + @Function ReallocHandleArray + + @Description Reallocate the handle array + + @Input psBase - handle base. + phBlockAlloc - pointer to block allocation handle. 
		ui32NewCount - new handle count
		ui32OldCount - old handle count

 @Return	Error code or PVRSRV_OK

******************************************************************************/
static
PVRSRV_ERROR ReallocHandleArray(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32NewCount)
{
	/* Snapshot the current array so it can be restored/freed on any path */
	struct sHandleIndex *psOldArray = psBase->psHandleArray;
	IMG_HANDLE hOldArrayBlockAlloc = psBase->hArrayBlockAlloc;
	IMG_UINT32 ui32OldCount = psBase->ui32TotalHandCount;
	struct sHandleIndex *psNewArray = IMG_NULL;
	IMG_HANDLE hNewArrayBlockAlloc = IMG_NULL;
	PVRSRV_ERROR eError;
	PVRSRV_ERROR eReturn = PVRSRV_OK;
	IMG_UINT32 ui32Index;

	/* Nothing to do if the requested size matches the current size */
	if (ui32NewCount == ui32OldCount)
	{
		return PVRSRV_OK;
	}

	/* The array may only shrink when purging is enabled (ui32NewCount == 0
	 * is the full-teardown case and is always allowed) */
	if (ui32NewCount != 0 && !psBase->bPurgingEnabled &&
		ui32NewCount < ui32OldCount)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* Both counts must be whole multiples of the handle block size */
	if (((ui32OldCount % HANDLE_BLOCK_SIZE) != 0) ||
		((ui32NewCount % HANDLE_BLOCK_SIZE) != 0))
	{
		PVR_ASSERT((ui32OldCount % HANDLE_BLOCK_SIZE) == 0);
		PVR_ASSERT((ui32NewCount % HANDLE_BLOCK_SIZE) == 0);

		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	if (ui32NewCount != 0)
	{
		/* Allocate new handle array */
		eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
			HANDLE_ARRAY_SIZE(ui32NewCount) * sizeof(struct sHandleIndex),
			(IMG_VOID **)&psNewArray,
			&hNewArrayBlockAlloc,
			"Memory Area");
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't allocate new handle array (%d)", eError));
			eReturn = eError;
			goto error;
		}

		/* Carry over the index entries common to both sizes */
		if (ui32OldCount != 0)
		{
			OSMemCopy(psNewArray, psOldArray, HANDLE_ARRAY_SIZE(MIN(ui32NewCount, ui32OldCount)) * sizeof(struct sHandleIndex));
		}
	}

	/*
	 * If the new handle array is smaller than the old one, free
	 * unused handle structures
	 */
	/* NOTE(review): these blocks were allocated from
	 * PVRSRV_OS_NON_PAGEABLE_HEAP below but are freed with
	 * PVRSRV_OS_PAGEABLE_HEAP here - confirm OSFreeMem ignores the heap
	 * flag on this platform, otherwise the flags should match. */
	for(ui32Index = ui32NewCount; ui32Index < ui32OldCount; ui32Index += HANDLE_BLOCK_SIZE)
	{
		struct sHandleIndex *psIndex = INDEX_TO_INDEX_STRUCT_PTR(psOldArray, ui32Index);

		eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
				sizeof(struct sHandle) * HANDLE_BLOCK_SIZE,
				psIndex->psHandle,
				psIndex->hBlockAlloc);
		if (eError != PVRSRV_OK)
		{
			/* Best effort: log and continue freeing remaining blocks */
			PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't free handle structures (%d)", eError));
		}
	}

	/*
	 * If the new handle array is bigger than the old one, allocate
	 * new handle structures
	 */
	for(ui32Index = ui32OldCount; ui32Index < ui32NewCount; ui32Index += HANDLE_BLOCK_SIZE)
	{
		/* PRQA S 0505 1 */ /* psNewArray is never NULL here: ui32NewCount > ui32OldCount >= 0 implies the allocation above ran */
		struct sHandleIndex *psIndex = INDEX_TO_INDEX_STRUCT_PTR(psNewArray, ui32Index);

		eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
				sizeof(struct sHandle) * HANDLE_BLOCK_SIZE,
				(IMG_VOID **)&psIndex->psHandle,
				&psIndex->hBlockAlloc,
				"Memory Area");
		if (eError != PVRSRV_OK)
		{
			/* Mark the block unallocated so the error path can skip it */
			psIndex->psHandle = IMG_NULL;
			PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't allocate handle structures (%d)", eError));
			eReturn = eError;
		}
		else
		{
			IMG_UINT32 ui32SubIndex;

			psIndex->ui32FreeHandBlockCount = HANDLE_BLOCK_SIZE;

			/* Initialise every handle in the new block as free */
			for(ui32SubIndex = 0; ui32SubIndex < HANDLE_BLOCK_SIZE; ui32SubIndex++)
			{
				struct sHandle *psHandle = psIndex->psHandle + ui32SubIndex;


				psHandle->ui32Index = ui32SubIndex + ui32Index;
				psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
				psHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
				psHandle->ui32NextIndexPlusOne = 0;
			}
		}
	}
	if (eReturn != PVRSRV_OK)
	{
		goto error;
	}

#ifdef DEBUG_MAX_HANDLE_COUNT
	/* Force handle failure to test error exit code */
	if (ui32NewCount > DEBUG_MAX_HANDLE_COUNT)
	{
		PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Max handle count (%u) reached", DEBUG_MAX_HANDLE_COUNT));
		eReturn = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto error;
	}
#endif

	if (psOldArray != IMG_NULL)
	{
		/* Free old handle array */
		eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
				HANDLE_ARRAY_SIZE(ui32OldCount) * sizeof(struct sHandleIndex),
				psOldArray,
				hOldArrayBlockAlloc);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't free old handle array (%d)", eError));
		}
	}

	/* Commit the new array to the base */
	psBase->psHandleArray = psNewArray;
	psBase->hArrayBlockAlloc = hNewArrayBlockAlloc;
	psBase->ui32TotalHandCount = ui32NewCount;

	if (ui32NewCount > ui32OldCount)
	{
		/* Check for wraparound */
		PVR_ASSERT(psBase->ui32FreeHandCount + (ui32NewCount - ui32OldCount) > psBase->ui32FreeHandCount);

		/* PRQA S 3382 1 */ /* ui32NewCount always > ui32OldCount */
		psBase->ui32FreeHandCount += (ui32NewCount - ui32OldCount);

		/*
		 * If purging is enabled, there is no free handle list
		 * management, but as an optimization, when allocating
		 * new handles, we use ui32FirstFreeIndex to point to
		 * the first handle in a newly allocated block.
		 */
		if (psBase->ui32FirstFreeIndex == 0)
		{
			PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);

			psBase->ui32FirstFreeIndex = ui32OldCount;
		}
		else
		{
			if (!psBase->bPurgingEnabled)
			{
				/* Append the new range to the tail of the free list */
				PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
				PVR_ASSERT(INDEX_TO_HANDLE_STRUCT_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0);

				INDEX_TO_HANDLE_STRUCT_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32OldCount + 1;
			}
		}

		if (!psBase->bPurgingEnabled)
		{
			psBase->ui32LastFreeIndexPlusOne = ui32NewCount;
		}
	}
	else
	{
		PVR_ASSERT(ui32NewCount == 0 || psBase->bPurgingEnabled);
		PVR_ASSERT(ui32NewCount == 0 || psBase->ui32FirstFreeIndex <= ui32NewCount);
		PVR_ASSERT(psBase->ui32FreeHandCount - (ui32OldCount - ui32NewCount) < psBase->ui32FreeHandCount);

		/* PRQA S 3382 1 */ /* ui32OldCount always >= ui32NewCount */
		psBase->ui32FreeHandCount -= (ui32OldCount - ui32NewCount);

		if (ui32NewCount == 0)
		{
			psBase->ui32FirstFreeIndex = 0;
			psBase->ui32LastFreeIndexPlusOne = 0;
		}
	}

	PVR_ASSERT(psBase->ui32FirstFreeIndex <= psBase->ui32TotalHandCount);

	return PVRSRV_OK;

error:
	PVR_ASSERT(eReturn != PVRSRV_OK);

	if (psNewArray != IMG_NULL)
	{
		/* Free any new handle structures that were allocated */
		for(ui32Index = ui32OldCount; ui32Index < ui32NewCount; ui32Index += HANDLE_BLOCK_SIZE)
		{
			struct sHandleIndex *psIndex = INDEX_TO_INDEX_STRUCT_PTR(psNewArray, ui32Index);
			if (psIndex->psHandle != IMG_NULL)
			{
				eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
						sizeof(struct sHandle) * HANDLE_BLOCK_SIZE,
						psIndex->psHandle,
						psIndex->hBlockAlloc);
				if (eError != PVRSRV_OK)
				{
					PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't free handle structures (%d)", eError));
				}
			}
		}

		/* Free new handle array */
		eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
				HANDLE_ARRAY_SIZE(ui32NewCount) * sizeof(struct sHandleIndex),
				psNewArray,
				hNewArrayBlockAlloc);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "ReallocHandleArray: Couldn't free new handle array (%d)", eError));
		}
	}

	return eReturn;
}

/*!
******************************************************************************

 @Function	FreeHandleArray

 @Description	Frees the handle array.
		The memory containing the array of handle structure
		pointers is deallocated.

 @Input		psBase - pointer to handle base structure

 @Return	Error code or PVRSRV_OK

******************************************************************************/
static PVRSRV_ERROR FreeHandleArray(PVRSRV_HANDLE_BASE *psBase)
{
	/* A resize to zero both frees the blocks and the array itself */
	return ReallocHandleArray(psBase, 0);
}

/*!
******************************************************************************

 @Function	FreeHandle

 @Description	Free a handle structure.

 @Input		psBase - pointer to handle base structure
		psHandle - pointer to handle structure

 @Return	PVRSRV_OK or PVRSRV_ERROR

******************************************************************************/
static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
{
	HAND_KEY aKey;
	IMG_UINT32 ui32Index = HANDLE_PTR_TO_INDEX(psHandle);
	PVRSRV_ERROR eError;

	/*
	 * If a handle allocated in batch mode is freed whilst still
	 * in batch mode, the type is set to PVRSRV_HANDLE_TYPE_NONE further
	 * down, to indicate the handle will not be used, but not actually
	 * freed. The Free is completed when this function is called a
	 * second time as part of the batch commit or release.
	 */

	/* Rebuild the hash key this handle was inserted under */
	InitKey(aKey, psBase, psHandle->pvData, psHandle->eType, ParentIfPrivate(psHandle));

	/* MULTI handles were never inserted into the hash table, and a
	 * partially-freed batched handle has already been removed */
	if (!TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_MULTI) && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
	{
#if defined (SUPPORT_SID_INTERFACE)
		IMG_SID hHandle;
		hHandle = (IMG_SID) HASH_Remove_Extended(psBase->psHashTab, aKey);
#else
		IMG_HANDLE hHandle;
		hHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, aKey);

#endif

		PVR_ASSERT(hHandle != IMG_NULL);
		PVR_ASSERT(hHandle == INDEX_TO_HANDLE(ui32Index));
		PVR_UNREFERENCED_PARAMETER(hHandle);
	}

	/* Unlink handle from parent */
	UnlinkFromParent(psBase, psHandle);

	/* Free children (recursive: FreeHandle is the iteration callback) */
	eError = IterateOverChildren(psBase, psHandle, FreeHandle);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreeHandle: Error whilst freeing subhandles (%d)", eError));
		return eError;
	}

	/*
	 * Clear the type here, so that a handle can no longer be looked
	 * up if it is only partially freed.
	 */
	psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;

	if (BATCHED_HANDLE(psHandle) && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
	{
		/* PRQA S 1474,4130 1 */ /* ignore warnings about enum types being modified */
		SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle);
		/*
		 * If the handle was allocated in batch mode, delay the free
		 * until the batch commit or release.
		 */
		return PVRSRV_OK;
	}

	/* No free list management if purging is enabled */
	if (!psBase->bPurgingEnabled)
	{
		if (psBase->ui32FreeHandCount == 0)
		{
			/* List was empty: this handle becomes both head and tail */
			PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
			PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);

			psBase->ui32FirstFreeIndex = ui32Index;
		}
		else
		{
			/*
			 * Put the handle pointer on the end of the the free
			 * handle pointer linked list.
			 */
			PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
			PVR_ASSERT(INDEX_TO_HANDLE_STRUCT_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0);
			INDEX_TO_HANDLE_STRUCT_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32Index + 1;
		}

		PVR_ASSERT(psHandle->ui32NextIndexPlusOne == 0);

		/* Update the end of the free handle linked list */
		psBase->ui32LastFreeIndexPlusOne = ui32Index + 1;
	}

	/* Maintain both the global and the per-block free counters */
	psBase->ui32FreeHandCount++;
	INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32Index)++;

	PVR_ASSERT(INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32Index)<= HANDLE_BLOCK_SIZE);

#ifdef DEBUG
	/* Debug-only consistency check: per-block counts must sum to the total */
	{
		IMG_UINT32 ui32BlockedIndex;
		IMG_UINT32 ui32FreeHandCount = 0;

		for (ui32BlockedIndex = 0; ui32BlockedIndex < psBase->ui32TotalHandCount; ui32BlockedIndex += HANDLE_BLOCK_SIZE)
		{
			ui32FreeHandCount += INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32BlockedIndex);
		}

		PVR_ASSERT(ui32FreeHandCount == psBase->ui32FreeHandCount);
	}
#endif

	return PVRSRV_OK;
}

/*!
+****************************************************************************** + + @Function FreeAllHandles + + @Description Free all handles for a given handle base + + @Input psBase - pointer to handle base structure + + @Return PVRSRV_OK or PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR FreeAllHandles(PVRSRV_HANDLE_BASE *psBase) +{ + IMG_UINT32 i; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount) + { + return eError; + } + + for (i = 0; i < psBase->ui32TotalHandCount; i++) + { + struct sHandle *psHandle; + + psHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, i); + + if (psHandle->eType != PVRSRV_HANDLE_TYPE_NONE) + { + eError = FreeHandle(psBase, psHandle); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FreeAllHandles: FreeHandle failed (%d)", eError)); + break; + } + + /* Break out of loop if all the handles free */ + if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount) + { + break; + } + } + } + + return eError; +} + +/*! +****************************************************************************** + + @Function FreeHandleBase + + @Description Free a handle base. 

 @Input		psHandleBase - pointer to handle base

 @Return	Error code or PVRSRV_OK

******************************************************************************/
static PVRSRV_ERROR FreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
{
	PVRSRV_ERROR eError;

	/* An open batch at teardown is a caller bug; release it so the
	 * batched handles can actually be freed below */
	if (HANDLES_BATCHED(psBase))
	{
		PVR_DPF((PVR_DBG_WARNING, "FreeHandleBase: Uncommitted/Unreleased handle batch"));
		PVRSRVReleaseHandleBatch(psBase);
	}

	/* Free all allocated handles */
	eError = FreeAllHandles(psBase);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handles (%d)", eError));
		return eError;
	}

	/* Free the handle array */
	eError = FreeHandleArray(psBase);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle array (%d)", eError));
		return eError;
	}

	if (psBase->psHashTab != IMG_NULL)
	{
		/* Free the hash table */
		HASH_Delete(psBase->psHashTab);
	}

	/* Finally release the base structure itself (hBaseBlockAlloc is read
	 * as an argument before OSFreeMem runs, so this is not a use-after-free) */
	eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
			sizeof(*psBase),
			psBase,
			psBase->hBaseBlockAlloc);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle base (%d)", eError));
		return eError;
	}

	return PVRSRV_OK;
}

/*!
******************************************************************************

 @Function	FindHandle

 @Description	Find handle corresponding to a resource pointer

 @Input		psBase - pointer to handle base structure
		pvData - pointer to resource to be associated with the handle
		eType - the type of resource

 @Return	the handle, or IMG_NULL if not found

******************************************************************************/
#ifdef INLINE_IS_PRAGMA
#pragma inline(FindHandle)
#endif
static INLINE
#if defined (SUPPORT_SID_INTERFACE)
IMG_SID FindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_SID hParent)
#else
IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
#endif
{
	HAND_KEY aKey;

	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);

	/* Look up by the same (data, type, parent) key used on insertion */
	InitKey(aKey, psBase, pvData, eType, hParent);

#if defined (SUPPORT_SID_INTERFACE)
	return (IMG_SID) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
#else
	return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
#endif
}

/*!
******************************************************************************

 @Function	IncreaseHandleArraySize

 @Description	Allocate some more free handles

 @Input		psBase - pointer to handle base structure
		ui32Delta - number of new handles required

 @Return	Error code or PVRSRV_OK

******************************************************************************/
static PVRSRV_ERROR IncreaseHandleArraySize(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32Delta)
{
	PVRSRV_ERROR eError;
	/* Grow in whole blocks only */
	IMG_UINT32 ui32DeltaAdjusted = ROUND_UP_TO_MULTIPLE_OF_BLOCK_SIZE(ui32Delta);
	IMG_UINT32 ui32NewTotalHandCount = psBase->ui32TotalHandCount + ui32DeltaAdjusted;

	PVR_ASSERT(ui32Delta != 0);

	/*
	 * Check new count against max handle index, and check for wrap around.
	 */
	if (ui32NewTotalHandCount > psBase->ui32MaxIndexPlusOne || ui32NewTotalHandCount <= psBase->ui32TotalHandCount)
	{
		/* Clamp to the maximum and see if that still covers the request */
		ui32NewTotalHandCount = psBase->ui32MaxIndexPlusOne;

		ui32DeltaAdjusted = ui32NewTotalHandCount - psBase->ui32TotalHandCount;

		if (ui32DeltaAdjusted < ui32Delta)
		{
			PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: Maximum handle limit reached (%d)", psBase->ui32MaxIndexPlusOne));
			return PVRSRV_ERROR_OUT_OF_MEMORY;
		}
	}

	PVR_ASSERT(ui32DeltaAdjusted >= ui32Delta);

	/* Realloc handle pointer array */
	eError = ReallocHandleArray(psBase, ui32NewTotalHandCount);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: ReallocHandleArray failed (%d)", eError));
		return eError;
	}

	return PVRSRV_OK;
}

/*!
******************************************************************************

 @Function	EnsureFreeHandles

 @Description	Ensure there are enough free handles

 @Input		psBase - pointer to handle base structure
		ui32Free - number of free handles required

 @Return	Error code or PVRSRV_OK

******************************************************************************/
static PVRSRV_ERROR EnsureFreeHandles(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32Free)
{
	PVRSRV_ERROR eError;

	/* Only grow when the current free count cannot satisfy the request */
	if (ui32Free > psBase->ui32FreeHandCount)
	{
		IMG_UINT32 ui32FreeHandDelta = ui32Free - psBase->ui32FreeHandCount;
		eError = IncreaseHandleArraySize(psBase, ui32FreeHandDelta);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "EnsureFreeHandles: Couldn't allocate %u handles to ensure %u free handles (IncreaseHandleArraySize failed with error %d)", ui32FreeHandDelta, ui32Free, eError));

			return eError;
		}
	}

	return PVRSRV_OK;
}

/*!
+****************************************************************************** + + @Function AllocHandle + + @Description Allocate a new handle + + @Input phHandle - location for new handle + pvData - pointer to resource to be associated with the handle + eType - the type of resource + hParent - parent handle or IMG_NULL + + @Output phHandle - points to new handle + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +#if defined (SUPPORT_SID_INTERFACE) +static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_SID hParent) +#else +static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent) +#endif +{ + IMG_UINT32 ui32NewIndex = DEFAULT_MAX_INDEX_PLUS_ONE; + struct sHandle *psNewHandle = IMG_NULL; +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hHandle; +#else + IMG_HANDLE hHandle; +#endif + HAND_KEY aKey; + PVRSRV_ERROR eError; + + /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + PVR_ASSERT(psBase != IMG_NULL); + PVR_ASSERT(psBase->psHashTab != IMG_NULL); + + if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + /* Handle must not already exist */ + PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == IMG_NULL); + } + + if (psBase->ui32FreeHandCount == 0 && HANDLES_BATCHED(psBase)) + { + PVR_DPF((PVR_DBG_WARNING, "AllocHandle: Handle batch size (%u) was too small, allocating additional space", psBase->ui32HandBatchSize)); + } + + /* Ensure there is a free handle */ + eError = EnsureFreeHandles(psBase, 1); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "AllocHandle: EnsureFreeHandles failed (%d)", eError)); + return eError; + } + PVR_ASSERT(psBase->ui32FreeHandCount != 0); + + if (!psBase->bPurgingEnabled) 
+ { + /* Array index of first free handle */ + ui32NewIndex = psBase->ui32FirstFreeIndex; + + /* Get handle array entry */ + psNewHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32NewIndex); + } + else + { + IMG_UINT32 ui32BlockedIndex; + + /* + * If purging is enabled, we always try to allocate handles + * at the front of the array, to increase the chances that + * the size of the handle array can be reduced by a purge. + * No linked list of free handles is kept; we search for + * free handles as required. + */ + + /* + * ui32FirstFreeIndex should only be set when a new batch of + * handle structures is allocated, and should always be a + * multiple of the block size. + */ + PVR_ASSERT((psBase->ui32FirstFreeIndex % HANDLE_BLOCK_SIZE) == 0); + + for (ui32BlockedIndex = ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(psBase->ui32FirstFreeIndex); ui32BlockedIndex < psBase->ui32TotalHandCount; ui32BlockedIndex += HANDLE_BLOCK_SIZE) + { + struct sHandleIndex *psIndex = BASE_AND_INDEX_TO_INDEX_STRUCT_PTR(psBase, ui32BlockedIndex); + + if (psIndex->ui32FreeHandBlockCount == 0) + { + continue; + } + + for (ui32NewIndex = ui32BlockedIndex; ui32NewIndex < ui32BlockedIndex + HANDLE_BLOCK_SIZE; ui32NewIndex++) + { + psNewHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32NewIndex); + if (HANDLE_STRUCT_IS_FREE(psNewHandle)) + { + break; + } + } + } + psBase->ui32FirstFreeIndex = 0; + PVR_ASSERT(ui32NewIndex < psBase->ui32TotalHandCount); + } + PVR_ASSERT(psNewHandle != IMG_NULL); + + /* Handle to be returned to client */ + hHandle = INDEX_TO_HANDLE(ui32NewIndex); + + /* + * If a data pointer can be associated with multiple handles, we + * don't put the handle in the hash table, as the data pointer + * may not map to a unique handle + */ + if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) + { + /* Initialise hash key */ + InitKey(aKey, psBase, pvData, eType, hParent); + + /* Put the new handle in the hash table */ + if (!HASH_Insert_Extended(psBase->psHashTab, aKey, 
(IMG_UINTPTR_T)hHandle)) + { + PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't add handle to hash table")); + + return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; + } + } + + psBase->ui32FreeHandCount--; + + PVR_ASSERT(INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32NewIndex) <= HANDLE_BLOCK_SIZE); + PVR_ASSERT(INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32NewIndex) > 0); + + INDEX_TO_FREE_HAND_BLOCK_COUNT(psBase, ui32NewIndex)--; + + /* No free list management if purging is enabled */ + if (!psBase->bPurgingEnabled) + { + /* Check whether the last free handle has been allocated */ + if (psBase->ui32FreeHandCount == 0) + { + PVR_ASSERT(psBase->ui32FirstFreeIndex == ui32NewIndex); + PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == (ui32NewIndex + 1)); + + psBase->ui32LastFreeIndexPlusOne = 0; + psBase->ui32FirstFreeIndex = 0; + } + else + { + /* + * Update the first free handle index. + * If the "next free index plus one" field in the new + * handle structure is zero, the next free index is + * the index of the new handle plus one. This + * convention has been adopted to simplify the + * initialisation of freshly allocated handle + * space. + */ + psBase->ui32FirstFreeIndex = (psNewHandle->ui32NextIndexPlusOne == 0) ? 
+ ui32NewIndex + 1 : + psNewHandle->ui32NextIndexPlusOne - 1; + } + } + + /* Initialise the newly allocated handle */ + PVR_ASSERT(psNewHandle->ui32Index == ui32NewIndex); + + /* PRQA S 0505 1 */ /* psNewHandle is never NULL, see assert earlier */ + psNewHandle->eType = eType; + psNewHandle->pvData = pvData; + psNewHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE; + psNewHandle->eFlag = eFlag; + + InitParentList(psNewHandle); +#if defined(DEBUG) + PVR_ASSERT(NoChildren(psNewHandle)); +#endif + + InitChildEntry(psNewHandle); +#if defined(DEBUG) + PVR_ASSERT(NoParent(psNewHandle)); +#endif + + if (HANDLES_BATCHED(psBase)) + { + /* Add handle to batch list */ + psNewHandle->ui32NextIndexPlusOne = psBase->ui32FirstBatchIndexPlusOne; + + psBase->ui32FirstBatchIndexPlusOne = ui32NewIndex + 1; + + /* PRQA S 1474 1 */ /* ignore warnings about enum types being modified */ + SET_BATCHED_HANDLE(psNewHandle); + } + else + { + psNewHandle->ui32NextIndexPlusOne = 0; + } + + /* Return the new handle to the client */ + *phHandle = hHandle; + + return PVRSRV_OK; +} + +/*! 
******************************************************************************

 @Function	PVRSRVAllocHandle

 @Description	Allocate a handle

 @Input		phHandle - location for new handle
		pvData - pointer to resource to be associated with the handle
		eType - the type of resource

 @Output	phHandle - points to new handle

 @Return	Error code or PVRSRV_OK

******************************************************************************/
#if defined (SUPPORT_SID_INTERFACE)
PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
#else
PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
#endif
{
#if defined (SUPPORT_SID_INTERFACE)
	IMG_SID hHandle;
#else
	IMG_HANDLE hHandle;
#endif
	PVRSRV_ERROR eError;

	/* Clear the output in case of early error return */
#if defined (SUPPORT_SID_INTERFACE)
	*phHandle = 0;
#else
	*phHandle = IMG_NULL;
#endif

	if (HANDLES_BATCHED(psBase))
	{
		/*
		 * Increment the counter in case of failure. It will be
		 * decremented on success.
		 */
		psBase->ui32BatchHandAllocFailures++;
	}

	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);

	if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
	{
		/* See if there is already a handle for this data pointer */
		hHandle = FindHandle(psBase, pvData, eType, IMG_NULL);
#if defined (SUPPORT_SID_INTERFACE)
		if (hHandle != 0)
#else
		if (hHandle != IMG_NULL)
#endif
		{
			struct sHandle *psHandle;

			eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Lookup of existing handle failed"));
				return eError;
			}

			/*
			 * If the client is willing to share a handle, and the
			 * existing handle is marked as shareable, return the
			 * existing handle.
			 */
			if (TEST_FLAG(psHandle->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED))
			{
				*phHandle = hHandle;
				eError = PVRSRV_OK;
				goto exit_ok;
			}

#if defined (SUPPORT_SID_INTERFACE)
			PVR_DBG_BREAK
#endif
			return PVRSRV_ERROR_HANDLE_NOT_SHAREABLE;
		}
	}

	eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, IMG_NULL);

exit_ok:
	/* Only undo the pre-incremented failure count on success; a failed
	 * AllocHandle leaves the counter raised, which is what commit checks */
	if (HANDLES_BATCHED(psBase) && (eError == PVRSRV_OK))
	{
		psBase->ui32BatchHandAllocFailures--;
	}

	return eError;
}

/*!
******************************************************************************

 @Function	PVRSRVAllocSubHandle

 @Description	Allocate a subhandle

 @Input		phHandle - location for new subhandle
		pvData - pointer to resource to be associated with the subhandle
		eType - the type of resource
		hParent - parent handle

 @Output	phHandle - points to new subhandle

 @Return	Error code or PVRSRV_OK

******************************************************************************/
#if defined (SUPPORT_SID_INTERFACE)
PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_SID hParent)
#else
PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
#endif
{
	struct sHandle *psPHand;
	struct sHandle *psCHand;
	PVRSRV_ERROR eError;
#if defined (SUPPORT_SID_INTERFACE)
	IMG_SID hParentKey;
	IMG_SID hHandle;

	*phHandle = 0;
#else
	IMG_HANDLE hParentKey;
	IMG_HANDLE hHandle;

	*phHandle = IMG_NULL;
#endif

	if (HANDLES_BATCHED(psBase))
	{
		/*
		 * Increment the counter in case of failure. It will be
		 * decremented on success.
		 */
		psBase->ui32BatchHandAllocFailures++;
	}

	/* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);

	/* Only private subhandles carry the parent in their hash key */
	hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
			hParent : IMG_NULL;

	/* Lookup the parent handle */
	eError = GetHandleStructure(psBase, &psPHand, hParent, PVRSRV_HANDLE_TYPE_NONE);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
	{
		/* See if there is already a handle for this data pointer */
		hHandle = FindHandle(psBase, pvData, eType, hParentKey);
#if defined (SUPPORT_SID_INTERFACE)
		if (hHandle != 0)
#else
		if (hHandle != IMG_NULL)
#endif
		{
			struct sHandle *psCHandle;
			PVRSRV_ERROR eErr;

			eErr = GetHandleStructure(psBase, &psCHandle, hHandle, eType);
			if (eErr != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Lookup of existing handle failed"));
				return eErr;
			}

			/* NOTE(review): this assert requires hParentKey != IMG_NULL,
			 * but hParentKey is IMG_NULL whenever the PRIVATE flag is not
			 * set, so a shared non-private subhandle hit trips it in
			 * debug builds - confirm the intended condition. */
			PVR_ASSERT(hParentKey != IMG_NULL && ParentHandle(HANDLE_TO_HANDLE_STRUCT_PTR(psBase, hHandle)) == hParent);

			/*
			 * If the client is willing to share a handle, the
			 * existing handle is marked as shareable, and the
			 * existing handle has the same parent, return the
			 * existing handle.
			 */
			if (TEST_FLAG(psCHandle->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED) && ParentHandle(HANDLE_TO_HANDLE_STRUCT_PTR(psBase, hHandle)) == hParent)
			{
				*phHandle = hHandle;
				goto exit_ok;
			}
#if defined (SUPPORT_SID_INTERFACE)
			PVR_DBG_BREAK
#endif
			return PVRSRV_ERROR_HANDLE_NOT_SHAREABLE;
		}
	}

	eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/*
	 * Get the parent handle structure again, in case the handle
	 * structure has moved (depending on the implementation
	 * of AllocHandle).
	 */
	psPHand = HANDLE_TO_HANDLE_STRUCT_PTR(psBase, hParent);

	psCHand = HANDLE_TO_HANDLE_STRUCT_PTR(psBase, hHandle);

	/* Link the new subhandle under its parent */
	AdoptChild(psBase, psPHand, psCHand);

	*phHandle = hHandle;

exit_ok:
	if (HANDLES_BATCHED(psBase))
	{
		psBase->ui32BatchHandAllocFailures--;
	}

	return PVRSRV_OK;
}

/*!
******************************************************************************

 @Function	PVRSRVFindHandle

 @Description	Find handle corresponding to a resource pointer

 @Input		phHandle - location for returned handle
		pvData - pointer to resource to be associated with the handle
		eType - the type of resource

 @Output	phHandle - points to handle

 @Return	Error code or PVRSRV_OK

******************************************************************************/
#if defined (SUPPORT_SID_INTERFACE)
PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
#else
PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
#endif
{
#if defined (SUPPORT_SID_INTERFACE)
	IMG_SID hHandle;
#else
	IMG_HANDLE hHandle;
#endif

	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);

	/* See if there is a handle for this data pointer */
#if defined (SUPPORT_SID_INTERFACE)
	hHandle = (IMG_SID) FindHandle(psBase, pvData, eType, IMG_NULL);
#else
	hHandle = (IMG_HANDLE) FindHandle(psBase, pvData, eType, IMG_NULL);
#endif
	if (hHandle == IMG_NULL)
	{
		return PVRSRV_ERROR_HANDLE_NOT_FOUND;
	}

	*phHandle = hHandle;

	return PVRSRV_OK;
}

/*!
+****************************************************************************** + + @Function PVRSRVLookupHandleAnyType + + @Description Lookup the data pointer and type corresponding to a handle + + @Input ppvData - location to return data pointer + peType - location to return handle type + hHandle - handle from client + + @Output ppvData - points to the data pointer + peType - points to handle type + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_SID hHandle) +#else +PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle) +#endif +{ + struct sHandle *psHandle; + PVRSRV_ERROR eError; + + eError = GetHandleStructure(psBase, &psHandle, hHandle, PVRSRV_HANDLE_TYPE_NONE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandleAnyType: Error looking up handle (%d)", eError)); +#if defined (SUPPORT_SID_INTERFACE) + PVR_DBG_BREAK +#endif + return eError; + } + + *ppvData = psHandle->pvData; + *peType = psHandle->eType; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVLookupHandle + + @Description Lookup the data pointer corresponding to a handle + + @Input ppvData - location to return data pointer + hHandle - handle from client + eType - handle type + + @Output ppvData - points to the data pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType) +#else +PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +#endif +{ + struct sHandle *psHandle; + PVRSRV_ERROR eError; + + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); +#if defined (SUPPORT_SID_INTERFACE) + PVR_ASSERT(hHandle != 0); +#endif + + eError = GetHandleStructure(psBase, &psHandle, hHandle, eType); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandle: Error looking up handle (%d)", eError)); +#if defined (SUPPORT_SID_INTERFACE) + PVR_DBG_BREAK +#endif + return eError; + } + + *ppvData = psHandle->pvData; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVLookupSubHandle + + @Description Lookup the data pointer corresponding to a subhandle + + @Input ppvData - location to return data pointer + hHandle - handle from client + eType - handle type + hAncestor - ancestor handle + + @Output ppvData - points to the data pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType, IMG_SID hAncestor) +#else +PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor) +#endif +{ + struct sHandle *psPHand; + struct sHandle *psCHand; + PVRSRV_ERROR eError; + + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); +#if defined (SUPPORT_SID_INTERFACE) + PVR_ASSERT(hHandle != 0); +#endif + + eError = GetHandleStructure(psBase, &psCHand, hHandle, eType); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Error looking up subhandle (%d)", eError)); + return eError; + } + + /* Look for hAncestor among the handle's ancestors */ + for (psPHand = psCHand; ParentHandle(psPHand) != hAncestor; ) + { + eError = GetHandleStructure(psBase, &psPHand, ParentHandle(psPHand), PVRSRV_HANDLE_TYPE_NONE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor")); + return PVRSRV_ERROR_INVALID_SUBHANDLE; + } + } + + *ppvData = psCHand->pvData; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVGetParentHandle + + @Description Lookup the parent of a handle + + @Input phParent - location for returning parent handle + hHandle - handle for which the parent handle is required + eType - handle type + hParent - parent handle + + @Output *phParent - parent handle, or IMG_NULL if there is no parent + + @Return Error code or PVRSRV_OK. Note that not having a parent is + not regarded as an error. + +******************************************************************************/ +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phParent, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType) +#else +PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +#endif +{ + struct sHandle *psHandle; + PVRSRV_ERROR eError; + + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + + eError = GetHandleStructure(psBase, &psHandle, hHandle, eType); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetParentHandle: Error looking up subhandle (%d)", eError)); + return eError; + } + + *phParent = ParentHandle(psHandle); + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVLookupAndReleaseHandle + + @Description Lookup the data pointer corresponding to a handle + + @Input ppvData - location to return data pointer + hHandle - handle from client + eType - handle type + eFlag - lookup flags + + @Output ppvData - points to the data pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType) +#else +PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +#endif +{ + struct sHandle *psHandle; + PVRSRV_ERROR eError; + + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + + eError = GetHandleStructure(psBase, &psHandle, hHandle, eType); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupAndReleaseHandle: Error looking up handle (%d)", eError)); +#if defined (SUPPORT_SID_INTERFACE) + PVR_DBG_BREAK +#endif + return eError; + } + + *ppvData = psHandle->pvData; + + eError = FreeHandle(psBase, psHandle); + + return eError; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVReleaseHandle + + @Description Release a handle that is no longer needed + + @Input hHandle - handle from client + eType - handle type + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType) +#else +PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +#endif +{ + struct sHandle *psHandle; + PVRSRV_ERROR eError; + + PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); + + eError = GetHandleStructure(psBase, &psHandle, hHandle, eType); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Error looking up handle (%d)", eError)); + return eError; + } + + eError = FreeHandle(psBase, psHandle); + + return eError; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVNewHandleBatch + + @Description Start a new handle batch + + @Input psBase - handle base + @Input ui32BatchSize - handle batch size + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize) +{ + PVRSRV_ERROR eError; + + if (HANDLES_BATCHED(psBase)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: There is a handle batch already in use (size %u)", psBase->ui32HandBatchSize)); + return PVRSRV_ERROR_HANDLE_BATCH_IN_USE; + } + + if (ui32BatchSize == 0) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: Invalid batch size (%u)", ui32BatchSize)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = EnsureFreeHandles(psBase, ui32BatchSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: EnsureFreeHandles failed (error %d)", eError)); + return eError; + } + + psBase->ui32HandBatchSize = ui32BatchSize; + + /* Record current number of handles */ + psBase->ui32TotalHandCountPreBatch = psBase->ui32TotalHandCount; + + PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0); + + PVR_ASSERT(psBase->ui32FirstBatchIndexPlusOne == 0); + + PVR_ASSERT(HANDLES_BATCHED(psBase)); + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVHandleBatchCommitOrRelease + + @Description Release a handle batch + + @Input psBase - handle base + bCommit - commit handles + + @Return none + +******************************************************************************/ +static PVRSRV_ERROR PVRSRVHandleBatchCommitOrRelease(PVRSRV_HANDLE_BASE *psBase, IMG_BOOL bCommit) +{ + + IMG_UINT32 ui32IndexPlusOne; + IMG_BOOL bCommitBatch = bCommit; + + if (!HANDLES_BATCHED(psBase)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: There is no handle batch")); + return PVRSRV_ERROR_INVALID_PARAMS; + + } + + if (psBase->ui32BatchHandAllocFailures != 0) + { + if (bCommit) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: Attempting to commit batch with handle allocation failures.")); + } + bCommitBatch = IMG_FALSE; + } + /* + * The whole point of batched handles is to avoid handle allocation + * failures. + */ + PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0 || !bCommit); + + ui32IndexPlusOne = psBase->ui32FirstBatchIndexPlusOne; + while(ui32IndexPlusOne != 0) + { + struct sHandle *psHandle = INDEX_TO_HANDLE_STRUCT_PTR(psBase, ui32IndexPlusOne - 1); + IMG_UINT32 ui32NextIndexPlusOne = psHandle->ui32NextIndexPlusOne; + PVR_ASSERT(BATCHED_HANDLE(psHandle)); + + psHandle->ui32NextIndexPlusOne = 0; + + if (!bCommitBatch || BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) + { + PVRSRV_ERROR eError; + + /* + * We need a complete free here. If the handle + * is not partially free, set the handle as + * unbatched to avoid a partial free. 
+ */ + if (!BATCHED_HANDLE_PARTIALLY_FREE(psHandle)) + { + /* PRQA S 1474,4130 1 */ /* ignore warnings about enum types being modified */ + SET_UNBATCHED_HANDLE(psHandle); /* PRQA S 4130 */ /* mis-use of enums FIXME*/ + } + + eError = FreeHandle(psBase, psHandle); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: Error freeing handle (%d)", eError)); + } + PVR_ASSERT(eError == PVRSRV_OK); + } + else + { + /* PRQA S 1474,4130 1 */ /* ignore warnings about enum types being modified */ + SET_UNBATCHED_HANDLE(psHandle); + } + + ui32IndexPlusOne = ui32NextIndexPlusOne; + } + +#ifdef DEBUG + if (psBase->ui32TotalHandCountPreBatch != psBase->ui32TotalHandCount) + { + IMG_UINT32 ui32Delta = psBase->ui32TotalHandCount - psBase->ui32TotalHandCountPreBatch; + + PVR_ASSERT(psBase->ui32TotalHandCount > psBase->ui32TotalHandCountPreBatch); + + PVR_DPF((PVR_DBG_WARNING, "PVRSRVHandleBatchCommitOrRelease: The batch size was too small. Batch size was %u, but needs to be %u", psBase->ui32HandBatchSize, psBase->ui32HandBatchSize + ui32Delta)); + + } +#endif + + psBase->ui32HandBatchSize = 0; + psBase->ui32FirstBatchIndexPlusOne = 0; + psBase->ui32TotalHandCountPreBatch = 0; + psBase->ui32BatchHandAllocFailures = 0; + + if (psBase->ui32BatchHandAllocFailures != 0 && bCommit) + { + PVR_ASSERT(!bCommitBatch); + + return PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE; + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVCommitHandleBatch + + @Description Commit a handle batch + + @Input psBase - handle base + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase) +{ + return PVRSRVHandleBatchCommitOrRelease(psBase, IMG_TRUE); +} + +/*! 
+****************************************************************************** + + @Function PVRSRReleaseHandleBatch + + @Description Release a handle batch + + @Input psBase - handle base + + @Return none + +******************************************************************************/ +IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase) +{ + (IMG_VOID) PVRSRVHandleBatchCommitOrRelease(psBase, IMG_FALSE); +} + +/*! +****************************************************************************** + + @Function PVRSRVSetMaxHandle + + @Description Set maximum handle number for given handle base + + @Input psBase - pointer to handle base structure + ui32MaxHandle - Maximum handle number + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle) +{ + IMG_UINT32 ui32MaxHandleRounded; + + if (HANDLES_BATCHED(psBase)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit cannot be set whilst in batch mode")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Validate the limit */ + if (ui32MaxHandle == 0 || ui32MaxHandle > DEFAULT_MAX_HANDLE) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit must be between %u and %u, inclusive", 0, DEFAULT_MAX_HANDLE)); + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* The limit can only be set if no handles have been allocated */ + if (psBase->ui32TotalHandCount != 0) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit cannot be set because handles have already been allocated")); + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + ui32MaxHandleRounded = ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(ui32MaxHandle); + + /* + * Allow the maximum number of handles to be reduced, but never to + * zero. 
+ */ + if (ui32MaxHandleRounded != 0 && ui32MaxHandleRounded < psBase->ui32MaxIndexPlusOne) + { + psBase->ui32MaxIndexPlusOne = ui32MaxHandleRounded; + } + + PVR_ASSERT(psBase->ui32MaxIndexPlusOne != 0); + PVR_ASSERT(psBase->ui32MaxIndexPlusOne <= DEFAULT_MAX_INDEX_PLUS_ONE); + PVR_ASSERT((psBase->ui32MaxIndexPlusOne % HANDLE_BLOCK_SIZE) == 0); + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVGetMaxHandle + + @Description Get maximum handle number for given handle base + + @Input psBase - pointer to handle base structure + + @Output Maximum handle number, or 0 if handle limits not + supported. + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase) +{ + return psBase->ui32MaxIndexPlusOne; +} + +/*! +****************************************************************************** + + @Function PVRSRVEnableHandlePurging + + @Description Enable purging for a given handle base + + @Input psBase - pointer to handle base structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase) +{ + if (psBase->bPurgingEnabled) + { + PVR_DPF((PVR_DBG_WARNING, "PVRSRVEnableHandlePurging: Purging already enabled")); + return PVRSRV_OK; + } + + /* Purging can only be enabled if no handles have been allocated */ + if (psBase->ui32TotalHandCount != 0) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVEnableHandlePurging: Handles have already been allocated")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psBase->bPurgingEnabled = IMG_TRUE; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVPurgeHandles + + @Description Purge handles for a given handle base + + @Input psBase - pointer to handle base structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase) +{ + IMG_UINT32 ui32BlockIndex; + IMG_UINT32 ui32NewHandCount; + + if (!psBase->bPurgingEnabled) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Purging not enabled for this handle base")); + return PVRSRV_ERROR_NOT_SUPPORTED; + } + + if (HANDLES_BATCHED(psBase)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Purging not allowed whilst in batch mode")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PVR_ASSERT((psBase->ui32TotalHandCount % HANDLE_BLOCK_SIZE) == 0); + + for (ui32BlockIndex = INDEX_TO_BLOCK_INDEX(psBase->ui32TotalHandCount); ui32BlockIndex != 0; ui32BlockIndex--) + { + if (psBase->psHandleArray[ui32BlockIndex - 1].ui32FreeHandBlockCount != HANDLE_BLOCK_SIZE) + { + break; + } + } + ui32NewHandCount = BLOCK_INDEX_TO_INDEX(ui32BlockIndex); + + /* + * Check for a suitable decrease in the handle count. + */ + if (ui32NewHandCount <= (psBase->ui32TotalHandCount/2)) + { + PVRSRV_ERROR eError; + + // PVR_TRACE((" PVRSRVPurgeHandles: reducing number of handles from %u to %u", psBase->ui32TotalHandCount, ui32NewHandCount)); + + eError = ReallocHandleArray(psBase, ui32NewHandCount); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVAllocHandleBase + + @Description Allocate a handle base structure for a process + + @Input ppsBase - pointer to handle base structure pointer + + @Output ppsBase - points to handle base structure pointer + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase) +{ + PVRSRV_HANDLE_BASE *psBase; + IMG_HANDLE hBlockAlloc; + PVRSRV_ERROR eError; + + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(*psBase), + (IMG_PVOID *)&psBase, + &hBlockAlloc, + "Handle Base"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't allocate handle base (%d)", eError)); + return eError; + } + OSMemSet(psBase, 0, sizeof(*psBase)); + + /* Create hash table */ + psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, sizeof(HAND_KEY), HASH_Func_Default, HASH_Key_Comp_Default); + if (psBase->psHashTab == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't create data pointer hash table\n")); + (IMG_VOID)PVRSRVFreeHandleBase(psBase); + return PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE; + } + + psBase->hBaseBlockAlloc = hBlockAlloc; + + psBase->ui32MaxIndexPlusOne = DEFAULT_MAX_INDEX_PLUS_ONE; + + *ppsBase = psBase; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVFreeHandleBase + + @Description Free a handle base structure + + @Input psBase - pointer to handle base structure + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(psBase != gpsKernelHandleBase); + + eError = FreeHandleBase(psBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeHandleBase: FreeHandleBase failed (%d)", eError)); + } + + return eError; +} + +/*! +****************************************************************************** + + @Function PVRSRVHandleInit + + @Description Initialise handle management + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(gpsKernelHandleBase == IMG_NULL); + + eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleInit: PVRSRVAllocHandleBase failed (%d)", eError)); + goto error; + } + + eError = PVRSRVEnableHandlePurging(gpsKernelHandleBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleInit: PVRSRVEnableHandlePurging failed (%d)", eError)); + goto error; + } + + return PVRSRV_OK; +error: + (IMG_VOID) PVRSRVHandleDeInit(); + return eError; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVHandleDeInit + + @Description De-initialise handle management + + @Return Error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if (gpsKernelHandleBase != IMG_NULL) + { + eError = FreeHandleBase(gpsKernelHandleBase); + if (eError == PVRSRV_OK) + { + gpsKernelHandleBase = IMG_NULL; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleDeInit: FreeHandleBase failed (%d)", eError)); + } + } + + return eError; +} +#else +/* disable warning about empty module */ +#endif /* #if defined(PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) */ +/****************************************************************************** + End of file (handle.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/common/hash.c b/pvr-source/services4/srvkm/common/hash.c new file mode 100644 index 0000000..1569425 --- /dev/null +++ b/pvr-source/services4/srvkm/common/hash.c @@ -0,0 +1,738 @@ +/*************************************************************************/ /*! +@Title Self scaling hash tables. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description + Implements simple self scaling hash tables. Hash collisions are + handled by chaining entries together. Hash tables are increased in + size when they become more than (50%?) full and decreased in size + when less than (25%?) full. Hash tables are never decreased below + their initial size. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "pvr_debug.h" +#include "img_defs.h" +#include "services.h" +#include "servicesint.h" +#include "hash.h" +#include "osfunc.h" + +#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b)) + +#define KEY_TO_INDEX(pHash, key, uSize) \ + ((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize)) + +#define KEY_COMPARE(pHash, pKey1, pKey2) \ + ((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2))) + +/* Each entry in a hash table is placed into a bucket */ +struct _BUCKET_ +{ + /* the next bucket on the same chain */ + struct _BUCKET_ *pNext; + + /* entry value */ + IMG_UINTPTR_T v; + + /* entry key */ + IMG_UINTPTR_T k[]; /* PRQA S 0642 */ /* override dynamic array declaration warning */ +}; +typedef struct _BUCKET_ BUCKET; + +struct _HASH_TABLE_ +{ + /* the hash table array */ + BUCKET **ppBucketTable; + + /* current size of the hash table */ + IMG_UINT32 uSize; + + /* number of entries currently in the hash table */ + IMG_UINT32 uCount; + + /* the minimum size that the hash table should be re-sized to */ + IMG_UINT32 uMinimumSize; + + /* size of key in bytes */ + IMG_UINT32 uKeySize; + + /* hash function */ + HASH_FUNC *pfnHashFunc; + + /* key comparison function */ + HASH_KEY_COMP *pfnKeyComp; +}; + +/*! +****************************************************************************** + @Function HASH_Func_Default + + @Description Hash function intended for hashing keys composed of + IMG_UINTPTR_T arrays. + + @Input uKeySize - the size of the hash key, in bytes. + @Input pKey - a pointer to the key to hash. + @Input uHashTabLen - the length of the hash table. + + @Return the hash value. 
+******************************************************************************/ +IMG_UINT32 +HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen) +{ + IMG_UINTPTR_T *p = (IMG_UINTPTR_T *)pKey; + IMG_UINT32 uKeyLen = (IMG_UINT32)(uKeySize / sizeof(IMG_UINTPTR_T)); + IMG_UINT32 ui; + IMG_UINT32 uHashKey = 0; + + PVR_UNREFERENCED_PARAMETER(uHashTabLen); + + PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0); + + for (ui = 0; ui < uKeyLen; ui++) + { + IMG_UINT32 uHashPart = (IMG_UINT32)*p++; + + uHashPart += (uHashPart << 12); + uHashPart ^= (uHashPart >> 22); + uHashPart += (uHashPart << 4); + uHashPart ^= (uHashPart >> 9); + uHashPart += (uHashPart << 10); + uHashPart ^= (uHashPart >> 2); + uHashPart += (uHashPart << 7); + uHashPart ^= (uHashPart >> 12); + + uHashKey += uHashPart; + } + + return uHashKey; +} + +/*! +****************************************************************************** + @Function HASH_Key_Comp_Default + + @Description Compares keys composed of IMG_UINTPTR_T arrays. + + @Input uKeySize - the size of the hash key, in bytes. + @Input pKey1 - pointer to first hash key to compare. + @Input pKey2 - pointer to second hash key to compare. + @Return IMG_TRUE - the keys match. + IMG_FALSE - the keys don't match. +******************************************************************************/ +IMG_BOOL +HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2) +{ + IMG_UINTPTR_T *p1 = (IMG_UINTPTR_T *)pKey1; + IMG_UINTPTR_T *p2 = (IMG_UINTPTR_T *)pKey2; + IMG_UINT32 uKeyLen = (IMG_UINT32)(uKeySize / sizeof(IMG_UINTPTR_T)); + IMG_UINT32 ui; + + PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0); + + for (ui = 0; ui < uKeyLen; ui++) + { + if (*p1++ != *p2++) + return IMG_FALSE; + } + + return IMG_TRUE; +} + +/*! +****************************************************************************** + @Function _ChainInsert + + @Description Insert a bucket into the appropriate hash table chain. 
+ + @Input pBucket - the bucket + @Input ppBucketTable - the hash table + @Input uSize - the size of the hash table + + @Return PVRSRV_ERROR +******************************************************************************/ +static PVRSRV_ERROR +_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize) +{ + IMG_UINT32 uIndex; + + PVR_ASSERT (pBucket != IMG_NULL); + PVR_ASSERT (ppBucketTable != IMG_NULL); + PVR_ASSERT (uSize != 0); + + if ((pBucket == IMG_NULL) || (ppBucketTable == IMG_NULL) || (uSize == 0)) + { + PVR_DPF((PVR_DBG_ERROR, "_ChainInsert: invalid parameter")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize); /* PRQA S 0432,0541 */ /* ignore dynamic array warning */ + pBucket->pNext = ppBucketTable[uIndex]; + ppBucketTable[uIndex] = pBucket; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + @Function _Rehash + + @Description Iterate over every entry in an old hash table and + rehash into the new table. + + @Input ppOldTable - the old hash table + @Input uOldSize - the size of the old hash table + @Input ppNewTable - the new hash table + @Input uNewSize - the size of the new hash table + + @Return None +******************************************************************************/ +static PVRSRV_ERROR +_Rehash (HASH_TABLE *pHash, + BUCKET **ppOldTable, IMG_UINT32 uOldSize, + BUCKET **ppNewTable, IMG_UINT32 uNewSize) +{ + IMG_UINT32 uIndex; + for (uIndex=0; uIndex< uOldSize; uIndex++) + { + BUCKET *pBucket; + pBucket = ppOldTable[uIndex]; + while (pBucket != IMG_NULL) + { + PVRSRV_ERROR eError; + BUCKET *pNextBucket = pBucket->pNext; + eError = _ChainInsert (pHash, pBucket, ppNewTable, uNewSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "_Rehash: call to _ChainInsert failed")); + return eError; + } + pBucket = pNextBucket; + } + } + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + @Function _Resize + + @Description Attempt to resize a hash table, failure to allocate a + new larger hash table is not considered a hard failure. + We simply continue and allow the table to fill up, the + effect is to allow hash chains to become longer. + + @Input pHash - Hash table to resize. + @Input uNewSize - Required table size. + @Return IMG_TRUE Success + IMG_FALSE Failed +******************************************************************************/ +static IMG_BOOL +_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize) +{ + if (uNewSize != pHash->uSize) + { + BUCKET **ppNewTable; + IMG_UINT32 uIndex; + + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Resize: oldsize=0x%x newsize=0x%x count=0x%x", + pHash->uSize, uNewSize, pHash->uCount)); + + OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof (BUCKET *) * uNewSize, + (IMG_PVOID*)&ppNewTable, IMG_NULL, + "Hash Table Buckets"); + if (ppNewTable == IMG_NULL) + return IMG_FALSE; + + for (uIndex=0; uIndex<uNewSize; uIndex++) + ppNewTable[uIndex] = IMG_NULL; + + if (_Rehash (pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize) != PVRSRV_OK) + { + return IMG_FALSE; + } + + OSFreeMem (PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *)*pHash->uSize, pHash->ppBucketTable, IMG_NULL); + /*not nulling pointer, being reassigned just below*/ + pHash->ppBucketTable = ppNewTable; + pHash->uSize = uNewSize; + } + return IMG_TRUE; +} + + +/*! +****************************************************************************** + @Function HASH_Create_Extended + + @Description Create a self scaling hash table, using the supplied + key size, and the supplied hash and key comparsion + functions. + + @Input uInitialLen - initial and minimum length of the + hash table, where the length refers to the number + of entries in the hash table, not its size in + bytes. + @Input uKeySize - the size of the key, in bytes. + @Input pfnHashFunc - pointer to hash function. 
+ @Input pfnKeyComp - pointer to key comparsion function. + @Return IMG_NULL or hash table handle. +******************************************************************************/ +HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp) +{ + HASH_TABLE *pHash; + IMG_UINT32 uIndex; + + PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen)); + + if(OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(HASH_TABLE), + (IMG_VOID **)&pHash, IMG_NULL, + "Hash Table") != PVRSRV_OK) + { + return IMG_NULL; + } + + pHash->uCount = 0; + pHash->uSize = uInitialLen; + pHash->uMinimumSize = uInitialLen; + pHash->uKeySize = (IMG_UINT32)uKeySize; + pHash->pfnHashFunc = pfnHashFunc; + pHash->pfnKeyComp = pfnKeyComp; + + OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof (BUCKET *) * pHash->uSize, + (IMG_PVOID*)&pHash->ppBucketTable, IMG_NULL, + "Hash Table Buckets"); + + if (pHash->ppBucketTable == IMG_NULL) + { + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash, IMG_NULL); + /*not nulling pointer, out of scope*/ + return IMG_NULL; + } + + for (uIndex=0; uIndex<pHash->uSize; uIndex++) + pHash->ppBucketTable[uIndex] = IMG_NULL; + return pHash; +} + +/*! +****************************************************************************** + @Function HASH_Create + + @Description Create a self scaling hash table with a key + consisting of a single IMG_UINTPTR_T, and using + the default hash and key comparison functions. + + @Input uInitialLen - initial and minimum length of the + hash table, where the length refers to the + number of entries in the hash table, not its size + in bytes. + @Return IMG_NULL or hash table handle. +******************************************************************************/ +HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen) +{ + return HASH_Create_Extended(uInitialLen, sizeof(IMG_UINTPTR_T), + &HASH_Func_Default, &HASH_Key_Comp_Default); +} + +/*! 
+****************************************************************************** + @Function HASH_Delete + + @Description Delete a hash table created by HASH_Create_Extended or + HASH_Create. All entries in the table must have been + removed before calling this function. + + @Input pHash - hash table + + @Return None +******************************************************************************/ +IMG_VOID +HASH_Delete (HASH_TABLE *pHash) +{ + if (pHash != IMG_NULL) + { + PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Delete")); + + PVR_ASSERT (pHash->uCount==0); + if(pHash->uCount != 0) + { + PVR_DPF ((PVR_DBG_ERROR, "HASH_Delete: leak detected in hash table!")); + PVR_DPF ((PVR_DBG_ERROR, "Likely Cause: client drivers not freeing alocations before destroying devmemcontext")); + } + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *)*pHash->uSize, pHash->ppBucketTable, IMG_NULL); + pHash->ppBucketTable = IMG_NULL; + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash, IMG_NULL); + /*not nulling pointer, copy on stack*/ + } +} + +/*! +****************************************************************************** + @Function HASH_Insert_Extended + + @Description Insert a key value pair into a hash table created + with HASH_Create_Extended. + + @Input pHash - the hash table. + @Input pKey - pointer to the key. + @Input v - the value associated with the key. 
+ + @Return IMG_TRUE - success + IMG_FALSE - failure +******************************************************************************/ +IMG_BOOL +HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v) +{ + BUCKET *pBucket; + + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Insert_Extended: Hash=0x%08x, pKey=0x%08x, v=0x%x", + (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey, v)); + + PVR_ASSERT (pHash != IMG_NULL); + + if (pHash == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "HASH_Insert_Extended: invalid parameter")); + return IMG_FALSE; + } + + if(OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(BUCKET) + pHash->uKeySize, + (IMG_VOID **)&pBucket, IMG_NULL, + "Hash Table entry") != PVRSRV_OK) + { + return IMG_FALSE; + } + + pBucket->v = v; + /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k (linux)*/ + OSMemCopy(pBucket->k, pKey, pHash->uKeySize); + if (_ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize) != PVRSRV_OK) + { + OSFreeMem(PVRSRV_PAGEABLE_SELECT, + sizeof(BUCKET) + pHash->uKeySize, + pBucket, IMG_NULL); + return IMG_FALSE; + } + + pHash->uCount++; + + /* check if we need to think about re-balencing */ + if (pHash->uCount << 1 > pHash->uSize) + { + /* Ignore the return code from _Resize because the hash table is + still in a valid state and although not ideally sized, it is still + functional */ + _Resize (pHash, pHash->uSize << 1); + } + + + return IMG_TRUE; +} + +/*! +****************************************************************************** + @Function HASH_Insert + + @Description Insert a key value pair into a hash table created with + HASH_Create. + + @Input pHash - the hash table. + @Input k - the key value. + @Input v - the value associated with the key. + + @Return IMG_TRUE - success. + IMG_FALSE - failure. 
+******************************************************************************/ +IMG_BOOL +HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v) +{ + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Insert: Hash=0x%x, k=0x%x, v=0x%x", + (IMG_UINTPTR_T)pHash, k, v)); + + return HASH_Insert_Extended(pHash, &k, v); +} + +/*! +****************************************************************************** + @Function HASH_Remove_Extended + + @Description Remove a key from a hash table created with + HASH_Create_Extended. + + @Input pHash - the hash table. + @Input pKey - pointer to key. + + @Return 0 if the key is missing, or the value associated + with the key. +******************************************************************************/ +IMG_UINTPTR_T +HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey) +{ + BUCKET **ppBucket; + IMG_UINT32 uIndex; + + PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove_Extended: Hash=0x%x, pKey=0x%x", + (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey)); + + PVR_ASSERT (pHash != IMG_NULL); + + if (pHash == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "HASH_Remove_Extended: Null hash table")); + return 0; + } + + uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize); + + for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext)) + { + /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */ + if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) + { + BUCKET *pBucket = *ppBucket; + IMG_UINTPTR_T v = pBucket->v; + (*ppBucket) = pBucket->pNext; + + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET) + pHash->uKeySize, pBucket, IMG_NULL); + /*not nulling original pointer, already overwritten*/ + + pHash->uCount--; + + /* check if we need to think about re-balencing */ + if (pHash->uSize > (pHash->uCount << 2) && + pHash->uSize > pHash->uMinimumSize) + { + /* Ignore the return code from _Resize because the + hash table is still in a valid state and although + not ideally sized, it is still functional */ + _Resize 
(pHash, + PRIVATE_MAX (pHash->uSize >> 1, + pHash->uMinimumSize)); + } + + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Remove_Extended: Hash=0x%x, pKey=0x%x = 0x%x", + (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey, v)); + return v; + } + } + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Remove_Extended: Hash=0x%x, pKey=0x%x = 0x0 !!!!", + (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey)); + return 0; +} + +/*! +****************************************************************************** + @Function HASH_Remove + + @Description Remove a key value pair from a hash table created + with HASH_Create. + + @Input pHash - the hash table + @Input k - the key + + @Return 0 if the key is missing, or the value associated + with the key. +******************************************************************************/ +IMG_UINTPTR_T +HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k) +{ + PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove: Hash=0x%x, k=0x%x", + (IMG_UINTPTR_T)pHash, k)); + + return HASH_Remove_Extended(pHash, &k); +} + +/*! +****************************************************************************** + @Function HASH_Retrieve_Extended + + @Description Retrieve a value from a hash table created with + HASH_Create_Extended. + + @Input pHash - the hash table. + @Input pKey - pointer to the key. + + @Return 0 if the key is missing, or the value associated with + the key. 
+******************************************************************************/ +IMG_UINTPTR_T +HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey) +{ + BUCKET **ppBucket; + IMG_UINT32 uIndex; + + PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve_Extended: Hash=0x%x, pKey=0x%x", + (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey)); + + PVR_ASSERT (pHash != IMG_NULL); + + if (pHash == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "HASH_Retrieve_Extended: Null hash table")); + return 0; + } + + uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize); + + for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext)) + { + /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */ + if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey)) + { + BUCKET *pBucket = *ppBucket; + IMG_UINTPTR_T v = pBucket->v; + + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Retrieve: Hash=0x%x, pKey=0x%x = 0x%x", + (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey, v)); + return v; + } + } + PVR_DPF ((PVR_DBG_MESSAGE, + "HASH_Retrieve: Hash=0x%x, pKey=0x%x = 0x0 !!!!", + (IMG_UINTPTR_T)pHash, (IMG_UINTPTR_T)pKey)); + return 0; +} + +/*! +****************************************************************************** + @Function HASH_Retrieve + + @Description Retrieve a value from a hash table created with + HASH_Create. + + @Input pHash - the hash table + @Input k - the key + @Return 0 if the key is missing, or the value associated with + the key. +******************************************************************************/ +IMG_UINTPTR_T +HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k) +{ + PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=0x%x, k=0x%x", + (IMG_UINTPTR_T)pHash, k)); + return HASH_Retrieve_Extended(pHash, &k); +} + +/*! 
+****************************************************************************** + @Function HASH_Iterate + + @Description Iterate over every entry in the hash table + + @Input pHash - the old hash table + @Input pfnCallback - the size of the old hash table + + @Return Callback error if any, otherwise PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR +HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback) +{ + IMG_UINT32 uIndex; + for (uIndex=0; uIndex < pHash->uSize; uIndex++) + { + BUCKET *pBucket; + pBucket = pHash->ppBucketTable[uIndex]; + while (pBucket != IMG_NULL) + { + PVRSRV_ERROR eError; + BUCKET *pNextBucket = pBucket->pNext; + + eError = pfnCallback((IMG_UINTPTR_T) ((IMG_VOID *) *(pBucket->k)), (IMG_UINTPTR_T) pBucket->v); + + /* The callback might want us to break out early */ + if (eError != PVRSRV_OK) + return eError; + + pBucket = pNextBucket; + } + } + return PVRSRV_OK; +} + +#ifdef HASH_TRACE +/*! +****************************************************************************** + @Function HASH_Dump + + @Description To dump the contents of a hash table in human readable + form. 
+ + @Input pHash - the hash table + + @Return None +******************************************************************************/ +IMG_VOID +HASH_Dump (HASH_TABLE *pHash) +{ + IMG_UINT32 uIndex; + IMG_UINT32 uMaxLength=0; + IMG_UINT32 uEmptyCount=0; + + PVR_ASSERT (pHash != IMG_NULL); + for (uIndex=0; uIndex<pHash->uSize; uIndex++) + { + BUCKET *pBucket; + IMG_UINT32 uLength = 0; + if (pHash->ppBucketTable[uIndex] == IMG_NULL) + { + uEmptyCount++; + } + for (pBucket=pHash->ppBucketTable[uIndex]; + pBucket != IMG_NULL; + pBucket = pBucket->pNext) + { + uLength++; + } + uMaxLength = PRIVATE_MAX (uMaxLength, uLength); + } + + PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d", + pHash->uMinimumSize, pHash->uSize, pHash->uCount)); + PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength)); +} +#endif diff --git a/pvr-source/services4/srvkm/common/lists.c b/pvr-source/services4/srvkm/common/lists.c new file mode 100644 index 0000000..c6e1ee8 --- /dev/null +++ b/pvr-source/services4/srvkm/common/lists.c @@ -0,0 +1,156 @@ +/*************************************************************************/ /*! +@Title Linked list shared functions implementation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implementation of the list iterators for types shared among + more than one file in the services code. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "lists.h" +#include "services_headers.h" + +/*=================================================================== + LIST ITERATOR FUNCTIONS USED IN MORE THAN ONE FILE (those used just + once are implemented locally). + ===================================================================*/ + +IMPLEMENT_LIST_ANY_VA(BM_HEAP) +IMPLEMENT_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_FOR_EACH_VA(BM_HEAP) +IMPLEMENT_LIST_REMOVE(BM_HEAP) +IMPLEMENT_LIST_INSERT(BM_HEAP) + +IMPLEMENT_LIST_ANY_VA(BM_CONTEXT) +IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, IMG_HANDLE, IMG_NULL) +IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_FOR_EACH(BM_CONTEXT) +IMPLEMENT_LIST_REMOVE(BM_CONTEXT) +IMPLEMENT_LIST_INSERT(BM_CONTEXT) + +IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_INSERT(PVRSRV_DEVICE_NODE) +IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE) + +IMPLEMENT_LIST_ANY_VA(PVRSRV_POWER_DEV) +IMPLEMENT_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK) +IMPLEMENT_LIST_INSERT(PVRSRV_POWER_DEV) +IMPLEMENT_LIST_REMOVE(PVRSRV_POWER_DEV) + + +/*=================================================================== + BELOW ARE IMPLEMENTED SOME COMMON CALLBACKS USED IN DIFFERENT FILES + ===================================================================*/ + + +/*! +****************************************************************************** + @Function MatchDeviceKM_AnyVaCb + @Description Matchs a device node with an id and optionally a class. + + @Input psDeviceNode - Pointer to the device node. 
+ @Input va - Variable argument list, with te following values: + # ui32DevIndex - Index of de device to match. + # bIgnoreClass - Flag indicating if there's + no need to check the device class. + # eDevClass - Device class, ONLY present if + bIgnoreClass was IMG_FALSE. + + @Return The pointer to the device node if it matchs, IMG_NULL + otherwise. +******************************************************************************/ +IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va) +{ + IMG_UINT32 ui32DevIndex; + IMG_BOOL bIgnoreClass; + PVRSRV_DEVICE_CLASS eDevClass; + + ui32DevIndex = va_arg(va, IMG_UINT32); + bIgnoreClass = va_arg(va, IMG_BOOL); + if (!bIgnoreClass) + { + eDevClass = va_arg(va, PVRSRV_DEVICE_CLASS); + } + else + { + /*this value will never be used, since the short circuit evaluation + of the first clause will stop because bIgnoreClass is true, but the + compiler complains if it's not initialized.*/ + eDevClass = PVRSRV_DEVICE_CLASS_FORCE_I32; + } + + if ((bIgnoreClass || psDeviceNode->sDevId.eDeviceClass == eDevClass) && + psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex) + { + return psDeviceNode; + } + return IMG_NULL; +} + +/*! +****************************************************************************** + + @Function MatchPowerDeviceIndex_AnyVaCb + + @Description + Matches a power device with its device index. + + @Input va : variable argument list with: + ui32DeviceIndex : device index + + @Return the pointer to the device it matched, IMG_NULL otherwise. 
+ +******************************************************************************/ +IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va) +{ + IMG_UINT32 ui32DeviceIndex; + + ui32DeviceIndex = va_arg(va, IMG_UINT32); + + if (psPowerDev->ui32DeviceIndex == ui32DeviceIndex) + { + return psPowerDev; + } + else + { + return IMG_NULL; + } +} diff --git a/pvr-source/services4/srvkm/common/mem.c b/pvr-source/services4/srvkm/common/mem.c new file mode 100644 index 0000000..cccdd24 --- /dev/null +++ b/pvr-source/services4/srvkm/common/mem.c @@ -0,0 +1,175 @@ +/*************************************************************************/ /*! +@Title System memory functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description System memory allocation APIs +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "services_headers.h" +#include "pvr_bridge_km.h" + + +static PVRSRV_ERROR +FreeSharedSysMemCallBack(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bDummy) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = pvParam; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + OSFreePages(psKernelMemInfo->ui32Flags, + psKernelMemInfo->uAllocSize, + psKernelMemInfo->pvLinAddrKM, + psKernelMemInfo->sMemBlk.hOSMemHandle); + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO), + psKernelMemInfo, + IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + + +IMG_EXPORT PVRSRV_ERROR +PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_UINT32 ui32Flags, + IMG_SIZE_T uSize, + PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo) +{ + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO), + (IMG_VOID **)&psKernelMemInfo, IMG_NULL, + "Kernel Memory Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for meminfo")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + OSMemSet(psKernelMemInfo, 0, sizeof(*psKernelMemInfo)); + + ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK; + ui32Flags |= PVRSRV_HAP_MULTI_PROCESS; + psKernelMemInfo->ui32Flags = ui32Flags; + psKernelMemInfo->uAllocSize = uSize; + + if(OSAllocPages(psKernelMemInfo->ui32Flags, + psKernelMemInfo->uAllocSize, + (IMG_UINT32)HOST_PAGESIZE(), + IMG_NULL, + 0, + IMG_NULL, + &psKernelMemInfo->pvLinAddrKM, + &psKernelMemInfo->sMemBlk.hOSMemHandle) + != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for block")); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO), + psKernelMemInfo, + 0); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* register with the resman */ + 
psKernelMemInfo->sMemBlk.hResItem = + ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_SHARED_MEM_INFO, + psKernelMemInfo, + 0, + &FreeSharedSysMemCallBack); + + *ppsKernelMemInfo = psKernelMemInfo; + + return PVRSRV_OK; +} + + +IMG_EXPORT PVRSRV_ERROR +PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + PVRSRV_ERROR eError; + + if(psKernelMemInfo->sMemBlk.hResItem) + { + eError = ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem, CLEANUP_WITH_POLL); + } + else + { + eError = FreeSharedSysMemCallBack(psKernelMemInfo, 0, CLEANUP_WITH_POLL); + } + + return eError; +} + + +IMG_EXPORT PVRSRV_ERROR +PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if(!psKernelMemInfo) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if(psKernelMemInfo->sMemBlk.hResItem) + { + eError = ResManDissociateRes(psKernelMemInfo->sMemBlk.hResItem, IMG_NULL); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDissociateMemFromResmanKM: ResManDissociateRes failed")); + PVR_DBG_BREAK; + return eError; + } + + psKernelMemInfo->sMemBlk.hResItem = IMG_NULL; + } + + return eError; +} + +/****************************************************************************** + End of file (mem.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/common/mem_debug.c b/pvr-source/services4/srvkm/common/mem_debug.c new file mode 100644 index 0000000..04432b1 --- /dev/null +++ b/pvr-source/services4/srvkm/common/mem_debug.c @@ -0,0 +1,272 @@ +/*************************************************************************/ /*! +@Title Memory debugging routines. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Adds extra memory to the allocations to trace the memory bounds + and other runtime information. 
+@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef MEM_DEBUG_C +#define MEM_DEBUG_C + +#if defined(PVRSRV_DEBUG_OS_MEMORY) + +#include "img_types.h" +#include "services_headers.h" + +#if defined (__cplusplus) +extern "C" +{ +#endif + +#define STOP_ON_ERROR 0 + + /* + Allocated Memory Layout: + + --------- \ + Status [OSMEM_DEBUG_INFO] |- TEST_BUFFER_PADDING_STATUS + --------- < + [0xBB]* [raw bytes] |- ui32Size + --------- < + [0xB2]* [raw bytes] |- TEST_BUFFER_PADDING_AFTER + --------- / + */ + + IMG_BOOL MemCheck(const IMG_PVOID pvAddr, const IMG_UINT8 ui8Pattern, IMG_SIZE_T uSize) + { + IMG_UINT8 *pui8Addr; + for (pui8Addr = (IMG_UINT8*)pvAddr; uSize > 0; uSize--, pui8Addr++) + { + if (*pui8Addr != ui8Pattern) + { + return IMG_FALSE; + } + } + return IMG_TRUE; + } + + /* + This function expects the pointer to the user data, not the debug data. 
+ */ + IMG_VOID OSCheckMemDebug(IMG_PVOID pvCpuVAddr, IMG_SIZE_T uSize, const IMG_CHAR *pszFileName, const IMG_UINT32 uLine) + { + OSMEM_DEBUG_INFO const *psInfo = (OSMEM_DEBUG_INFO *)((IMG_UINT32)pvCpuVAddr - TEST_BUFFER_PADDING_STATUS); + + /* invalid pointer */ + if (pvCpuVAddr == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : null pointer" + " - referenced %s:%d - allocated %s:%d", + pvCpuVAddr, + pszFileName, uLine, + psInfo->sFileName, psInfo->uLineNo)); + while (STOP_ON_ERROR); + } + + /* align */ + if (((IMG_UINT32)pvCpuVAddr&3) != 0) + { + PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : invalid alignment" + " - referenced %s:%d - allocated %s:%d", + pvCpuVAddr, + pszFileName, uLine, + psInfo->sFileName, psInfo->uLineNo)); + while (STOP_ON_ERROR); + } + + /*check guard region before*/ + if (!MemCheck((IMG_PVOID)psInfo->sGuardRegionBefore, 0xB1, sizeof(psInfo->sGuardRegionBefore))) + { + PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : guard region before overwritten" + " - referenced %s:%d - allocated %s:%d", + pvCpuVAddr, + pszFileName, uLine, + psInfo->sFileName, psInfo->uLineNo)); + while (STOP_ON_ERROR); + } + + /*check size*/ + if (uSize != psInfo->uSize) + { + PVR_DPF((PVR_DBG_WARNING, "Pointer 0x%X : supplied size was different to stored size (0x%X != 0x%X)" + " - referenced %s:%d - allocated %s:%d", + pvCpuVAddr, uSize, psInfo->uSize, + pszFileName, uLine, + psInfo->sFileName, psInfo->uLineNo)); + while (STOP_ON_ERROR); + } + + /*check size parity*/ + if ((0x01234567 ^ psInfo->uSizeParityCheck) != psInfo->uSize) + { + PVR_DPF((PVR_DBG_WARNING, "Pointer 0x%X : stored size parity error (0x%X != 0x%X)" + " - referenced %s:%d - allocated %s:%d", + pvCpuVAddr, psInfo->uSize, 0x01234567 ^ psInfo->uSizeParityCheck, + pszFileName, uLine, + psInfo->sFileName, psInfo->uLineNo)); + while (STOP_ON_ERROR); + } + else + { + /*the stored size is ok, so we use it instead the supplied uSize*/ + uSize = psInfo->uSize; + } + + /*check padding after*/ + if (uSize) + { + if 
(!MemCheck((IMG_VOID*)((IMG_UINT32)pvCpuVAddr + uSize), 0xB2, TEST_BUFFER_PADDING_AFTER)) + { + PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : guard region after overwritten" + " - referenced from %s:%d - allocated from %s:%d", + pvCpuVAddr, + pszFileName, uLine, + psInfo->sFileName, psInfo->uLineNo)); + } + } + + /* allocated... */ + if (psInfo->eValid != isAllocated) + { + PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : not allocated (freed? %d)" + " - referenced %s:%d - freed %s:%d", + pvCpuVAddr, psInfo->eValid == isFree, + pszFileName, uLine, + psInfo->sFileName, psInfo->uLineNo)); + while (STOP_ON_ERROR); + } + } + + IMG_VOID debug_strcpy(IMG_CHAR *pDest, const IMG_CHAR *pSrc) + { + IMG_SIZE_T i = 0; + + for (; i < 128; i++) /*changed to 128 to match the filename array size*/ + { + *pDest = *pSrc; + if (*pSrc == '\0') break; + pDest++; + pSrc++; + } + } + + PVRSRV_ERROR OSAllocMem_Debug_Wrapper(IMG_UINT32 ui32Flags, + IMG_UINT32 ui32Size, + IMG_PVOID *ppvCpuVAddr, + IMG_HANDLE *phBlockAlloc, + IMG_CHAR *pszFilename, + IMG_UINT32 ui32Line) + { + OSMEM_DEBUG_INFO *psInfo; + + PVRSRV_ERROR eError; + + eError = OSAllocMem_Debug_Linux_Memory_Allocations(ui32Flags, + ui32Size + TEST_BUFFER_PADDING, + ppvCpuVAddr, + phBlockAlloc, + pszFilename, + ui32Line); + + if (eError != PVRSRV_OK) + { + return eError; + } + + OSMemSet((IMG_CHAR *)(*ppvCpuVAddr) + TEST_BUFFER_PADDING_STATUS, 0xBB, ui32Size); + OSMemSet((IMG_CHAR *)(*ppvCpuVAddr) + ui32Size + TEST_BUFFER_PADDING_STATUS, 0xB2, TEST_BUFFER_PADDING_AFTER); + + /*fill the dbg info struct*/ + psInfo = (OSMEM_DEBUG_INFO *)(*ppvCpuVAddr); + + OSMemSet(psInfo->sGuardRegionBefore, 0xB1, sizeof(psInfo->sGuardRegionBefore)); + debug_strcpy(psInfo->sFileName, pszFilename); + psInfo->uLineNo = ui32Line; + psInfo->eValid = isAllocated; + psInfo->uSize = ui32Size; + psInfo->uSizeParityCheck = 0x01234567 ^ ui32Size; + + /*point to the user data section*/ + *ppvCpuVAddr = (IMG_PVOID) ((IMG_UINT32)*ppvCpuVAddr)+TEST_BUFFER_PADDING_STATUS; + 
+#ifdef PVRSRV_LOG_MEMORY_ALLOCS + /*this is here to simplify the surounding logging macro, that is a expression + maybe the macro should be an expression */ + PVR_TRACE(("Allocated pointer (after debug info): 0x%X from %s:%d", *ppvCpuVAddr, pszFilename, ui32Line)); +#endif + + return PVRSRV_OK; + } + + PVRSRV_ERROR OSFreeMem_Debug_Wrapper(IMG_UINT32 ui32Flags, + IMG_UINT32 ui32Size, + IMG_PVOID pvCpuVAddr, + IMG_HANDLE hBlockAlloc, + IMG_CHAR *pszFilename, + IMG_UINT32 ui32Line) + { + OSMEM_DEBUG_INFO *psInfo; + + /*check dbginfo (arg pointing to user memory)*/ + OSCheckMemDebug(pvCpuVAddr, ui32Size, pszFilename, ui32Line); + + /*mark memory as freed*/ + OSMemSet(pvCpuVAddr, 0xBF, ui32Size + TEST_BUFFER_PADDING_AFTER); + + /*point to the starting address of the total allocated memory*/ + psInfo = (OSMEM_DEBUG_INFO *)((IMG_UINT32) pvCpuVAddr - TEST_BUFFER_PADDING_STATUS); + + /*update dbg info struct*/ + psInfo->uSize = 0; + psInfo->uSizeParityCheck = 0; + psInfo->eValid = isFree; + psInfo->uLineNo = ui32Line; + debug_strcpy(psInfo->sFileName, pszFilename); + + return OSFreeMem_Debug_Linux_Memory_Allocations(ui32Flags, ui32Size + TEST_BUFFER_PADDING, psInfo, hBlockAlloc, pszFilename, ui32Line); + } + +#if defined (__cplusplus) + +} +#endif + +#endif /* PVRSRV_DEBUG_OS_MEMORY */ + +#endif /* MEM_DEBUG_C */ diff --git a/pvr-source/services4/srvkm/common/metrics.c b/pvr-source/services4/srvkm/common/metrics.c new file mode 100644 index 0000000..7370ec1 --- /dev/null +++ b/pvr-source/services4/srvkm/common/metrics.c @@ -0,0 +1,209 @@ +/*************************************************************************/ /*! +@Title Time measuring functions. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "services_headers.h" +#include "metrics.h" + +/* VGX: */ +#if defined(SUPPORT_VGX) +#include "vgxapi_km.h" +#endif + +/* SGX: */ +#if defined(SUPPORT_SGX) +#include "sgxapi_km.h" +#endif + +#if defined(DEBUG) || defined(TIMING) + +static volatile IMG_UINT32 *pui32TimerRegister = 0; + +#define PVRSRV_TIMER_TOTAL_IN_TICKS(X) asTimers[X].ui32Total +#define PVRSRV_TIMER_TOTAL_IN_MS(X) ((1000*asTimers[X].ui32Total)/ui32TicksPerMS) +#define PVRSRV_TIMER_COUNT(X) asTimers[X].ui32Count + + +Temporal_Data asTimers[PVRSRV_NUM_TIMERS]; + + +/*********************************************************************************** + Function Name : PVRSRVTimeNow + Inputs : None + Outputs : None + Returns : Current timer register value + Description : Returns the current timer register value +************************************************************************************/ +IMG_UINT32 PVRSRVTimeNow(IMG_VOID) +{ + if (!pui32TimerRegister) + { + static IMG_BOOL bFirstTime = IMG_TRUE; + + if (bFirstTime) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVTimeNow: No timer register set up")); + + bFirstTime = IMG_FALSE; + } + + return 0; + } + +#if defined(__sh__) + + return (0xffffffff-*pui32TimerRegister); + +#else /* defined(__sh__) */ + + return 0; + +#endif /* defined(__sh__) */ +} + + +/*********************************************************************************** + Function Name : PVRSRVGetCPUFreq + Inputs : None + Outputs : None + Returns : CPU timer frequency + Description : Returns the CPU timer frequency +************************************************************************************/ +static IMG_UINT32 PVRSRVGetCPUFreq(IMG_VOID) +{ + IMG_UINT32 ui32Time1, ui32Time2; + + ui32Time1 = PVRSRVTimeNow(); + + OSWaitus(1000000); + + ui32Time2 = PVRSRVTimeNow(); + + PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetCPUFreq: timer frequency = %d Hz", ui32Time2 - ui32Time1)); + + return (ui32Time2 - 
ui32Time1); +} + + +/*********************************************************************************** + Function Name : PVRSRVSetupMetricTimers + Inputs : pvDevInfo + Outputs : None + Returns : None + Description : Resets metric timers and sets up the timer register +************************************************************************************/ +IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo) +{ + IMG_UINT32 ui32Loop; + + PVR_UNREFERENCED_PARAMETER(pvDevInfo); + + for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++) + { + asTimers[ui32Loop].ui32Total = 0; + asTimers[ui32Loop].ui32Count = 0; + } + + #if defined(__sh__) + + /* timer control register */ + // clock / 1024 when TIMER_DIVISOR = 4 + // underflow int disabled + // we get approx 38 uS per timer tick + *TCR_2 = TIMER_DIVISOR; + + /* reset the timer counter to 0 */ + *TCOR_2 = *TCNT_2 = (IMG_UINT)0xffffffff; + + /* start timer 2 */ + *TST_REG |= (IMG_UINT8)0x04; + + pui32TimerRegister = (IMG_UINT32 *)TCNT_2; + + #else /* defined(__sh__) */ + + pui32TimerRegister = 0; + + #endif /* defined(__sh__) */ +} + + +/*********************************************************************************** + Function Name : PVRSRVOutputMetricTotals + Inputs : None + Outputs : None + Returns : None + Description : Displays final metric data +************************************************************************************/ +IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID) +{ + IMG_UINT32 ui32TicksPerMS, ui32Loop; + + ui32TicksPerMS = PVRSRVGetCPUFreq(); + + if (!ui32TicksPerMS) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVOutputMetricTotals: Failed to get CPU Freq")); + return; + } + + for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++) + { + if (asTimers[ui32Loop].ui32Count & 0x80000000L) + { + PVR_DPF((PVR_DBG_WARNING,"PVRSRVOutputMetricTotals: Timer %u is still ON", ui32Loop)); + } + } +#if 0 + /* + ** EXAMPLE TIMER OUTPUT + */ + PVR_DPF((PVR_DBG_ERROR," Timer(%u): Total = %u",PVRSRV_TIMER_EXAMPLE_1, 
PVRSRV_TIMER_TOTAL_IN_TICKS(PVRSRV_TIMER_EXAMPLE_1))); + PVR_DPF((PVR_DBG_ERROR," Timer(%u): Time = %ums",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_MS(PVRSRV_TIMER_EXAMPLE_1))); + PVR_DPF((PVR_DBG_ERROR," Timer(%u): Count = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_COUNT(PVRSRV_TIMER_EXAMPLE_1))); +#endif +} + +#endif /* defined(DEBUG) || defined(TIMING) */ + +/****************************************************************************** + End of file (metrics.c) +******************************************************************************/ + diff --git a/pvr-source/services4/srvkm/common/osfunc_common.c b/pvr-source/services4/srvkm/common/osfunc_common.c new file mode 100644 index 0000000..19ba9ea --- /dev/null +++ b/pvr-source/services4/srvkm/common/osfunc_common.c @@ -0,0 +1,48 @@ +/*************************************************************************/ /*! +@Title Wrapper layer for osfunc routines that have common code. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Adds extra memory to the allocations to trace the memory bounds + and other runtime information. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_types.h" +#include "services_headers.h" +#include "osfunc.h" + + diff --git a/pvr-source/services4/srvkm/common/pdump_common.c b/pvr-source/services4/srvkm/common/pdump_common.c new file mode 100644 index 0000000..2d96dc3 --- /dev/null +++ b/pvr-source/services4/srvkm/common/pdump_common.c @@ -0,0 +1,2967 @@ +/*************************************************************************/ /*! +@Title Common PDump functions +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if defined(PDUMP) +#include <stdarg.h> + +#include "services_headers.h" +#include "perproc.h" + +/* pdump headers */ +#include "pdump_km.h" +#include "pdump_int.h" + +/* Allow temporary buffer size override */ +#if !defined(PDUMP_TEMP_BUFFER_SIZE) +#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024U) +#endif + +/* DEBUG */ +#if 1 +#define PDUMP_DBG(a) PDumpOSDebugPrintf (a) +#else +#define PDUMP_DBG(a) +#endif + + +#define PTR_PLUS(t, p, x) ((t)(((IMG_CHAR *)(p)) + (x))) +#define VPTR_PLUS(p, x) PTR_PLUS(IMG_VOID *, p, x) +#define VPTR_INC(p, x) ((p) = VPTR_PLUS(p, x)) +#define MAX_PDUMP_MMU_CONTEXTS (32) +static IMG_VOID *gpvTempBuffer = IMG_NULL; +static IMG_HANDLE ghTempBufferBlockAlloc; +static IMG_UINT16 gui16MMUContextUsage = 0; + +#if defined(PDUMP_DEBUG_OUTFILES) +/* counter increments each time debug write is called */ +IMG_UINT32 g_ui32EveryLineCounter = 1U; +#endif + +#ifdef INLINE_IS_PRAGMA +#pragma inline(_PDumpIsPersistent) +#endif +static INLINE +IMG_BOOL _PDumpIsPersistent(IMG_VOID) +{ + PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData(); + + if(psPerProc == IMG_NULL) + { + /* only occurs early in driver init, and init phase is already persistent */ + return IMG_FALSE; + } + return psPerProc->bPDumpPersistent; +} + +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + + +static INLINE +IMG_BOOL _PDumpIsProcessActive(IMG_VOID) +{ + 
PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData(); + if(psPerProc == IMG_NULL) + { + /* FIXME: kernel process logs some comments when kernel module is + * loaded, want to keep those. + */ + return IMG_TRUE; + } + return psPerProc->bPDumpActive; +} + +#endif /* SUPPORT_PDUMP_MULTI_PROCESS */ + +#if defined(PDUMP_DEBUG_OUTFILES) +static INLINE +IMG_UINT32 _PDumpGetPID(IMG_VOID) +{ + PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData(); + if(psPerProc == IMG_NULL) + { + /* Kernel PID */ + return 0; + } + return psPerProc->ui32PID; +} +#endif /* PDUMP_DEBUG_OUTFILES */ + +/************************************************************************** + * Function Name : GetTempBuffer + * Inputs : None + * Outputs : None + * Returns : Temporary buffer address, or IMG_NULL + * Description : Get temporary buffer address. +**************************************************************************/ +static IMG_VOID *GetTempBuffer(IMG_VOID) +{ + /* + * Allocate the temporary buffer, it it hasn't been allocated already. + * Return the address of the temporary buffer, or IMG_NULL if it + * couldn't be allocated. + * It is expected that the buffer will be allocated once, at driver + * load time, and left in place until the driver unloads. 
+ */ + + if (gpvTempBuffer == IMG_NULL) + { + PVRSRV_ERROR eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + PDUMP_TEMP_BUFFER_SIZE, + &gpvTempBuffer, + &ghTempBufferBlockAlloc, + "PDUMP Temporary Buffer"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "GetTempBuffer: OSAllocMem failed: %d", eError)); + } + } + + return gpvTempBuffer; +} + +static IMG_VOID FreeTempBuffer(IMG_VOID) +{ + + if (gpvTempBuffer != IMG_NULL) + { + PVRSRV_ERROR eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + PDUMP_TEMP_BUFFER_SIZE, + gpvTempBuffer, + ghTempBufferBlockAlloc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FreeTempBuffer: OSFreeMem failed: %d", eError)); + } + else + { + gpvTempBuffer = IMG_NULL; + } + } +} + +IMG_VOID PDumpInitCommon(IMG_VOID) +{ + /* Allocate temporary buffer for copying from user space */ + (IMG_VOID) GetTempBuffer(); + + /* Call environment specific PDump initialisation */ + PDumpInit(); +} + +IMG_VOID PDumpDeInitCommon(IMG_VOID) +{ + /* Free temporary buffer */ + FreeTempBuffer(); + + /* Call environment specific PDump Deinitialisation */ + PDumpDeInit(); +} + +IMG_BOOL PDumpIsSuspended(IMG_VOID) +{ + return PDumpOSIsSuspended(); +} + +IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID) +{ +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + if( _PDumpIsProcessActive() ) + { + return PDumpOSIsCaptureFrameKM(); + } + return IMG_FALSE; +#else + return PDumpOSIsCaptureFrameKM(); +#endif +} + +PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame) +{ +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + if( _PDumpIsProcessActive() ) + { + return PDumpOSSetFrameKM(ui32Frame); + } + return PVRSRV_OK; +#else + return PDumpOSSetFrameKM(ui32Frame); +#endif +} + +/************************************************************************** + * Function Name : PDumpRegWithFlagsKM + * Inputs : pszPDumpDevName, Register offset, and value to write + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a register write 
+**************************************************************************/ +PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING() + PDUMP_DBG(("PDumpRegWithFlagsKM")); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X\r\n", + pszPDumpRegName, ui32Reg, ui32Data); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpRegKM + * Inputs : Register offset, and value to write + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Create a PDUMP string, which represents a register write +**************************************************************************/ +PVRSRV_ERROR PDumpRegKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data) +{ + return PDumpRegWithFlagsKM(pszPDumpRegName, ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS); +} + +/************************************************************************** + * Function Name : PDumpRegPolWithFlagsKM + * Inputs : Description of what this register read is trying to do + * pszPDumpDevName + * Register offset + * expected value + * mask for that value + * Outputs : None + * Returns : None + * Description : Create a PDUMP string which represents a register read + * with the expected value +**************************************************************************/ +PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator) +{ + /* Timings correct for linux and XP */ + #define POLL_DELAY 1000U + #define POLL_COUNT_LONG (2000000000U / POLL_DELAY) + #define POLL_COUNT_SHORT (1000000U / POLL_DELAY) + + PVRSRV_ERROR eErr; + IMG_UINT32 ui32PollCount; + + 
PDUMP_GET_SCRIPT_STRING(); + PDUMP_DBG(("PDumpRegPolWithFlagsKM")); + if ( _PDumpIsPersistent() ) + { + /* Don't pdump-poll if the process is persistent */ + return PVRSRV_OK; + } + + ui32PollCount = POLL_COUNT_LONG; + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "POL :%s:0x%08X 0x%08X 0x%08X %d %u %d\r\n", + pszPDumpRegName, ui32RegAddr, ui32RegValue, + ui32Mask, eOperator, ui32PollCount, POLL_DELAY); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + return PVRSRV_OK; +} + + +/************************************************************************** + * Function Name : PDumpRegPol + * Inputs : Description of what this register read is trying to do + * pszPDumpDevName + Register offset + * expected value + * mask for that value + * Outputs : None + * Returns : None + * Description : Create a PDUMP string which represents a register read + * with the expected value +**************************************************************************/ +PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask, PDUMP_POLL_OPERATOR eOperator) +{ + return PDumpRegPolWithFlagsKM(pszPDumpRegName, ui32RegAddr, ui32RegValue, ui32Mask, PDUMP_FLAGS_CONTINUOUS, eOperator); +} + +/************************************************************************** + * Function Name : PDumpMallocPages + * Inputs : psDevID, ui32DevVAddr, pvLinAddr, ui32NumBytes, hOSMemHandle + * : hUniqueTag + * Outputs : None + * Returns : None + * Description : Malloc memory pages + +FIXME: This function assumes pvLinAddr is the address of the start of the +block for this hOSMemHandle. +If this isn't true, the call to PDumpOSCPUVAddrToDevPAddr below will be +incorrect. (Consider using OSMemHandleToCPUPAddr() instead?) +The only caller at the moment is in buffer_manager.c, which does the right +thing. 
+**************************************************************************/ +PVRSRV_ERROR PDumpMallocPages (PVRSRV_DEVICE_IDENTIFIER *psDevID, + IMG_UINT32 ui32DevVAddr, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32NumBytes, + IMG_UINT32 ui32PageSize, + IMG_BOOL bShared, + IMG_HANDLE hUniqueTag) +{ + PVRSRV_ERROR eErr; + IMG_PUINT8 pui8LinAddr; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32NumPages; + IMG_DEV_PHYADDR sDevPAddr; + IMG_UINT32 ui32Page; + IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS; + + PDUMP_GET_SCRIPT_STRING(); +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* Always dump physical pages backing a shared allocation */ + ui32Flags |= ( _PDumpIsPersistent() || bShared ) ? PDUMP_FLAGS_PERSISTENT : 0; +#else + PVR_UNREFERENCED_PARAMETER(bShared); + ui32Flags |= ( _PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0; +#endif + + /* However, lin addr is only required in non-linux OSes */ +#if !defined(LINUX) + PVR_ASSERT(((IMG_UINTPTR_T)pvLinAddr & HOST_PAGEMASK) == 0); +#endif + + PVR_ASSERT(((IMG_UINT32) ui32DevVAddr & HOST_PAGEMASK) == 0); + PVR_ASSERT(((IMG_UINT32) ui32NumBytes & HOST_PAGEMASK) == 0); + + /* + Write a comment to the PDump2 script streams indicating the memory allocation + */ + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- MALLOC :%s:VA_%08X 0x%08X %u\r\n", + psDevID->pszPDumpDevName, ui32DevVAddr, ui32NumBytes, ui32PageSize); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + /* + Write to the MMU script stream indicating the memory allocation + */ + pui8LinAddr = (IMG_PUINT8) pvLinAddr; + ui32Offset = 0; + ui32NumPages = ui32NumBytes / ui32PageSize; + while (ui32NumPages) + { + ui32NumPages--; + + /* See FIXME in function header. + * Currently: linux pdump uses OSMemHandle and Offset + * other OSes use the LinAddr. 
+ */ + /* Calculate the device physical address for this page */ + PDumpOSCPUVAddrToDevPAddr(psDevID->eDeviceType, + hOSMemHandle, + ui32Offset, + pui8LinAddr, + ui32PageSize, + &sDevPAddr); + ui32Page = (IMG_UINT32)(sDevPAddr.uiAddr / ui32PageSize); + /* increment kernel virtual address */ + pui8LinAddr += ui32PageSize; + ui32Offset += ui32PageSize; + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:PA_%08X%08X %u %u 0x%08X\r\n", + psDevID->pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag, + ui32Page * ui32PageSize, + ui32PageSize, + ui32PageSize, + ui32Page * ui32PageSize); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + } + return PVRSRV_OK; +} + + +/************************************************************************** + * Function Name : PDumpMallocPageTable + * Inputs : psDevId, pvLinAddr, ui32NumBytes, hUniqueTag + * Outputs : None + * Returns : None + * Description : Malloc memory page table +**************************************************************************/ +PVRSRV_ERROR PDumpMallocPageTable (PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32Offset, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32PTSize, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + PVRSRV_ERROR eErr; + IMG_DEV_PHYADDR sDevPAddr; + + PDUMP_GET_SCRIPT_STRING(); + + PVR_ASSERT(((IMG_UINTPTR_T)pvLinAddr & (ui32PTSize - 1)) == 0); + ui32Flags |= PDUMP_FLAGS_CONTINUOUS; + ui32Flags |= ( _PDumpIsPersistent() ) ? 
PDUMP_FLAGS_PERSISTENT : 0; + + /* + Write a comment to the PDump2 script streams indicating the memory allocation + */ + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "-- MALLOC :%s:PAGE_TABLE 0x%08X %u\r\n", + psDevId->pszPDumpDevName, + ui32PTSize, + ui32PTSize); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + /* + Write to the MMU script stream indicating the memory allocation + */ + // FIXME: we'll never need more than a 4k page for a pagetable + // fixing to 1 page for now. + // note: when the mmu code supports packed pagetables the PTs + // will be as small as 16bytes + + PDumpOSCPUVAddrToDevPAddr(psDevId->eDeviceType, + hOSMemHandle, /* um - does this mean the pvLinAddr would be ignored? Is that safe? */ + ui32Offset, + (IMG_PUINT8) pvLinAddr, + ui32PTSize, + &sDevPAddr); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:PA_%08X%08X 0x%X %u 0x%08X\r\n", + psDevId->pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr, + ui32PTSize,//size + ui32PTSize,//alignment + sDevPAddr.uiAddr); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpFreePages + * Inputs : psBMHeap, sDevVAddr, ui32NumBytes, hUniqueTag, + bInterLeaved + * Outputs : None + * Returns : None + * Description : Free memory pages +**************************************************************************/ +PVRSRV_ERROR PDumpFreePages (BM_HEAP *psBMHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32NumBytes, + IMG_UINT32 ui32PageSize, + IMG_HANDLE hUniqueTag, + IMG_BOOL bInterleaved, + IMG_BOOL bSparse) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32NumPages, ui32PageCounter; + IMG_DEV_PHYADDR sDevPAddr; + IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS; + PVRSRV_DEVICE_NODE *psDeviceNode; + + PDUMP_GET_SCRIPT_STRING(); + + PVR_ASSERT(((IMG_UINT32) 
sDevVAddr.uiAddr & (ui32PageSize - 1)) == 0); + PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (ui32PageSize - 1)) == 0); + + psDeviceNode = psBMHeap->pBMContext->psDeviceNode; + ui32Flags |= ( _PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0; + + /* + Write a comment to the PDUMP2 script streams indicating the memory free + */ + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:VA_%08X\r\n", + psDeviceNode->sDevId.pszPDumpDevName, sDevVAddr.uiAddr); + if(eErr != PVRSRV_OK) + { + return eErr; + } + +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* if we're dumping a shared heap, need to ensure phys allocation + * is freed even if this app isn't the one marked for pdumping + */ + { + PVRSRV_DEVICE_NODE *psDeviceNode = psBMHeap->pBMContext->psDeviceNode; + + if( psDeviceNode->pfnMMUIsHeapShared(psBMHeap->pMMUHeap) ) + { + ui32Flags |= PDUMP_FLAGS_PERSISTENT; + } + } +#endif + PDumpOSWriteString2(hScript, ui32Flags); + + /* + Write to the MMU script stream indicating the memory free + */ + ui32NumPages = ui32NumBytes / ui32PageSize; + for (ui32PageCounter = 0; ui32PageCounter < ui32NumPages; ui32PageCounter++) + { + if (!bInterleaved || (ui32PageCounter % 2) == 0) + { + sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(psBMHeap->pMMUHeap, sDevVAddr); + + /* With sparse mappings we expect spaces */ + if (bSparse && (sDevPAddr.uiAddr == 0)) + { + continue; + } + + PVR_ASSERT(sDevPAddr.uiAddr != 0); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:PA_%08X%08X\r\n", + psDeviceNode->sDevId.pszPDumpDevName, (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag, sDevPAddr.uiAddr); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + } + else + { + /* Gap pages in an interleaved allocation should be ignored. 
*/ + } + + sDevVAddr.uiAddr += ui32PageSize; + } + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpFreePageTable + * Inputs : psDevID, pvLinAddr, ui32NumBytes, hUniqueTag + * Outputs : None + * Returns : None + * Description : Free memory page table +**************************************************************************/ +PVRSRV_ERROR PDumpFreePageTable (PVRSRV_DEVICE_IDENTIFIER *psDevID, + IMG_HANDLE hOSMemHandle, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32PTSize, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + PVRSRV_ERROR eErr; + IMG_DEV_PHYADDR sDevPAddr; + + PDUMP_GET_SCRIPT_STRING(); + + PVR_UNREFERENCED_PARAMETER(ui32PTSize); + ui32Flags |= PDUMP_FLAGS_CONTINUOUS; + ui32Flags |= ( _PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0; + + /* override QAC warning about wrap around */ + PVR_ASSERT(((IMG_UINTPTR_T)pvLinAddr & (ui32PTSize-1UL)) == 0); /* PRQA S 3382 */ + + /* + Write a comment to the PDUMP2 script streams indicating the memory free + */ + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:PAGE_TABLE\r\n", psDevID->pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + /* + Write to the MMU script stream indicating the memory free + */ + // FIXME: we'll never need more than a 4k page for a pagetable + // fixing to 1 page for now. + // note: when the mmu code supports packed pagetables the PTs + // will be as small as 16bytes + + PDumpOSCPUVAddrToDevPAddr(psDevID->eDeviceType, + hOSMemHandle, /* um - does this mean the pvLinAddr would be ignored? Is that safe? 
*/ + 0, + (IMG_PUINT8) pvLinAddr, + ui32PTSize, + &sDevPAddr); + + { + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:PA_%08X%08X\r\n", + psDevID->pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + } + + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpPDRegWithFlags + * Inputs : psMMUAttrib + * : ui32Reg + * : ui32Data + * : hUniqueTag + * Outputs : None + * Returns : None + * Description : Kernel Services internal pdump memory API + * Used for registers specifying physical addresses + e.g. MMU page directory register +**************************************************************************/ +PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + PVRSRV_ERROR eErr; + IMG_CHAR *pszRegString; + PDUMP_GET_SCRIPT_STRING() + + if(psMMUAttrib->pszPDRegRegion != IMG_NULL) + { + pszRegString = psMMUAttrib->pszPDRegRegion; + } + else + { + pszRegString = psMMUAttrib->sDevId.pszPDumpRegName; + } + + /* + Write to the MMU script stream indicating the physical page directory + */ +#if defined(SGX_FEATURE_36BIT_MMU) + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, + "WRW :%s:$1 :%s:PA_%08X%08X:0x0\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)hUniqueTag, + (ui32Data & psMMUAttrib->ui32PDEMask) << psMMUAttrib->ui32PDEAlignShift); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "SHR :%s:$1 :%s:$1 0x4\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, + "WRW 
:%s:0x%08X: %s:$1\r\n", + pszRegString, + ui32Reg, + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); +#else + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "WRW :%s:0x%08X :%s:PA_%08X%08X:0x%08X\r\n", + pszRegString, + ui32Reg, + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag, + (ui32Data & psMMUAttrib->ui32PDEMask) << psMMUAttrib->ui32PDEAlignShift, + ui32Data & ~psMMUAttrib->ui32PDEMask); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); +#endif + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpPDReg + * Inputs : psMMUAttrib + : ui32Reg + * : ui32Data + * : hUniqueTag + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Kernel Services internal pdump memory API + * Used for registers specifying physical addresses + e.g. MMU page directory register +**************************************************************************/ +PVRSRV_ERROR PDumpPDReg (PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data, + IMG_HANDLE hUniqueTag) +{ + return PDumpPDRegWithFlags(psMMUAttrib, ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS, hUniqueTag); +} + +/************************************************************************** + * Function Name : PDumpMemPolKM + * Inputs : psMemInfo + * : ui32Offset + * : ui32Value + * : ui32Mask + * : eOperator + * : ui32Flags + * : hUniqueTag + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Implements Client pdump memory poll API +**************************************************************************/ +PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + #define MEMPOLL_DELAY (1000) + #define MEMPOLL_COUNT 
(2000000000 / MEMPOLL_DELAY) + + PVRSRV_ERROR eErr; + IMG_UINT32 ui32PageOffset; + IMG_UINT8 *pui8LinAddr; + IMG_DEV_PHYADDR sDevPAddr; + IMG_DEV_VIRTADDR sDevVPageAddr; + PDUMP_MMU_ATTRIB *psMMUAttrib; + + PDUMP_GET_SCRIPT_STRING(); + + if (PDumpOSIsSuspended()) + { + return PVRSRV_OK; + } + + if ( _PDumpIsPersistent() ) + { + /* Don't pdump-poll if the process is persistent */ + return PVRSRV_OK; + } + + /* Check the offset and size don't exceed the bounds of the allocation */ + PVR_ASSERT((ui32Offset + sizeof(IMG_UINT32)) <= psMemInfo->uAllocSize); + + psMMUAttrib = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->psMMUAttrib; + + /* + Write a comment to the PDump2 script streams indicating the virtual memory pol + */ + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "-- POL :%s:VA_%08X 0x%08X 0x%08X %d %d %d\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMemInfo->sDevVAddr.uiAddr + ui32Offset, + ui32Value, + ui32Mask, + eOperator, + MEMPOLL_COUNT, + MEMPOLL_DELAY); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + + pui8LinAddr = psMemInfo->pvLinAddrKM; + + /* Advance address by offset */ + pui8LinAddr += ui32Offset; + + /* + query the buffer manager for the physical pages that back the + virtual address + */ + PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle, + ui32Offset, + pui8LinAddr, + psMMUAttrib->ui32DataPageMask, + &ui32PageOffset); + + /* calculate the DevV page address */ + sDevVPageAddr.uiAddr = psMemInfo->sDevVAddr.uiAddr + ui32Offset - ui32PageOffset; + + PVR_ASSERT((sDevVPageAddr.uiAddr & psMMUAttrib->ui32DataPageMask) == 0); + + /* get the physical page address based on the device virtual address */ + BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr); + + /* convert DevP page address to byte address */ + sDevPAddr.uiAddr += ui32PageOffset; + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "POL :%s:PA_%08X%08X:0x%08X 0x%08X 0x%08X %d %d %d\r\n", + 
psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr & ~(psMMUAttrib->ui32DataPageMask), + sDevPAddr.uiAddr & (psMMUAttrib->ui32DataPageMask), + ui32Value, + ui32Mask, + eOperator, + MEMPOLL_COUNT, + MEMPOLL_DELAY); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : _PDumpMemIntKM + * Inputs : psMemInfo + * : ui32Offset + * : ui32Bytes + * : ui32Flags + * : hUniqueTag + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Implements Client pdump mem API +**************************************************************************/ +static PVRSRV_ERROR _PDumpMemIntKM(IMG_PVOID pvAltLinAddr, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32NumPages; + IMG_UINT32 ui32PageByteOffset; + IMG_UINT32 ui32BlockBytes; + IMG_UINT8* pui8LinAddr; + IMG_UINT8* pui8DataLinAddr = IMG_NULL; + IMG_DEV_VIRTADDR sDevVPageAddr; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEV_PHYADDR sDevPAddr; + IMG_UINT32 ui32ParamOutPos; + PDUMP_MMU_ATTRIB *psMMUAttrib; + IMG_UINT32 ui32DataPageSize; + + PDUMP_GET_SCRIPT_AND_FILE_STRING(); + + /* PRQA S 3415 1 */ /* side effects desired */ + if (ui32Bytes == 0 || PDumpOSIsSuspended()) + { + return PVRSRV_OK; + } + + psMMUAttrib = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->psMMUAttrib; + + /* + check the offset and size don't exceed the bounds of the allocation + */ + PVR_ASSERT((ui32Offset + ui32Bytes) <= psMemInfo->uAllocSize); + + if (!PDumpOSJTInitialised()) + { + return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE; + } + +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* if we're dumping a shared heap, need to ensure phys allocation + * is initialised even if this app isn't the one marked for pdumping + */ + { + BM_HEAP 
*pHeap = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap; + PVRSRV_DEVICE_NODE *psDeviceNode = pHeap->pBMContext->psDeviceNode; + + if( psDeviceNode->pfnMMUIsHeapShared(pHeap->pMMUHeap) ) + { + ui32Flags |= PDUMP_FLAGS_PERSISTENT; + } + } +#endif + + /* setup memory addresses */ + if(pvAltLinAddr) + { + pui8DataLinAddr = pvAltLinAddr; + } + else if(psMemInfo->pvLinAddrKM) + { + pui8DataLinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM + ui32Offset; + } + pui8LinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM; + sDevVAddr = psMemInfo->sDevVAddr; + + /* advance address by offset */ + sDevVAddr.uiAddr += ui32Offset; + pui8LinAddr += ui32Offset; + + PVR_ASSERT(pui8DataLinAddr); + + PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2), ui32Bytes, ui32Flags); + + ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2); + + /* + write the binary data up-front. + */ + if(!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2), + pui8DataLinAddr, + ui32Bytes, + ui32Flags)) + { + return PVRSRV_ERROR_PDUMP_BUFFER_FULL; + } + + if (PDumpOSGetParamFileNum() == 0) + { + eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm"); + } + else + { + eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%_%u.prm", PDumpOSGetParamFileNum()); + } + if(eErr != PVRSRV_OK) + { + return eErr; + } + + /* + Write a comment to the PDump2 script streams indicating the virtual memory load + */ + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "-- LDB :%s:VA_%08X%08X:0x%08X 0x%08X 0x%08X %s\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag, + psMemInfo->sDevVAddr.uiAddr, + ui32Offset, + ui32Bytes, + ui32ParamOutPos, + pszFileName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + /* + query the buffer manager for the physical pages that back the + virtual address + */ + PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle, + ui32Offset, + pui8LinAddr, + 
psMMUAttrib->ui32DataPageMask, + &ui32PageByteOffset); + ui32DataPageSize = psMMUAttrib->ui32DataPageMask + 1; + ui32NumPages = (ui32PageByteOffset + ui32Bytes + psMMUAttrib->ui32DataPageMask) / ui32DataPageSize; + + while(ui32NumPages) + { + ui32NumPages--; + + /* calculate the DevV page address */ + sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset; + + if (ui32DataPageSize <= PDUMP_TEMP_BUFFER_SIZE) + { + /* if a page fits within temp buffer, we should dump in page-aligned chunks. */ + PVR_ASSERT((sDevVPageAddr.uiAddr & psMMUAttrib->ui32DataPageMask) == 0); + } + + /* get the physical page address based on the device virtual address */ + BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr); + + /* convert DevP page address to byte address */ + sDevPAddr.uiAddr += ui32PageByteOffset; + + /* how many bytes to dump from this page */ + if (ui32PageByteOffset + ui32Bytes > ui32DataPageSize) + { + /* dump up to the page boundary */ + ui32BlockBytes = ui32DataPageSize - ui32PageByteOffset; + } + else + { + /* dump what's left */ + ui32BlockBytes = ui32Bytes; + } + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "LDB :%s:PA_%08X%08X:0x%08X 0x%08X 0x%08X %s\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr & ~(psMMUAttrib->ui32DataPageMask), + sDevPAddr.uiAddr & (psMMUAttrib->ui32DataPageMask), + ui32BlockBytes, + ui32ParamOutPos, + pszFileName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + /* update details for next page */ + +#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE) + /* page may be larger than pdump temporary buffer */ + ui32PageByteOffset = (ui32PageByteOffset + ui32BlockBytes) % ui32DataPageSize; +#else + /* page offset 0 after first page dump */ + ui32PageByteOffset = 0; +#endif + /* bytes left over */ + ui32Bytes -= ui32BlockBytes; /* PRQA S 3382 */ /* QAC missed MIN test */ + /* advance devVaddr */ + sDevVAddr.uiAddr += 
ui32BlockBytes; + /* advance the cpuVaddr */ + pui8LinAddr += ui32BlockBytes; + /* update the file write offset */ + ui32ParamOutPos += ui32BlockBytes; + } + + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpMemKM + * Inputs : psMemInfo + * : ui32Offset + * : ui32Bytes + * : ui32Flags + * : hUniqueTag + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Implements Client pdump mem API +**************************************************************************/ +PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + /* + For now we don't support dumping sparse allocations that + are from within the kernel, or are from UM but without a + alternative linear address + */ + PVR_ASSERT((psMemInfo->ui32Flags & PVRSRV_MEM_SPARSE) == 0); + + if (psMemInfo->ui32Flags & PVRSRV_MEM_SPARSE) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + else + { + return _PDumpMemIntKM(pvAltLinAddr, + psMemInfo, + ui32Offset, + ui32Bytes, + ui32Flags, + hUniqueTag); + } +} + +PVRSRV_ERROR PDumpMemPDEntriesKM(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_HANDLE hOSMemHandle, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_BOOL bInitialisePages, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hUniqueTag2) +{ + PDUMP_MMU_ATTRIB sMMUAttrib; + + /* Override the (variable) PT size since PDs are always 4K in size */ + sMMUAttrib = *psMMUAttrib; + sMMUAttrib.ui32PTSize = (IMG_UINT32)HOST_PAGESIZE(); + return PDumpMemPTEntriesKM( &sMMUAttrib, + hOSMemHandle, + pvLinAddr, + ui32Bytes, + ui32Flags, + bInitialisePages, + hUniqueTag1, + hUniqueTag2); +} + +/************************************************************************** + * Function Name : PDumpMemPTEntriesKM + * Inputs : psMMUAttrib - MMU attributes for pdump + * : pvLinAddr - CPU address of PT base + * : 
ui32Bytes - size + * : ui32Flags - pdump flags + * : bInitialisePages - whether to initialise pages from file + * : hUniqueTag1 - ID for PT physical page + * : hUniqueTag2 - ID for target physical page (if !bInitialisePages) + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Kernel Services internal pdump memory API + * Used for memory without DevVAddress mappings + e.g. MMU page tables + FIXME: This function doesn't support non-4k data pages, + e.g. dummy data page +**************************************************************************/ +PVRSRV_ERROR PDumpMemPTEntriesKM(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_HANDLE hOSMemHandle, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_BOOL bInitialisePages, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hUniqueTag2) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32NumPages; + IMG_UINT32 ui32PageOffset; + IMG_UINT32 ui32BlockBytes; + IMG_UINT8* pui8LinAddr; + IMG_DEV_PHYADDR sDevPAddr; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32ParamOutPos; + IMG_UINT32 ui32PageMask; /* mask for the physical page backing the PT */ + + PDUMP_GET_SCRIPT_AND_FILE_STRING(); + ui32Flags |= ( _PDumpIsPersistent() ) ? 
PDUMP_FLAGS_PERSISTENT : 0; + + if (PDumpOSIsSuspended()) + { + return PVRSRV_OK; + } + + if (!PDumpOSJTInitialised()) + { + return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE; + } + + if (!pvLinAddr) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2), ui32Bytes, ui32Flags); + + ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2); + + if (bInitialisePages) + { + /* + write the binary data up-front + Use the 'continuous' memory stream + */ + if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2), + pvLinAddr, + ui32Bytes, + ui32Flags | PDUMP_FLAGS_CONTINUOUS)) + { + return PVRSRV_ERROR_PDUMP_BUFFER_FULL; + } + + if (PDumpOSGetParamFileNum() == 0) + { + eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm"); + } + else + { + eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%_%u.prm", PDumpOSGetParamFileNum()); + } + if(eErr != PVRSRV_OK) + { + return eErr; + } + } + + /* + Mask for the physical page address backing the PT + The PT size can be less than 4k with variable page size support + The PD size is always 4k + FIXME: This won't work for dumping the dummy data page + */ + ui32PageMask = psMMUAttrib->ui32PTSize - 1; + + /* + Write to the MMU script stream indicating the physical page table entries + */ + /* physical pages that back the virtual address */ + ui32PageOffset = (IMG_UINT32)((IMG_UINTPTR_T)pvLinAddr & (psMMUAttrib->ui32PTSize - 1)); + ui32NumPages = (ui32PageOffset + ui32Bytes + psMMUAttrib->ui32PTSize - 1) / psMMUAttrib->ui32PTSize; + pui8LinAddr = (IMG_UINT8*) pvLinAddr; + + while (ui32NumPages) + { + ui32NumPages--; + /* FIXME: if we used OSMemHandleToCPUPAddr() here, we might be + able to lose the lin addr arg. At least one thing that + would need to be done here is to pass in an offset, as the + calling function doesn't necessarily give us the lin addr + of the start of the mem area. 
Probably best to keep the + lin addr arg for now - but would be nice to remove the + redundancy */ + sCpuPAddr = OSMapLinToCPUPhys(hOSMemHandle, pui8LinAddr); + sDevPAddr = SysCpuPAddrToDevPAddr(psMMUAttrib->sDevId.eDeviceType, sCpuPAddr); + + /* how many bytes to dump from this page */ + if (ui32PageOffset + ui32Bytes > psMMUAttrib->ui32PTSize) + { + /* dump up to the page boundary */ + ui32BlockBytes = psMMUAttrib->ui32PTSize - ui32PageOffset; + } + else + { + /* dump what's left */ + ui32BlockBytes = ui32Bytes; + } + + /* + Write a comment to the MMU script stream indicating the page table load + */ + + if (bInitialisePages) + { + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "LDB :%s:PA_%08X%08X:0x%08X 0x%08X 0x%08X %s\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1, + sDevPAddr.uiAddr & ~ui32PageMask, + sDevPAddr.uiAddr & ui32PageMask, + ui32BlockBytes, + ui32ParamOutPos, + pszFileName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS); + } + else + { + for (ui32Offset = 0; ui32Offset < ui32BlockBytes; ui32Offset += sizeof(IMG_UINT32)) + { + IMG_UINT32 ui32PTE = *((IMG_UINT32 *)(IMG_UINTPTR_T)(pui8LinAddr + ui32Offset)); /* PRQA S 3305 */ /* strict pointer */ + + if ((ui32PTE & psMMUAttrib->ui32PDEMask) != 0) + { + /* PT entry points to non-null page */ +#if defined(SGX_FEATURE_36BIT_MMU) + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:$1 :%s:PA_%08X%08X:0x0\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)hUniqueTag2, + (ui32PTE & psMMUAttrib->ui32PDEMask) << psMMUAttrib->ui32PTEAlignShift); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS); + eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "SHR :%s:$1 :%s:$1 0x4\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != 
PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS); + eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "OR :%s:$1 :%s:$1 0x%08X\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName, + ui32PTE & ~psMMUAttrib->ui32PDEMask); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS); + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_%08X%08X:0x%08X :%s:$1\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)hUniqueTag1, + (sDevPAddr.uiAddr + ui32Offset) & ~ui32PageMask, + (sDevPAddr.uiAddr + ui32Offset) & ui32PageMask, + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS); +#else + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_%08X%08X:0x%08X :%s:PA_%08X%08X:0x%08X\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1, + (sDevPAddr.uiAddr + ui32Offset) & ~ui32PageMask, + (sDevPAddr.uiAddr + ui32Offset) & ui32PageMask, + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag2, + (ui32PTE & psMMUAttrib->ui32PDEMask) << psMMUAttrib->ui32PTEAlignShift, + ui32PTE & ~psMMUAttrib->ui32PDEMask); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS); +#endif + } + else + { +#if !defined(FIX_HW_BRN_31620) + PVR_ASSERT((ui32PTE & psMMUAttrib->ui32PTEValid) == 0UL); +#endif + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_%08X%08X:0x%08X 0x%08X%08X\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1, + (sDevPAddr.uiAddr + ui32Offset) & ~ui32PageMask, + (sDevPAddr.uiAddr + ui32Offset) & ui32PageMask, + (ui32PTE << psMMUAttrib->ui32PTEAlignShift), + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag2); + if(eErr != PVRSRV_OK) + { + return eErr; + } + 
PDumpOSWriteString2(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS); + } + } + } + + /* update details for next page */ + + /* page offset 0 after first page dump */ + ui32PageOffset = 0; + /* bytes left over */ + ui32Bytes -= ui32BlockBytes; + /* advance the cpuVaddr */ + pui8LinAddr += ui32BlockBytes; + /* update the file write offset */ + ui32ParamOutPos += ui32BlockBytes; + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_DEV_PHYADDR sPDDevPAddr, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hUniqueTag2) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32PageByteOffset; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEV_VIRTADDR sDevVPageAddr; + IMG_DEV_PHYADDR sDevPAddr; + IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS; + IMG_UINT32 ui32ParamOutPos; + PDUMP_MMU_ATTRIB *psMMUAttrib; + IMG_UINT32 ui32PageMask; /* mask for the physical page backing the PT */ + + PDUMP_GET_SCRIPT_AND_FILE_STRING(); + + if (!PDumpOSJTInitialised()) + { + return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE; + } + + psMMUAttrib = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->psMMUAttrib; + ui32PageMask = psMMUAttrib->ui32PTSize - 1; + + ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2); + + /* Write the PD phys addr to the param stream up front */ + if(!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2), + (IMG_UINT8 *)&sPDDevPAddr, + sizeof(IMG_DEV_PHYADDR), + ui32Flags)) + { + return PVRSRV_ERROR_PDUMP_BUFFER_FULL; + } + + if (PDumpOSGetParamFileNum() == 0) + { + eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm"); + } + else + { + eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%_%u.prm", PDumpOSGetParamFileNum()); + } + if(eErr != PVRSRV_OK) + { + return eErr; + } + + /* Write a comment indicating the PD phys addr write, so that the offsets + * into the param stream increase in correspondence with the number of bytes + * written. 
*/ + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "-- LDB :%s:PA_0x%08X%08X:0x%08X 0x%08X 0x%08X %s\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1, + sPDDevPAddr.uiAddr & ~ui32PageMask, + sPDDevPAddr.uiAddr & ui32PageMask, + sizeof(IMG_DEV_PHYADDR), + ui32ParamOutPos, + pszFileName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + sDevVAddr = psMemInfo->sDevVAddr; + ui32PageByteOffset = sDevVAddr.uiAddr & ui32PageMask; + + sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset; + PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0); + + BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr); + sDevPAddr.uiAddr += ui32PageByteOffset + ui32Offset; + + if ((sPDDevPAddr.uiAddr & psMMUAttrib->ui32PDEMask) != 0UL) + { +#if defined(SGX_FEATURE_36BIT_MMU) + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:$1 :%s:PA_%08X%08X:0x0\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)hUniqueTag2, + sPDDevPAddr.uiAddr); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "AND :%s:$2 :%s:$1 0xFFFFFFFF\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_%08X%08X:0x%08X :%s:$2\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)hUniqueTag1, + (sDevPAddr.uiAddr) & ~(psMMUAttrib->ui32DataPageMask), + (sDevPAddr.uiAddr) & (psMMUAttrib->ui32DataPageMask), + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "SHR :%s:$2 :%s:$1 0x20\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + 
psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_%08X%08X:0x%08X :%s:$2\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)hUniqueTag1, + (sDevPAddr.uiAddr + 4) & ~(psMMUAttrib->ui32DataPageMask), + (sDevPAddr.uiAddr + 4) & (psMMUAttrib->ui32DataPageMask), + psMMUAttrib->sDevId.pszPDumpDevName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); +#else + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_%08X%08X:0x%08X :%s:PA_%08X%08X:0x%08X\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1, + sDevPAddr.uiAddr & ~ui32PageMask, + sDevPAddr.uiAddr & ui32PageMask, + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag2, + sPDDevPAddr.uiAddr & psMMUAttrib->ui32PDEMask, + sPDDevPAddr.uiAddr & ~psMMUAttrib->ui32PDEMask); + if(eErr != PVRSRV_OK) + { + return eErr; + } +#endif + } + else + { + PVR_ASSERT(!(sDevPAddr.uiAddr & psMMUAttrib->ui32PTEValid)); + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_%08X%08X:0x%08X 0x%08X\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1, + sDevPAddr.uiAddr & ~ui32PageMask, + sDevPAddr.uiAddr & ui32PageMask, + sPDDevPAddr.uiAddr); + if(eErr != PVRSRV_OK) + { + return eErr; + } + } + PDumpOSWriteString2(hScript, ui32Flags); + + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpCommentKM + * Inputs : pszComment, ui32Flags + * Outputs : None + * Returns : None + * Description : Dumps a comment +**************************************************************************/ +PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + IMG_CHAR pszCommentPrefix[] = "-- "; /* prefix for comments */ +#if 
defined(PDUMP_DEBUG_OUTFILES) + IMG_CHAR pszTemp[256]; +#endif + IMG_UINT32 ui32LenCommentPrefix; + PDUMP_GET_SCRIPT_STRING(); + PDUMP_DBG(("PDumpCommentKM")); +#if defined(PDUMP_DEBUG_OUTFILES) + /* include comments in the "extended" init phase. + * default is to ignore them. + */ + ui32Flags |= ( _PDumpIsPersistent() ) ? PDUMP_FLAGS_PERSISTENT : 0; +#endif + /* Put \r \n sequence at the end if it isn't already there */ + PDumpOSVerifyLineEnding(pszComment, ui32MaxLen); + + /* Length of string excluding terminating NULL character */ + ui32LenCommentPrefix = PDumpOSBuflen(pszCommentPrefix, sizeof(pszCommentPrefix)); + + /* Ensure output file is available for writing */ + /* FIXME: is this necessary? */ + if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_SCRIPT2), + (IMG_UINT8*)pszCommentPrefix, + ui32LenCommentPrefix, + ui32Flags)) + { +#if defined(PDUMP_DEBUG_OUTFILES) + if(ui32Flags & PDUMP_FLAGS_CONTINUOUS) + { + PVR_DPF((PVR_DBG_WARNING, "Incomplete comment, %d: %s (continuous set)", + g_ui32EveryLineCounter, pszComment)); + return PVRSRV_ERROR_PDUMP_BUFFER_FULL; + } + else if(ui32Flags & PDUMP_FLAGS_PERSISTENT) + { + PVR_DPF((PVR_DBG_WARNING, "Incomplete comment, %d: %s (persistent set)", + g_ui32EveryLineCounter, pszComment)); + return PVRSRV_ERROR_CMD_NOT_PROCESSED; + } + else + { + PVR_DPF((PVR_DBG_WARNING, "Incomplete comment, %d: %s", + g_ui32EveryLineCounter, pszComment)); + return PVRSRV_ERROR_CMD_NOT_PROCESSED; + } +#else + PVR_DPF((PVR_DBG_WARNING, "Incomplete comment, %s", + pszComment)); + return PVRSRV_ERROR_CMD_NOT_PROCESSED; +#endif + } + +#if defined(PDUMP_DEBUG_OUTFILES) + /* Prefix comment with PID and line number */ + eErr = PDumpOSSprintf(pszTemp, 256, "%d-%d %s", + _PDumpGetPID(), + g_ui32EveryLineCounter, + pszComment); + + /* Append the comment to the script stream */ + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "%s", + pszTemp); +#else + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "%s", + pszComment); +#endif + if( (eErr != 
PVRSRV_OK) && + (eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW)) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpCommentWithFlags + * Inputs : psPDev - PDev for PDump device + * : pszFormat - format string for comment + * : ... - args for format string + * Outputs : None + * Returns : None + * Description : PDumps a comments +**************************************************************************/ +PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...) +{ + PVRSRV_ERROR eErr; + PDUMP_va_list ap; + PDUMP_GET_MSG_STRING(); + + /* Construct the string */ + PDUMP_va_start(ap, pszFormat); + eErr = PDumpOSVSprintf(pszMsg, ui32MaxLen, pszFormat, ap); + PDUMP_va_end(ap); + + if(eErr != PVRSRV_OK) + { + return eErr; + } + return PDumpCommentKM(pszMsg, ui32Flags); +} + +/************************************************************************** + * Function Name : PDumpComment + * Inputs : psPDev - PDev for PDump device + * : pszFormat - format string for comment + * : ... - args for format string + * Outputs : None + * Returns : None + * Description : PDumps a comments +**************************************************************************/ +PVRSRV_ERROR PDumpComment(IMG_CHAR *pszFormat, ...) 
+{ + PVRSRV_ERROR eErr; + PDUMP_va_list ap; + PDUMP_GET_MSG_STRING(); + + /* Construct the string */ + PDUMP_va_start(ap, pszFormat); + eErr = PDumpOSVSprintf(pszMsg, ui32MaxLen, pszFormat, ap); + PDUMP_va_end(ap); + + if(eErr != PVRSRV_OK) + { + return eErr; + } + return PDumpCommentKM(pszMsg, PDUMP_FLAGS_CONTINUOUS); +} + +/************************************************************************** + * Function Name : PDumpDriverInfoKM + * Inputs : pszString, ui32Flags + * Outputs : None + * Returns : None + * Description : Dumps a comment +**************************************************************************/ +PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32MsgLen; + PDUMP_GET_MSG_STRING(); + + /* Construct the string */ + eErr = PDumpOSSprintf(pszMsg, ui32MaxLen, "%s", pszString); + if(eErr != PVRSRV_OK) + { + return eErr; + } + + /* Put \r \n sequence at the end if it isn't already there */ + PDumpOSVerifyLineEnding(pszMsg, ui32MaxLen); + ui32MsgLen = PDumpOSBuflen(pszMsg, ui32MaxLen); + + if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_DRIVERINFO), + (IMG_UINT8*)pszMsg, + ui32MsgLen, + ui32Flags)) + { + if (ui32Flags & PDUMP_FLAGS_CONTINUOUS) + { + return PVRSRV_ERROR_PDUMP_BUFFER_FULL; + } + else + { + return PVRSRV_ERROR_CMD_NOT_PROCESSED; + } + } + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PDumpBitmapKM + + @Description + + Dumps a bitmap from device memory to a file + + @Input psDevId + @Input pszFileName + @Input ui32FileOffset + @Input ui32Width + @Input ui32Height + @Input ui32StrideInBytes + @Input sDevBaseAddr + @Input ui32Size + @Input ePixelFormat + @Input eMemFormat + @Input ui32PDumpFlags + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR PDumpBitmapKM( PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_HANDLE hDevMemContext, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + PDUMP_MEM_FORMAT eMemFormat, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_DEVICE_IDENTIFIER *psDevId = &psDeviceNode->sDevId; + IMG_UINT32 ui32MMUContextID; + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + + if ( _PDumpIsPersistent() ) + { + return PVRSRV_OK; + } + + PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump bitmap of render\r\n"); + + /* find MMU context ID */ + ui32MMUContextID = psDeviceNode->pfnMMUGetContextID( hDevMemContext ); + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "SII %s %s.bin :%s:v%x:0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\r\n", + pszFileName, + pszFileName, + psDevId->pszPDumpDevName, + ui32MMUContextID, + sDevBaseAddr.uiAddr, + ui32Size, + ui32FileOffset, + ePixelFormat, + ui32Width, + ui32Height, + ui32StrideInBytes, + eMemFormat); + if(eErr != PVRSRV_OK) + { + return eErr; + } + + PDumpOSWriteString2( hScript, ui32PDumpFlags); + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PDumpReadRegKM + + @Description + + Dumps a read from a device register to a file + + @Input psConnection : connection info + @Input pszFileName + @Input ui32FileOffset + @Input ui32Address + @Input ui32Size + @Input ui32PDumpFlags + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR PDumpReadRegKM ( IMG_CHAR *pszPDumpRegName, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Address, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + + PVR_UNREFERENCED_PARAMETER(ui32Size); + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "SAB :%s:0x%08X 0x%08X %s\r\n", + pszPDumpRegName, + ui32Address, + ui32FileOffset, + pszFileName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + + PDumpOSWriteString2( hScript, ui32PDumpFlags); + + return PVRSRV_OK; +} + +/***************************************************************************** + @name PDumpTestNextFrame + @brief Tests whether the next frame will be pdumped + @param ui32CurrentFrame + @return bFrameDumped +*****************************************************************************/ +IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame) +{ + IMG_BOOL bFrameDumped; + + /* + Try dumping a string + */ + (IMG_VOID) PDumpSetFrameKM(ui32CurrentFrame + 1); + bFrameDumped = PDumpIsCaptureFrameKM(); + (IMG_VOID) PDumpSetFrameKM(ui32CurrentFrame); + + return bFrameDumped; +} + +/***************************************************************************** + @name PDumpSignatureRegister + @brief Dumps a single signature register + @param psDevId - device ID + @param ui32Address - The register address + @param ui32Size - The amount of data to be dumped in bytes + @param pui32FileOffset - Offset of dump in output file + @param ui32Flags - Flags + @return none 
+*****************************************************************************/ +static PVRSRV_ERROR PDumpSignatureRegister (PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32Address, + IMG_UINT32 ui32Size, + IMG_UINT32 *pui32FileOffset, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "SAB :%s:0x%08X 0x%08X %s\r\n", + psDevId->pszPDumpRegName, + ui32Address, + *pui32FileOffset, + pszFileName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + + PDumpOSWriteString2(hScript, ui32Flags); + *pui32FileOffset += ui32Size; + return PVRSRV_OK; +} + +/***************************************************************************** + @name PDumpRegisterRange + @brief Dumps a list of signature registers to a file + @param psDevId - device ID + @param pszFileName - target filename for dump + @param pui32Registers - register list + @param ui32NumRegisters - number of regs to dump + @param pui32FileOffset - file offset + @param ui32Size - size of write in bytes + @param ui32Flags - pdump flags + @return none + *****************************************************************************/ +static IMG_VOID PDumpRegisterRange(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_CHAR *pszFileName, + IMG_UINT32 *pui32Registers, + IMG_UINT32 ui32NumRegisters, + IMG_UINT32 *pui32FileOffset, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32Flags) +{ + IMG_UINT32 i; + for (i = 0; i < ui32NumRegisters; i++) + { + PDumpSignatureRegister(psDevId, pszFileName, pui32Registers[i], ui32Size, pui32FileOffset, ui32Flags); + } +} + +/***************************************************************************** + @name PDump3DSignatureRegisters + @brief Dumps the signature registers for 3D modules... 
+ @param psDevId - device ID info + @param pui32Registers - register list + @param ui32NumRegisters - number of regs to dump + @return Error +*****************************************************************************/ +PVRSRV_ERROR PDump3DSignatureRegisters(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_UINT32 ui32DumpFrameNum, + IMG_BOOL bLastFrame, + IMG_UINT32 *pui32Registers, + IMG_UINT32 ui32NumRegisters) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32FileOffset, ui32Flags; + + PDUMP_GET_FILE_STRING(); + + ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0; + ui32FileOffset = 0; + + PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump 3D signature registers\r\n"); + eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%u_3d.sig", ui32DumpFrameNum); + if(eErr != PVRSRV_OK) + { + return eErr; + } + + PDumpRegisterRange(psDevId, + pszFileName, + pui32Registers, + ui32NumRegisters, + &ui32FileOffset, + sizeof(IMG_UINT32), + ui32Flags); + + return PVRSRV_OK; +} + +/***************************************************************************** + @name PDumpTASignatureRegisters + @brief Dumps the TA signature registers + @param psDevId - device id info + @param ui32DumpFrameNum - frame number + @param ui32TAKickCount - TA kick counter + @param bLastFrame + @param pui32Registers - register list + @param ui32NumRegisters - number of regs to dump + @return Error +*****************************************************************************/ +PVRSRV_ERROR PDumpTASignatureRegisters (PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_UINT32 ui32DumpFrameNum, + IMG_UINT32 ui32TAKickCount, + IMG_BOOL bLastFrame, + IMG_UINT32 *pui32Registers, + IMG_UINT32 ui32NumRegisters) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32FileOffset, ui32Flags; + + PDUMP_GET_FILE_STRING(); + + ui32Flags = bLastFrame ? 
PDUMP_FLAGS_LASTFRAME : 0; + ui32FileOffset = ui32TAKickCount * ui32NumRegisters * sizeof(IMG_UINT32); + + PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump TA signature registers\r\n"); + eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%u_ta.sig", ui32DumpFrameNum); + if(eErr != PVRSRV_OK) + { + return eErr; + } + + PDumpRegisterRange(psDevId, + pszFileName, + pui32Registers, + ui32NumRegisters, + &ui32FileOffset, + sizeof(IMG_UINT32), + ui32Flags); + return PVRSRV_OK; +} + +/***************************************************************************** + @name PDumpCounterRegisters + @brief Dumps the performance counters + @param psDevId - device id info + @param ui32DumpFrameNum - frame number + @param bLastFrame + @param pui32Registers - register list + @param ui32NumRegisters - number of regs to dump + @return Error +*****************************************************************************/ +PVRSRV_ERROR PDumpCounterRegisters (PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_UINT32 ui32DumpFrameNum, + IMG_BOOL bLastFrame, + IMG_UINT32 *pui32Registers, + IMG_UINT32 ui32NumRegisters) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32FileOffset, ui32Flags; + + PDUMP_GET_FILE_STRING(); + + ui32Flags = bLastFrame ? 
PDUMP_FLAGS_LASTFRAME : 0UL; + ui32FileOffset = 0UL; + + PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump counter registers\r\n"); + eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%u.perf", ui32DumpFrameNum); + if(eErr != PVRSRV_OK) + { + return eErr; + } + + PDumpRegisterRange(psDevId, + pszFileName, + pui32Registers, + ui32NumRegisters, + &ui32FileOffset, + sizeof(IMG_UINT32), + ui32Flags); + + return PVRSRV_OK; +} + +/***************************************************************************** + @name PDumpRegRead + @brief Dump signature register read to script + @param pszPDumpDevName - pdump device name + @param ui32RegOffset - register offset + @param ui32Flags - pdump flags + @return Error +*****************************************************************************/ +PVRSRV_ERROR PDumpRegRead(IMG_CHAR *pszPDumpRegName, + const IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :%s:0x%X\r\n", + pszPDumpRegName, + ui32RegOffset); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + return PVRSRV_OK; +} + +/***************************************************************************** + @name PDumpSaveMemKM + @brief Save device memory to a file + @param psDevId + @param pszFileName + @param ui32FileOffset + @param sDevBaseAddr + @param ui32Size + @param ui32PDumpFlags + @return Error +*****************************************************************************/ +PVRSRV_ERROR PDumpSaveMemKM (PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32PDumpFlags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "SAB :%s:v%x:0x%08X 0x%08X 0x%08X %s.bin\r\n", + psDevId->pszPDumpDevName, + ui32MMUContextID, + sDevBaseAddr.uiAddr, + 
ui32Size, + ui32FileOffset, + pszFileName); + if(eErr != PVRSRV_OK) + { + return eErr; + } + + PDumpOSWriteString2(hScript, ui32PDumpFlags); + return PVRSRV_OK; +} + +/***************************************************************************** + @name PDumpCycleCountRegRead + @brief Dump counter register read to script + @param ui32RegOffset - register offset + @param bLastFrame + @return Error +*****************************************************************************/ +PVRSRV_ERROR PDumpCycleCountRegRead(PVRSRV_DEVICE_IDENTIFIER *psDevId, + const IMG_UINT32 ui32RegOffset, + IMG_BOOL bLastFrame) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :%s:0x%X\r\n", + psDevId->pszPDumpRegName, + ui32RegOffset); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0); + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function PDumpSignatureBuffer + + @Description + + Dumps a signature registers buffer + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PDumpSignatureBuffer (PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_CHAR *pszFileName, + IMG_CHAR *pszBufferType, + IMG_UINT32 ui32FileOffset, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32PDumpFlags) +{ + PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump microkernel %s signature Buffer\r\n", + pszBufferType); + PDumpCommentWithFlags(ui32PDumpFlags, "Buffer format (sizes in 32-bit words):\r\n"); + PDumpCommentWithFlags(ui32PDumpFlags, "\tNumber of signatures per sample (1)\r\n"); + PDumpCommentWithFlags(ui32PDumpFlags, "\tNumber of samples (1)\r\n"); + PDumpCommentWithFlags(ui32PDumpFlags, "\tSignature register offsets (1 * number of signatures)\r\n"); + PDumpCommentWithFlags(ui32PDumpFlags, "\tSignature sample 
values (number of samples * number of signatures)\r\n"); + PDumpCommentWithFlags(ui32PDumpFlags, "Note: If buffer is full, last sample is final state after test completed\r\n"); + return PDumpSaveMemKM(psDevId, pszFileName, ui32FileOffset, sDevBaseAddr, ui32Size, + ui32MMUContextID, ui32PDumpFlags); +} + + +/*! +****************************************************************************** + + @Function PDumpHWPerfCBKM + + @Description + + Dumps the HW Perf Circular Buffer + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PDumpHWPerfCBKM (PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32PDumpFlags) +{ + PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump Hardware Performance Circular Buffer\r\n"); + return PDumpSaveMemKM(psDevId, pszFileName, ui32FileOffset, sDevBaseAddr, ui32Size, + ui32MMUContextID, ui32PDumpFlags); +} + + +/***************************************************************************** + FUNCTION : PDumpCBP + + PURPOSE : Dump CBP command to script + + PARAMETERS : + + RETURNS : None +*****************************************************************************/ +PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo, + IMG_UINT32 ui32ROffOffset, + IMG_UINT32 ui32WPosVal, + IMG_UINT32 ui32PacketSize, + IMG_UINT32 ui32BufferSize, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + PVRSRV_ERROR eErr; + IMG_UINT32 ui32PageOffset; + IMG_UINT8 *pui8LinAddr; + IMG_DEV_VIRTADDR sDevVAddr; + IMG_DEV_PHYADDR sDevPAddr; + IMG_DEV_VIRTADDR sDevVPageAddr; + //IMG_CPU_PHYADDR CpuPAddr; + PDUMP_MMU_ATTRIB *psMMUAttrib; + + PDUMP_GET_SCRIPT_STRING(); + + psMMUAttrib = ((BM_BUF*)psROffMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->psMMUAttrib; + + /* Check the offset and size don't exceed the bounds of the allocation */ + 
PVR_ASSERT((ui32ROffOffset + sizeof(IMG_UINT32)) <= psROffMemInfo->uAllocSize); + + pui8LinAddr = psROffMemInfo->pvLinAddrKM; + sDevVAddr = psROffMemInfo->sDevVAddr; + + /* Advance addresses by offset */ + pui8LinAddr += ui32ROffOffset; + sDevVAddr.uiAddr += ui32ROffOffset; + + /* + query the buffer manager for the physical pages that back the + virtual address + */ + PDumpOSCPUVAddrToPhysPages(psROffMemInfo->sMemBlk.hOSMemHandle, + ui32ROffOffset, + pui8LinAddr, + psMMUAttrib->ui32DataPageMask, + &ui32PageOffset); + + /* calculate the DevV page address */ + sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageOffset; + + PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0); + + /* get the physical page address based on the device virtual address */ + BM_GetPhysPageAddr(psROffMemInfo, sDevVPageAddr, &sDevPAddr); + + /* convert DevP page address to byte address */ + sDevPAddr.uiAddr += ui32PageOffset; + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "CBP :%s:PA_%08X%08X:0x%08X 0x%08X 0x%08X 0x%08X\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr & ~(psMMUAttrib->ui32DataPageMask), + sDevPAddr.uiAddr & (psMMUAttrib->ui32DataPageMask), + ui32WPosVal, + ui32PacketSize, + ui32BufferSize); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, ui32Flags); + return PVRSRV_OK; +} + + +/************************************************************************** + * Function Name : PDumpIDLWithFlags + * Inputs : Idle time in clocks + * Outputs : None + * Returns : Error + * Description : Dump IDL command to script +**************************************************************************/ +PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + PDUMP_DBG(("PDumpIDLWithFlags")); + + eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IDL %u\r\n", ui32Clocks); + if(eErr != PVRSRV_OK) + { + return eErr; + } + 
PDumpOSWriteString2(hScript, ui32Flags); + return PVRSRV_OK; +} + + +/************************************************************************** + * Function Name : PDumpIDL + * Inputs : Idle time in clocks + * Outputs : None + * Returns : Error + * Description : Dump IDL command to script +**************************************************************************/ +PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks) +{ + return PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS); +} + +/************************************************************************** + * Function Name : PDumpMemUM + * Inputs : pvAltLinAddrUM + * : pvLinAddrUM + * : psMemInfo + * : ui32Offset + * : ui32Bytes + * : ui32Flags + * : hUniqueTag + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Dump user mode memory +**************************************************************************/ +PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_PVOID pvAltLinAddrUM, + IMG_PVOID pvLinAddrUM, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag) +{ + IMG_VOID *pvAddrUM; + IMG_VOID *pvAddrKM; + PVRSRV_ERROR eError; + + if (psMemInfo->pvLinAddrKM != IMG_NULL && pvAltLinAddrUM == IMG_NULL) + { + /* + * There is a kernel virtual address for the memory that is + * being dumped, and no alternate user mode linear address. + */ + return PDumpMemKM(IMG_NULL, + psMemInfo, + ui32Offset, + ui32Bytes, + ui32Flags, + hUniqueTag); + } + + pvAddrUM = (pvAltLinAddrUM != IMG_NULL) ? pvAltLinAddrUM : ((pvLinAddrUM != IMG_NULL) ? VPTR_PLUS(pvLinAddrUM, ui32Offset) : IMG_NULL); + + pvAddrKM = GetTempBuffer(); + + /* + * The memory to be dumped needs to be copied in from + * the client. Dump the memory, a buffer at a time. 
+ */ + PVR_ASSERT(pvAddrUM != IMG_NULL && pvAddrKM != IMG_NULL); + if (pvAddrUM == IMG_NULL || pvAddrKM == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: Nothing to dump")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if (ui32Bytes > PDUMP_TEMP_BUFFER_SIZE) + { + PDumpCommentWithFlags(ui32Flags, "Dumping 0x%08x bytes of memory, in blocks of 0x%08x bytes", ui32Bytes, (IMG_UINT32)PDUMP_TEMP_BUFFER_SIZE); + } + + if (psMemInfo->ui32Flags & PVRSRV_MEM_SPARSE) + { + /* + In case of sparse mappings we can't just copy the full range as not + all pages are valid, instead we walk a page at a time only dumping + if the a page exists at that address + */ + IMG_UINT32 ui32BytesRemain = ui32Bytes; + IMG_UINT32 ui32InPageStart = ui32Offset & (~HOST_PAGEMASK); + IMG_UINT32 ui32PageOffset = ui32Offset & (HOST_PAGEMASK); + IMG_UINT32 ui32BytesToCopy = MIN(HOST_PAGESIZE() - ui32InPageStart, ui32BytesRemain); + + do + { + if (BM_MapPageAtOffset(BM_MappingHandleFromBuffer(psMemInfo->sMemBlk.hBuffer), ui32PageOffset)) + { + eError = OSCopyFromUser(psPerProc, + pvAddrKM, + pvAddrUM, + ui32BytesToCopy); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: OSCopyFromUser failed (%d)", eError)); + return eError; + } + + /* + At this point we know we're dumping a valid page so call + the internal function + */ + eError = _PDumpMemIntKM(pvAddrKM, + psMemInfo, + ui32PageOffset + ui32InPageStart, + ui32BytesToCopy, + ui32Flags, + hUniqueTag); + + if (eError != PVRSRV_OK) + { + /* + * If writing fails part way through, then some + * investigation is needed. 
+ */ + if (ui32BytesToCopy != 0) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: PDumpMemKM failed (%d)", eError)); + } + PVR_ASSERT(ui32BytesToCopy == 0); + return eError; + } + } + + VPTR_INC(pvAddrUM, ui32BytesToCopy); + ui32BytesRemain -= ui32BytesToCopy; + ui32InPageStart = 0; + ui32PageOffset += HOST_PAGESIZE(); + } while(ui32BytesRemain); + } + else + { + IMG_UINT32 ui32CurrentOffset = ui32Offset; + IMG_UINT32 ui32BytesDumped; + + for (ui32BytesDumped = 0; ui32BytesDumped < ui32Bytes;) + { + IMG_UINT32 ui32BytesToDump = MIN(PDUMP_TEMP_BUFFER_SIZE, ui32Bytes - ui32BytesDumped); + + eError = OSCopyFromUser(psPerProc, + pvAddrKM, + pvAddrUM, + ui32BytesToDump); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: OSCopyFromUser failed (%d)", eError)); + return eError; + } + + eError = PDumpMemKM(pvAddrKM, + psMemInfo, + ui32CurrentOffset, + ui32BytesToDump, + ui32Flags, + hUniqueTag); + + if (eError != PVRSRV_OK) + { + /* + * If writing fails part way through, then some + * investigation is needed. 
+ */ + if (ui32BytesDumped != 0) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: PDumpMemKM failed (%d)", eError)); + } + PVR_ASSERT(ui32BytesDumped == 0); + return eError; + } + + VPTR_INC(pvAddrUM, ui32BytesToDump); + ui32CurrentOffset += ui32BytesToDump; + ui32BytesDumped += ui32BytesToDump; + } + } + + return PVRSRV_OK; +} + + +/************************************************************************** + * Function Name : _PdumpAllocMMUContext + * Inputs : pui32MMUContextID + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : pdump util to allocate MMU contexts +**************************************************************************/ +static PVRSRV_ERROR _PdumpAllocMMUContext(IMG_UINT32 *pui32MMUContextID) +{ + IMG_UINT32 i; + + /* there are MAX_PDUMP_MMU_CONTEXTS contexts available, find one */ + for(i=0; i<MAX_PDUMP_MMU_CONTEXTS; i++) + { + if((gui16MMUContextUsage & (1U << i)) == 0) + { + /* mark in use */ + gui16MMUContextUsage |= 1U << i; + *pui32MMUContextID = i; + return PVRSRV_OK; + } + } + + PVR_DPF((PVR_DBG_ERROR, "_PdumpAllocMMUContext: no free MMU context ids")); + + return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; +} + + +/************************************************************************** + * Function Name : _PdumpFreeMMUContext + * Inputs : ui32MMUContextID + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : pdump util to free MMU contexts +**************************************************************************/ +static PVRSRV_ERROR _PdumpFreeMMUContext(IMG_UINT32 ui32MMUContextID) +{ + if(ui32MMUContextID < MAX_PDUMP_MMU_CONTEXTS) + { + /* free the id */ + gui16MMUContextUsage &= ~(1U << ui32MMUContextID); + return PVRSRV_OK; + } + + PVR_DPF((PVR_DBG_ERROR, "_PdumpFreeMMUContext: MMU context ids invalid")); + + return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; +} + + +/************************************************************************** + * Function Name : PDumpSetMMUContext + * Inputs : + * Outputs : None + * Returns : 
PVRSRV_ERROR + * Description : Set MMU Context +**************************************************************************/ +PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_CHAR *pszMemSpace, + IMG_UINT32 *pui32MMUContextID, + IMG_UINT32 ui32MMUType, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hOSMemHandle, + IMG_VOID *pvPDCPUAddr) +{ + IMG_UINT8 *pui8LinAddr = (IMG_UINT8 *)pvPDCPUAddr; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_DEV_PHYADDR sDevPAddr; + IMG_UINT32 ui32MMUContextID; + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + + eErr = _PdumpAllocMMUContext(&ui32MMUContextID); + if(eErr != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpSetMMUContext: _PdumpAllocMMUContext failed: %d", eErr)); + return eErr; + } + + /* derive the DevPAddr */ + /* FIXME: if we used OSMemHandleToCPUPAddr() here, we could lose the lin addr arg */ + sCpuPAddr = OSMapLinToCPUPhys(hOSMemHandle, pui8LinAddr); + sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr); + /* and round to 4k page */ + sDevPAddr.uiAddr &= ~((PVRSRV_4K_PAGE_SIZE) -1); + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "MMU :%s:v%d %d :%s:PA_%08X%08X\r\n", + pszMemSpace, + ui32MMUContextID, + ui32MMUType, + pszMemSpace, + (IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag1, + sDevPAddr.uiAddr); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS); + + /* return the MMU Context ID */ + *pui32MMUContextID = ui32MMUContextID; + + return PVRSRV_OK; +} + + +/************************************************************************** + * Function Name : PDumpClearMMUContext + * Inputs : + * Outputs : None + * Returns : PVRSRV_ERROR + * Description : Clear MMU Context +**************************************************************************/ +PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_CHAR *pszMemSpace, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32MMUType) +{ + PVRSRV_ERROR eErr; + PDUMP_GET_SCRIPT_STRING(); + 
PVR_UNREFERENCED_PARAMETER(eDeviceType); + PVR_UNREFERENCED_PARAMETER(ui32MMUType); + + /* FIXME: Propagate error from PDumpComment once it's supported on + * all OSes and platforms + */ + PDumpComment("Clear MMU Context for memory space %s\r\n", pszMemSpace); + eErr = PDumpOSBufprintf(hScript, + ui32MaxLen, + "MMU :%s:v%d\r\n", + pszMemSpace, + ui32MMUContextID); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS); + + eErr = _PdumpFreeMMUContext(ui32MMUContextID); + if(eErr != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PDumpClearMMUContext: _PdumpFreeMMUContext failed: %d", eErr)); + return eErr; + } + + return PVRSRV_OK; +} + +/***************************************************************************** + FUNCTION : PDumpStoreMemToFile + + PURPOSE : Dumps a given addr:size to a file + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +PVRSRV_ERROR PDumpStoreMemToFile(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 uiAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags, + IMG_HANDLE hUniqueTag) +{ + IMG_DEV_PHYADDR sDevPAddr; + IMG_DEV_VIRTADDR sDevVPageAddr; + IMG_UINT32 ui32PageOffset; + + PDUMP_GET_SCRIPT_STRING(); + + /* + query the buffer manager for the physical pages that back the + virtual address + */ + ui32PageOffset = (IMG_UINT32)((IMG_UINTPTR_T)psMemInfo->pvLinAddrKM & psMMUAttrib->ui32DataPageMask); + + /* calculate the DevV page address */ + sDevVPageAddr.uiAddr = uiAddr - ui32PageOffset; + + /* get the physical page address based on the device virtual address */ + BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr); + + /* convert DevP page address to byte address */ + sDevPAddr.uiAddr += ui32PageOffset; + + PDumpOSBufprintf(hScript, + ui32MaxLen, + "SAB :%s:PA_%08X%08X:0x%08X 0x%08X 0x%08X %s\r\n", + psMMUAttrib->sDevId.pszPDumpDevName, + 
(IMG_UINT32)(IMG_UINTPTR_T)hUniqueTag, + sDevPAddr.uiAddr & ~psMMUAttrib->ui32DataPageMask, + sDevPAddr.uiAddr & psMMUAttrib->ui32DataPageMask, + ui32Size, + ui32FileOffset, + pszFileName); + + PDumpOSWriteString2(hScript, ui32PDumpFlags); + + return PVRSRV_OK; +} + +/***************************************************************************** + FUNCTION : PDumpRegBasedCBP + + PURPOSE : Dump CBP command to script + + PARAMETERS : + + RETURNS : None +*****************************************************************************/ +PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32WPosVal, + IMG_UINT32 ui32PacketSize, + IMG_UINT32 ui32BufferSize, + IMG_UINT32 ui32Flags) +{ + PDUMP_GET_SCRIPT_STRING(); + + PDumpOSBufprintf(hScript, + ui32MaxLen, + "CBP :%s:0x%08X 0x%08X 0x%08X 0x%08X\r\n", + pszPDumpRegName, + ui32RegOffset, + ui32WPosVal, + ui32PacketSize, + ui32BufferSize); + PDumpOSWriteString2(hScript, ui32Flags); + + return PVRSRV_OK; +} + + +/**************************************************** + * Non-uitron code here. + * For example, code communicating with dbg driver. + ***************************************************/ +/* PRQA S 5087 1 */ /* include file needed here */ +#include "syscommon.h" + +/************************************************************************** + * Function Name : PDumpConnectionNotify + * Description : Called by the debugdrv to tell Services that pdump has + * connected + * NOTE: No debugdrv on uitron. 
+ **************************************************************************/ +IMG_EXPORT IMG_VOID PDumpConnectionNotify(IMG_VOID) +{ + SYS_DATA *psSysData; + PVRSRV_DEVICE_NODE *psThis; + PVR_DPF((PVR_DBG_WARNING, "PDump has connected.")); + + /* Loop over all known devices */ + SysAcquireData(&psSysData); + + psThis = psSysData->psDeviceNodeList; + while (psThis) + { + if (psThis->pfnPDumpInitDevice) + { + /* Reset pdump according to connected device */ + psThis->pfnPDumpInitDevice(psThis); + } + psThis = psThis->psNext; + } +} + +/***************************************************************************** + * Function Name : DbgWrite + * Inputs : psStream - debug stream to write to + pui8Data - buffer + ui32BCount - buffer length + ui32Flags - flags, e.g. continuous, LF + * Outputs : None + * Returns : Bytes written + * Description : Write a block of data to a debug stream + * NOTE: No debugdrv on uitron. + *****************************************************************************/ +IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags) +{ + IMG_UINT32 ui32BytesWritten = 0; + IMG_UINT32 ui32Off = 0; + PDBG_STREAM_CONTROL psCtrl = psStream->psCtrl; + + /* Return immediately if marked as "never" */ + if ((ui32Flags & PDUMP_FLAGS_NEVER) != 0) + { + return ui32BCount; + } + +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* Return if process is not marked for pdumping, unless it's persistent. + */ + if ( (_PDumpIsProcessActive() == IMG_FALSE ) && + ((ui32Flags & PDUMP_FLAGS_PERSISTENT) == 0) ) + { + return ui32BCount; + } +#endif + + /* Send persistent data first ... + * If we're still initialising the params will be captured to the + * init stream in the call to pfnDBGDrivWrite2 below. + */ + if ( ((ui32Flags & PDUMP_FLAGS_PERSISTENT) != 0) && (psCtrl->bInitPhaseComplete) ) + { + while (ui32BCount > 0) + { + /* + Params marked as persistent should be appended to the init phase. 
+ For example window system mem mapping of the primary surface. + */ + ui32BytesWritten = PDumpOSDebugDriverWrite( psStream, + PDUMP_WRITE_MODE_PERSISTENT, + &pui8Data[ui32Off], ui32BCount, 1, 0); + + if (ui32BytesWritten == 0) + { + PDumpOSReleaseExecution(); + } + + if (ui32BytesWritten != 0xFFFFFFFFU) + { + ui32Off += ui32BytesWritten; + ui32BCount -= ui32BytesWritten; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "DbgWrite: Failed to send persistent data")); + if( (psCtrl->ui32Flags & DEBUG_FLAGS_READONLY) != 0) + { + /* suspend pdump to prevent flooding kernel log buffer */ + PDumpSuspendKM(); + } + return 0xFFFFFFFFU; + } + } + + /* reset buffer counters */ + ui32BCount = ui32Off; ui32Off = 0; ui32BytesWritten = 0; + } + + while (((IMG_UINT32) ui32BCount > 0) && (ui32BytesWritten != 0xFFFFFFFFU)) + { + if ((ui32Flags & PDUMP_FLAGS_CONTINUOUS) != 0) + { + /* + If pdump client (or its equivalent) isn't running then throw continuous data away. + */ + if (((psCtrl->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) && + (psCtrl->ui32Start == 0xFFFFFFFFU) && + (psCtrl->ui32End == 0xFFFFFFFFU) && + psCtrl->bInitPhaseComplete) + { + ui32BytesWritten = ui32BCount; + } + else + { + ui32BytesWritten = PDumpOSDebugDriverWrite( psStream, + PDUMP_WRITE_MODE_CONTINUOUS, + &pui8Data[ui32Off], ui32BCount, 1, 0); + } + } + else + { + if (ui32Flags & PDUMP_FLAGS_LASTFRAME) + { + IMG_UINT32 ui32DbgFlags; + + ui32DbgFlags = 0; + if (ui32Flags & PDUMP_FLAGS_RESETLFBUFFER) + { + ui32DbgFlags |= WRITELF_FLAGS_RESETBUF; + } + + ui32BytesWritten = PDumpOSDebugDriverWrite( psStream, + PDUMP_WRITE_MODE_LASTFRAME, + &pui8Data[ui32Off], ui32BCount, 1, ui32DbgFlags); + } + else + { + ui32BytesWritten = PDumpOSDebugDriverWrite( psStream, + PDUMP_WRITE_MODE_BINCM, + &pui8Data[ui32Off], ui32BCount, 1, 0); + } + } + + /* + If the debug driver's buffers are full so no data could be written then yield + execution so pdump can run and empty them. 
+ */ + if (ui32BytesWritten == 0) + { + PDumpOSReleaseExecution(); + } + + if (ui32BytesWritten != 0xFFFFFFFFU) + { + ui32Off += ui32BytesWritten; + ui32BCount -= ui32BytesWritten; + } + + /* loop exits when i) all data is written, or ii) an unrecoverable error occurs */ + } + + return ui32BytesWritten; +} + + + +#else /* defined(PDUMP) */ +/* disable warning about empty module */ +#endif /* defined(PDUMP) */ +/***************************************************************************** + End of file (pdump_common.c) +*****************************************************************************/ diff --git a/pvr-source/services4/srvkm/common/perproc.c b/pvr-source/services4/srvkm/common/perproc.c new file mode 100644 index 0000000..3918bb2 --- /dev/null +++ b/pvr-source/services4/srvkm/common/perproc.c @@ -0,0 +1,398 @@ +/*************************************************************************/ /*! +@Title Per-process storage +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Manage per-process storage +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "services_headers.h" +#include "resman.h" +#include "handle.h" +#include "perproc.h" +#include "osperproc.h" +#if defined(TTRACE) +#include "ttrace.h" +#endif + +#define HASH_TAB_INIT_SIZE 32 + +static HASH_TABLE *psHashTab = IMG_NULL; + +/*! 
+****************************************************************************** + + @Function FreePerProcData + + @Description Free a per-process data area + + @Input psPerProc - pointer to per-process data area + + @Return Error code, or PVRSRV_OK + +******************************************************************************/ +static PVRSRV_ERROR FreePerProcessData(PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_ERROR eError; + IMG_UINTPTR_T uiPerProc; + + PVR_ASSERT(psPerProc != IMG_NULL); + + if (psPerProc == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: invalid parameter")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + uiPerProc = HASH_Remove(psHashTab, (IMG_UINTPTR_T)psPerProc->ui32PID); + if (uiPerProc == 0) + { + PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't find process in per-process data hash table")); + /* + * We must have failed early in the per-process data area + * creation, before the process ID was set. + */ + PVR_ASSERT(psPerProc->ui32PID == 0); + } + else + { + PVR_ASSERT((PVRSRV_PER_PROCESS_DATA *)uiPerProc == psPerProc); + PVR_ASSERT(((PVRSRV_PER_PROCESS_DATA *)uiPerProc)->ui32PID == psPerProc->ui32PID); + } + + /* Free handle base for this process */ + if (psPerProc->psHandleBase != IMG_NULL) + { + eError = PVRSRVFreeHandleBase(psPerProc->psHandleBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free handle base for process (%d)", eError)); + return eError; + } + } + + /* Release handle for per-process data area */ + if (psPerProc->hPerProcData != IMG_NULL) + { + eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE, psPerProc->hPerProcData, PVRSRV_HANDLE_TYPE_PERPROC_DATA); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't release per-process data handle (%d)", eError)); + return eError; + } + } + + /* Call environment specific per process deinit function */ + eError = OSPerProcessPrivateDataDeInit(psPerProc->hOsPrivateData); + if (eError != 
PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: OSPerProcessPrivateDataDeInit failed (%d)", eError)); + return eError; + } + + eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(*psPerProc), + psPerProc, + psPerProc->hBlockAlloc); + /*not nulling pointer, copy on stack*/ + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free per-process data (%d)", eError)); + return eError; + } + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function PVRSRVPerProcessData + + @Description Return per-process data area + + @Input ui32PID - process ID + + @Return Pointer to per-process data area, or IMG_NULL on error. + +******************************************************************************/ +PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID) +{ + PVRSRV_PER_PROCESS_DATA *psPerProc; + + PVR_ASSERT(psHashTab != IMG_NULL); + + /* Look for existing per-process data area */ + psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID); + return psPerProc; +} + + +/*! +****************************************************************************** + + @Function PVRSRVPerProcessDataConnect + + @Description Allocate per-process data area, or increment refcount if one + already exists for this PID. 
+ + @Input ui32PID - process ID + ppsPerProc - Pointer to per-process data area + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID, IMG_UINT32 ui32Flags) +{ + PVRSRV_PER_PROCESS_DATA *psPerProc; + IMG_HANDLE hBlockAlloc; + PVRSRV_ERROR eError = PVRSRV_OK; + + if (psHashTab == IMG_NULL) + { + return PVRSRV_ERROR_INIT_FAILURE; + } + + /* Look for existing per-process data area */ + psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID); + + if (psPerProc == IMG_NULL) + { + /* Allocate per-process data area */ + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(*psPerProc), + (IMG_PVOID *)&psPerProc, + &hBlockAlloc, + "Per Process Data"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate per-process data (%d)", eError)); + return eError; + } + OSMemSet(psPerProc, 0, sizeof(*psPerProc)); + psPerProc->hBlockAlloc = hBlockAlloc; + + if (!HASH_Insert(psHashTab, (IMG_UINTPTR_T)ui32PID, (IMG_UINTPTR_T)psPerProc)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't insert per-process data into hash table")); + eError = PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED; + goto failure; + } + + psPerProc->ui32PID = ui32PID; + psPerProc->ui32RefCount = 0; + +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + if (ui32Flags == SRV_FLAGS_PDUMP_ACTIVE) + { + psPerProc->bPDumpActive = IMG_TRUE; + } +#else + PVR_UNREFERENCED_PARAMETER(ui32Flags); +#endif + + /* Call environment specific per process init function */ + eError = OSPerProcessPrivateDataInit(&psPerProc->hOsPrivateData); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: OSPerProcessPrivateDataInit failed (%d)", eError)); + goto failure; + } + + /* Allocate a handle for the per-process data area */ + eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE, + &psPerProc->hPerProcData, + 
psPerProc, + PVRSRV_HANDLE_TYPE_PERPROC_DATA, + PVRSRV_HANDLE_ALLOC_FLAG_NONE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle for per-process data (%d)", eError)); + goto failure; + } + + /* Allocate handle base for this process */ + eError = PVRSRVAllocHandleBase(&psPerProc->psHandleBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle base for process (%d)", eError)); + goto failure; + } + + /* Set per-process handle options */ + eError = OSPerProcessSetHandleOptions(psPerProc->psHandleBase); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't set handle options (%d)", eError)); + goto failure; + } + + /* Create a resource manager context for the process */ + eError = PVRSRVResManConnect(psPerProc, &psPerProc->hResManContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't register with the resource manager")); + goto failure; + } +#if defined (TTRACE) + PVRSRVTimeTraceBufferCreate(ui32PID); +#endif + } + + psPerProc->ui32RefCount++; + PVR_DPF((PVR_DBG_MESSAGE, + "PVRSRVPerProcessDataConnect: Process 0x%x has ref-count %d", + ui32PID, psPerProc->ui32RefCount)); + + return eError; + +failure: + (IMG_VOID)FreePerProcessData(psPerProc); + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVPerProcessDataDisconnect + + @Description Decrement refcount for per-process data area, + and free the resources if necessary. 
+ + @Input ui32PID - process ID + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID) +{ + PVRSRV_ERROR eError; + PVRSRV_PER_PROCESS_DATA *psPerProc; + + PVR_ASSERT(psHashTab != IMG_NULL); + + psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID); + if (psPerProc == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDealloc: Couldn't locate per-process data for PID %u", ui32PID)); + } + else + { + psPerProc->ui32RefCount--; + if (psPerProc->ui32RefCount == 0) + { + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVPerProcessDataDisconnect: " + "Last close from process 0x%x received", ui32PID)); + + /* Close the Resource Manager connection */ + PVRSRVResManDisconnect(psPerProc->hResManContext, IMG_FALSE); + +#if defined (TTRACE) + PVRSRVTimeTraceBufferDestroy(ui32PID); +#endif + + /* Free the per-process data */ + eError = FreePerProcessData(psPerProc); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Error freeing per-process data")); + } + } + } + + eError = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Purge of global handle pool failed (%d)", eError)); + } +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVPerProcessDataInit + + @Description Initialise per-process data management + + @Return Error code, or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID) +{ + PVR_ASSERT(psHashTab == IMG_NULL); + + /* Create hash table */ + psHashTab = HASH_Create(HASH_TAB_INIT_SIZE); + if (psHashTab == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataInit: Couldn't create per-process data hash table")); + return PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE; + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVPerProcessDataDeInit + + @Description De-initialise per-process data management + + @Return Error code, or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID) +{ + /* Destroy per-process data area hash table */ + if (psHashTab != IMG_NULL) + { + /* Free the hash table */ + HASH_Delete(psHashTab); + psHashTab = IMG_NULL; + } + + return PVRSRV_OK; +} + +/****************************************************************************** + End of file (perproc.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/common/power.c b/pvr-source/services4/srvkm/common/power.c new file mode 100644 index 0000000..511a690 --- /dev/null +++ b/pvr-source/services4/srvkm/common/power.c @@ -0,0 +1,996 @@ +/*************************************************************************/ /*! +@Title Power management functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Main APIs for power management functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "services_headers.h" +#include "pdump_km.h" + +#include "lists.h" + +static IMG_BOOL gbInitServerRunning = IMG_FALSE; +static IMG_BOOL gbInitServerRan = IMG_FALSE; +static IMG_BOOL gbInitSuccessful = IMG_FALSE; + +/*! +****************************************************************************** + + @Function PVRSRVSetInitServerState + + @Description Sets given services init state. + + @Input eInitServerState : a services init state + @Input bState : a state to set + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState) +{ + + switch(eInitServerState) + { + case PVRSRV_INIT_SERVER_RUNNING: + gbInitServerRunning = bState; + break; + case PVRSRV_INIT_SERVER_RAN: + gbInitServerRan = bState; + break; + case PVRSRV_INIT_SERVER_SUCCESSFUL: + gbInitSuccessful = bState; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVSetInitServerState : Unknown state %x", eInitServerState)); + return PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE; + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVGetInitServerState + + @Description Tests whether a given services init state was run. 
+ + @Input eInitServerState : a services init state + + @Return IMG_BOOL + +******************************************************************************/ +IMG_EXPORT +IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState) +{ + IMG_BOOL bReturnVal; + + switch(eInitServerState) + { + case PVRSRV_INIT_SERVER_RUNNING: + bReturnVal = gbInitServerRunning; + break; + case PVRSRV_INIT_SERVER_RAN: + bReturnVal = gbInitServerRan; + break; + case PVRSRV_INIT_SERVER_SUCCESSFUL: + bReturnVal = gbInitSuccessful; + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVGetInitServerState : Unknown state %x", eInitServerState)); + bReturnVal = IMG_FALSE; + } + + return bReturnVal; +} + +/*! +****************************************************************************** + + @Function _IsSystemStatePowered + + @Description Tests whether a given system state represents powered-up. + + @Input eSystemPowerState : a system power state + + @Return IMG_BOOL + +******************************************************************************/ +static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState) +{ + return (IMG_BOOL)(eSystemPowerState < PVRSRV_SYS_POWER_STATE_D2); +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVPowerLock + + @Description Obtain the mutex for power transitions + + @Input ui32CallerID : KERNEL_ID or ISR_ID + @Input bSystemPowerEvent : Only pass IMG_TRUE if the lock is for a + system power state change + + @Return PVRSRV_ERROR IMG_CALLCONV + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID, + IMG_BOOL bSystemPowerEvent) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + IMG_UINT32 ui32Timeout = 1000000; + IMG_BOOL bTryLock = (ui32CallerID == ISR_ID); + + SysAcquireData(&psSysData); + + eError = OSPowerLockWrap(bTryLock); + if (eError != PVRSRV_OK) + { + return eError; + } + + do + { + eError = OSLockResource(&psSysData->sPowerStateChangeResource, + ui32CallerID); + if (eError == PVRSRV_OK) + { + break; + } + else if (bTryLock) + { + /* + ISR failed to acquire lock so it must be held by a kernel thread. + */ + eError = PVRSRV_ERROR_RETRY; + break; + } + + OSWaitus(1); + ui32Timeout--; + } while (ui32Timeout > 0); + + if (eError != PVRSRV_OK) + { + OSPowerLockUnwrap(); + } + + /* PRQA S 3415 3 */ /* side effects desired */ + if ((eError == PVRSRV_OK) && + !bSystemPowerEvent && + !_IsSystemStatePowered(psSysData->eCurrentPowerState)) + { + /* Reject device power state change due to system power state. */ + PVRSRVPowerUnlock(ui32CallerID); + eError = PVRSRV_ERROR_RETRY; + } + + return eError; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVPowerUnlock + + @Description Release the mutex for power transitions + + @Input ui32CallerID : KERNEL_ID or ISR_ID + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID) +{ + OSUnlockResource(&gpsSysData->sPowerStateChangeResource, ui32CallerID); + OSPowerLockUnwrap(); +} + + +/*! +****************************************************************************** + + @Function PVRSRVDevicePrePowerStateKM_AnyVaCb + + @Description + + Perform device-specific processing required before a power transition + + @Input psPowerDevice : the device + @Input va : variable argument list with: + bAllDevices : IMG_TRUE - All devices + IMG_FALSE - Use ui32DeviceIndex + ui32DeviceIndex : device index + eNewPowerState : New power state + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR PVRSRVDevicePrePowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va) +{ + PVRSRV_DEV_POWER_STATE eNewDevicePowerState; + PVRSRV_ERROR eError; + + /*Variable Argument variables*/ + IMG_BOOL bAllDevices; + IMG_UINT32 ui32DeviceIndex; + PVRSRV_DEV_POWER_STATE eNewPowerState; + + /* WARNING: if types were not aligned to 4 bytes, this could be dangerous. */ + bAllDevices = va_arg(va, IMG_BOOL); + ui32DeviceIndex = va_arg(va, IMG_UINT32); + eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE); + + if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex)) + { + eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? + psPowerDevice->eDefaultPowerState : eNewPowerState; + + if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState) + { + if (psPowerDevice->pfnPrePower != IMG_NULL) + { + /* Call the device's power callback. 
*/ + eError = psPowerDevice->pfnPrePower(psPowerDevice->hDevCookie, + eNewDevicePowerState, + psPowerDevice->eCurrentPowerState); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + /* Do any required system-layer processing. */ + eError = SysDevicePrePowerState(psPowerDevice->ui32DeviceIndex, + eNewDevicePowerState, + psPowerDevice->eCurrentPowerState); + if (eError != PVRSRV_OK) + { + return eError; + } + } + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVDevicePrePowerStateKM + + @Description + + Perform device-specific processing required before a power transition + + @Input bAllDevices : IMG_TRUE - All devices + IMG_FALSE - Use ui32DeviceIndex + @Input ui32DeviceIndex : device index + @Input eNewPowerState : New power state + + @Return PVRSRV_ERROR + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(IMG_BOOL bAllDevices, + IMG_UINT32 ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE eNewPowerState) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + /* Loop through the power devices. */ + eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->psPowerDeviceList, + &PVRSRVDevicePrePowerStateKM_AnyVaCb, + bAllDevices, + ui32DeviceIndex, + eNewPowerState); + + return eError; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVDevicePostPowerStateKM_AnyVaCb + + @Description + + Perform device-specific processing required after a power transition + + @Input psPowerDevice : the device + @Input va : variable argument list with: + bAllDevices : IMG_TRUE - All devices + IMG_FALSE - Use ui32DeviceIndex + ui32DeviceIndex : device index + eNewPowerState : New power state + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR PVRSRVDevicePostPowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va) +{ + PVRSRV_DEV_POWER_STATE eNewDevicePowerState; + PVRSRV_ERROR eError; + + /*Variable Argument variables*/ + IMG_BOOL bAllDevices; + IMG_UINT32 ui32DeviceIndex; + PVRSRV_DEV_POWER_STATE eNewPowerState; + + /* WARNING: if types were not aligned to 4 bytes, this could be dangerous. */ + bAllDevices = va_arg(va, IMG_BOOL); + ui32DeviceIndex = va_arg(va, IMG_UINT32); + eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE); + + if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex)) + { + eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? + psPowerDevice->eDefaultPowerState : eNewPowerState; + + if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState) + { + /* Do any required system-layer processing. */ + eError = SysDevicePostPowerState(psPowerDevice->ui32DeviceIndex, + eNewDevicePowerState, + psPowerDevice->eCurrentPowerState); + if (eError != PVRSRV_OK) + { + return eError; + } + + if (psPowerDevice->pfnPostPower != IMG_NULL) + { + /* Call the device's power callback. */ + eError = psPowerDevice->pfnPostPower(psPowerDevice->hDevCookie, + eNewDevicePowerState, + psPowerDevice->eCurrentPowerState); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + psPowerDevice->eCurrentPowerState = eNewDevicePowerState; + } + } + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVDevicePostPowerStateKM + + @Description + + Perform device-specific processing required after a power transition + + @Input bAllDevices : IMG_TRUE - All devices + IMG_FALSE - Use ui32DeviceIndex + @Input ui32DeviceIndex : device index + @Input eNewPowerState : New power state + + @Return PVRSRV_ERROR + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(IMG_BOOL bAllDevices, + IMG_UINT32 ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE eNewPowerState) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + /* Loop through the power devices. */ + eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->psPowerDeviceList, + &PVRSRVDevicePostPowerStateKM_AnyVaCb, + bAllDevices, + ui32DeviceIndex, + eNewPowerState); + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVSetDevicePowerStateKM + + @Description Set the Device into a new state + + @Input ui32DeviceIndex : device index + @Input eNewPowerState : New power state + @Input ui32CallerID : KERNEL_ID or ISR_ID + @Input bRetainMutex : If true, the power mutex is retained on exit + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE eNewPowerState) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + #if defined(PDUMP) + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) + { + /* + Pdump a power-up regardless of the default state. + Then disable pdump and transition to the default power state. + This ensures that a power-up is always present in the pdump when necessary. 
+ */ + eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON); + if(eError != PVRSRV_OK) + { + goto Exit; + } + + eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON); + + if (eError != PVRSRV_OK) + { + goto Exit; + } + + PDUMPSUSPEND(); + } + #endif /* PDUMP */ + + eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState); + if(eError != PVRSRV_OK) + { + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) + { + PDUMPRESUME(); + } + goto Exit; + } + + eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState); + + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) + { + PDUMPRESUME(); + } + +Exit: + + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVSetDevicePowerStateKM : Transition to %d FAILED 0x%x", eNewPowerState, eError)); + } + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVSystemPrePowerStateKM + + @Description Perform processing required before a system power transition + + @Input eNewSysPowerState : + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + PVRSRV_DEV_POWER_STATE eNewDevicePowerState; + + SysAcquireData(&psSysData); + + /* This mutex is unlocked in PVRSRVSystemPostPowerStateKM() */ + eError = PVRSRVPowerLock(KERNEL_ID, IMG_TRUE); + if(eError != PVRSRV_OK) + { + return eError; + } + + if (_IsSystemStatePowered(eNewSysPowerState) != + _IsSystemStatePowered(psSysData->eCurrentPowerState)) + { + if (_IsSystemStatePowered(eNewSysPowerState)) + { + /* Return device back to its default state. 
*/ + eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT; + } + else + { + eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF; + } + + /* Perform device-specific transitions. */ + eError = PVRSRVDevicePrePowerStateKM(IMG_TRUE, 0, eNewDevicePowerState); + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + } + + if (eNewSysPowerState != psSysData->eCurrentPowerState) + { + /* Perform system-specific power transitions. */ + eError = SysSystemPrePowerState(eNewSysPowerState); + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + } + + return eError; + +ErrorExit: + + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVSystemPrePowerStateKM: Transition from %d to %d FAILED 0x%x", + psSysData->eCurrentPowerState, eNewSysPowerState, eError)); + + /* save the power state for the re-attempt */ + psSysData->eFailedPowerState = eNewSysPowerState; + + PVRSRVPowerUnlock(KERNEL_ID); + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVSystemPostPowerStateKM + + @Description Perform processing required after a system power transition + + @Input eNewSysPowerState : + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYS_DATA *psSysData; + PVRSRV_DEV_POWER_STATE eNewDevicePowerState; + + SysAcquireData(&psSysData); + + if (eNewSysPowerState != psSysData->eCurrentPowerState) + { + /* Perform system-specific power transitions. */ + eError = SysSystemPostPowerState(eNewSysPowerState); + if (eError != PVRSRV_OK) + { + goto Exit; + } + } + + if (_IsSystemStatePowered(eNewSysPowerState) != + _IsSystemStatePowered(psSysData->eCurrentPowerState)) + { + if (_IsSystemStatePowered(eNewSysPowerState)) + { + /* Return device back to its default state. 
*/ + eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT; + } + else + { + eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF; + } + + /* Perform device-specific power transitions. */ + eError = PVRSRVDevicePostPowerStateKM(IMG_TRUE, 0, eNewDevicePowerState); + if (eError != PVRSRV_OK) + { + goto Exit; + } + } + + PVR_DPF((PVR_DBG_MESSAGE, + "PVRSRVSystemPostPowerStateKM: System Power Transition from %d to %d OK", + psSysData->eCurrentPowerState, eNewSysPowerState)); + + psSysData->eCurrentPowerState = eNewSysPowerState; + +Exit: + + PVRSRVPowerUnlock(KERNEL_ID); + + /* PRQA S 3415 2 */ /* side effects desired */ + if (_IsSystemStatePowered(eNewSysPowerState) && + PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL)) + { + /* + Reprocess the devices' queues in case commands were blocked during + the power transition. + */ + PVRSRVScheduleDeviceCallbacks(); + } + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVSetPowerStateKM + + @Description Set the system into a new state + + @Input eNewPowerState : + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + + SysAcquireData(&psSysData); + + eError = PVRSRVSystemPrePowerStateKM(eNewSysPowerState); + if(eError != PVRSRV_OK) + { + goto ErrorExit; + } + + eError = PVRSRVSystemPostPowerStateKM(eNewSysPowerState); + if(eError != PVRSRV_OK) + { + goto ErrorExit; + } + + /* save new power state */ + psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified; + + return PVRSRV_OK; + +ErrorExit: + + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVSetPowerStateKM: Transition from %d to %d FAILED 0x%x", + psSysData->eCurrentPowerState, eNewSysPowerState, eError)); + + /* save the power state for the re-attempt */ + psSysData->eFailedPowerState = eNewSysPowerState; 
+ + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVRegisterPowerDevice + + @Description + + Registers a device with the power manager. Passes Pre/Post Power handlers + and private device handle to be passed to power handlers + + @Input ui32DeviceIndex : device index + @Input pfnPrePower : Pre power transition handler + @Input pfnPostPower : Post power transition handler + @Input pfnPreClockSpeedChange : Pre clock speed transition handler (if required) + @Input pfnPostClockSpeedChange : Post clock speed transition handler (if required) + @Input hDevCookie : Dev cookie for dev power handlers + @Input eCurrentPowerState : Current power state of the device + @Input eDefaultPowerState : Default power state of the device + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex, + PFN_PRE_POWER pfnPrePower, + PFN_POST_POWER pfnPostPower, + PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange, + PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange, + IMG_HANDLE hDevCookie, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + PVRSRV_DEV_POWER_STATE eDefaultPowerState) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + PVRSRV_POWER_DEV *psPowerDevice; + + if (pfnPrePower == IMG_NULL && + pfnPostPower == IMG_NULL) + { + return PVRSRVRemovePowerDevice(ui32DeviceIndex); + } + + SysAcquireData(&psSysData); + + eError = OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_POWER_DEV), + (IMG_VOID **)&psPowerDevice, IMG_NULL, + "Power Device"); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterPowerDevice: Failed to alloc PVRSRV_POWER_DEV")); + return eError; + } + + /* setup device for power manager */ + psPowerDevice->pfnPrePower = pfnPrePower; + psPowerDevice->pfnPostPower = pfnPostPower; + psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange; + 
psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange; + psPowerDevice->hDevCookie = hDevCookie; + psPowerDevice->ui32DeviceIndex = ui32DeviceIndex; + psPowerDevice->eCurrentPowerState = eCurrentPowerState; + psPowerDevice->eDefaultPowerState = eDefaultPowerState; + + /* insert into power device list */ + List_PVRSRV_POWER_DEV_Insert(&(psSysData->psPowerDeviceList), psPowerDevice); + + return (PVRSRV_OK); +} + + +/*! +****************************************************************************** + + @Function PVRSRVRemovePowerDevice + + @Description + + Removes device from power management register. Device is located by Device Index + + @Input ui32DeviceIndex : device index + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex) +{ + SYS_DATA *psSysData; + PVRSRV_POWER_DEV *psPowerDev; + + SysAcquireData(&psSysData); + + /* find device in list and remove it */ + psPowerDev = (PVRSRV_POWER_DEV*) + List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList, + &MatchPowerDeviceIndex_AnyVaCb, + ui32DeviceIndex); + + if (psPowerDev) + { + List_PVRSRV_POWER_DEV_Remove(psPowerDev); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_POWER_DEV), psPowerDev, IMG_NULL); + /*not nulling pointer, copy on stack*/ + } + + return (PVRSRV_OK); +} + + +/*! +****************************************************************************** + + @Function PVRSRVIsDevicePowered + + @Description + + Whether the device is powered, for the purposes of lockup detection. 
+ + @Input ui32DeviceIndex : device index + + @Return IMG_BOOL + +******************************************************************************/ +IMG_EXPORT +IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex) +{ + SYS_DATA *psSysData; + PVRSRV_POWER_DEV *psPowerDevice; + + SysAcquireData(&psSysData); + + /* PRQA S 3415 2 */ /* order not important */ + if (OSIsResourceLocked(&psSysData->sPowerStateChangeResource, KERNEL_ID) || + OSIsResourceLocked(&psSysData->sPowerStateChangeResource, ISR_ID)) + { + return IMG_FALSE; + } + + psPowerDevice = (PVRSRV_POWER_DEV*) + List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList, + &MatchPowerDeviceIndex_AnyVaCb, + ui32DeviceIndex); + return (psPowerDevice && (psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)) + ? IMG_TRUE : IMG_FALSE; +} + + +/*! +****************************************************************************** + + @Function PVRSRVDevicePreClockSpeedChange + + @Description + + Notification from system layer that a device clock speed change is about to happen. + + @Input ui32DeviceIndex : device index + @Input bIdleDevice : whether the device should be idled + @Input pvInfo + + @Return IMG_VOID + +******************************************************************************/ +PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex, + IMG_BOOL bIdleDevice, + IMG_VOID *pvInfo) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + SYS_DATA *psSysData; + PVRSRV_POWER_DEV *psPowerDevice; + + PVR_UNREFERENCED_PARAMETER(pvInfo); + + SysAcquireData(&psSysData); + + if (bIdleDevice) + { + /* This mutex is released in PVRSRVDevicePostClockSpeedChange. 
*/ + eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePreClockSpeedChange : failed to acquire lock, error:0x%x", eError)); + return eError; + } + } + + /*search the device and then do the pre clock speed change*/ + psPowerDevice = (PVRSRV_POWER_DEV*) + List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList, + &MatchPowerDeviceIndex_AnyVaCb, + ui32DeviceIndex); + + if (psPowerDevice && psPowerDevice->pfnPostClockSpeedChange) + { + eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie, + bIdleDevice, + psPowerDevice->eCurrentPowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVDevicePreClockSpeedChange : Device %u failed, error:0x%x", + ui32DeviceIndex, eError)); + } + } + + if (bIdleDevice && eError != PVRSRV_OK) + { + PVRSRVPowerUnlock(KERNEL_ID); + } + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVDevicePostClockSpeedChange + + @Description + + Notification from system layer that a device clock speed change has just happened. 
+ + @Input ui32DeviceIndex : device index + @Input bIdleDevice : whether the device had been idled + @Input pvInfo + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex, + IMG_BOOL bIdleDevice, + IMG_VOID *pvInfo) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + PVRSRV_POWER_DEV *psPowerDevice; + + PVR_UNREFERENCED_PARAMETER(pvInfo); + + SysAcquireData(&psSysData); + + /*search the device and then do the post clock speed change*/ + psPowerDevice = (PVRSRV_POWER_DEV*) + List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList, + &MatchPowerDeviceIndex_AnyVaCb, + ui32DeviceIndex); + + if (psPowerDevice && psPowerDevice->pfnPostClockSpeedChange) + { + eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie, + bIdleDevice, + psPowerDevice->eCurrentPowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVDevicePostClockSpeedChange : Device %u failed, error:0x%x", + ui32DeviceIndex, eError)); + } + } + + + if (bIdleDevice) + { + /* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. */ + PVRSRVPowerUnlock(KERNEL_ID); + } +} + +/****************************************************************************** + End of file (power.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/common/pvrsrv.c b/pvr-source/services4/srvkm/common/pvrsrv.c new file mode 100644 index 0000000..1b5312c --- /dev/null +++ b/pvr-source/services4/srvkm/common/pvrsrv.c @@ -0,0 +1,1846 @@ +/*************************************************************************/ /*! +@Title core services functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Main APIs for core services functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "services_headers.h" +#include "buffer_manager.h" +#include "pvr_bridge_km.h" +#include "handle.h" +#include "perproc.h" +#include "pdump_km.h" +#include "deviceid.h" +#include "ra.h" +#if defined(__linux__) +#include "sysfs.h" +#endif +#if defined(TTRACE) +#include "ttrace.h" +#endif +#include "perfkm.h" + +#include "pvrversion.h" + +#include "lists.h" + +IMG_UINT32 g_ui32InitFlags; +extern int powering_down; + +/* mark which parts of Services were initialised */ +#define INIT_DATA_ENABLE_PDUMPINIT 0x1U +#define INIT_DATA_ENABLE_TTARCE 0x2U + +/*! +****************************************************************************** + + @Function AllocateDeviceID + + @Description + + allocates a device id from the pool of valid ids + + @input psSysData : system data + + @input pui32DevID : device id to return + + @Return device id + +******************************************************************************/ +PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID) +{ + SYS_DEVICE_ID* psDeviceWalker; + SYS_DEVICE_ID* psDeviceEnd; + + psDeviceWalker = &psSysData->sDeviceID[0]; + psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices; + + /* find a free ID */ + while (psDeviceWalker < psDeviceEnd) + { + if (!psDeviceWalker->bInUse) + { + psDeviceWalker->bInUse = IMG_TRUE; + *pui32DevID = psDeviceWalker->uiID; + return PVRSRV_OK; + } + psDeviceWalker++; + } + + PVR_DPF((PVR_DBG_ERROR,"AllocateDeviceID: No free and valid device IDs available!")); + + /* Should never get here: sDeviceID[] may have been setup too small */ + PVR_ASSERT(psDeviceWalker < psDeviceEnd); + + return PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVALIABLE; +} + + +/*! 
+****************************************************************************** + + @Function FreeDeviceID + + @Description + + frees a device id from the pool of valid ids + + @input psSysData : system data + + @input ui32DevID : device id to free + + @Return device id + +******************************************************************************/ +PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID) +{ + SYS_DEVICE_ID* psDeviceWalker; + SYS_DEVICE_ID* psDeviceEnd; + + psDeviceWalker = &psSysData->sDeviceID[0]; + psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices; + + /* find the ID to free */ + while (psDeviceWalker < psDeviceEnd) + { + /* if matching id and in use, free */ + if ( + (psDeviceWalker->uiID == ui32DevID) && + (psDeviceWalker->bInUse) + ) + { + psDeviceWalker->bInUse = IMG_FALSE; + return PVRSRV_OK; + } + psDeviceWalker++; + } + + PVR_DPF((PVR_DBG_ERROR,"FreeDeviceID: no matching dev ID that is in use!")); + + /* should never get here */ + PVR_ASSERT(psDeviceWalker < psDeviceEnd); + + return PVRSRV_ERROR_INVALID_DEVICEID; +} + + +/*! +****************************************************************************** + + @Function ReadHWReg + + @Description + + register access function + + @input pvLinRegBaseAddr : lin addr of register block base + + @input ui32Offset : byte offset from register base + + @Return register value + +******************************************************************************/ +#ifndef ReadHWReg +IMG_EXPORT +IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset) +{ + return *(volatile IMG_UINT32*)((IMG_UINTPTR_T)pvLinRegBaseAddr+ui32Offset); +} +#endif + + +/*! 
+****************************************************************************** + + @Function WriteHWReg + + @Description + + register access function + + @input pvLinRegBaseAddr : lin addr of register block base + + @input ui32Offset : byte offset from register base + + @input ui32Value : value to write to register + + @Return register value : original reg. value + +******************************************************************************/ +#ifndef WriteHWReg +IMG_EXPORT +IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value) +{ + PVR_DPF((PVR_DBG_MESSAGE,"WriteHWReg Base:%x, Offset: %x, Value %x", + (IMG_UINTPTR_T)pvLinRegBaseAddr,ui32Offset,ui32Value)); + + *(IMG_UINT32*)((IMG_UINTPTR_T)pvLinRegBaseAddr+ui32Offset) = ui32Value; +} +#endif + + +/*! +****************************************************************************** + + @Function WriteHWRegs + + @Description + + register access function + + @input pvLinRegBaseAddr : lin addr of register block base + + @input ui32Count : register count + + @input psHWRegs : address/value register list + + @Return none + +******************************************************************************/ +#ifndef WriteHWRegs +IMG_EXPORT +IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs) +{ + while (ui32Count) + { + WriteHWReg (pvLinRegBaseAddr, psHWRegs->ui32RegAddr, psHWRegs->ui32RegVal); + psHWRegs++; + ui32Count--; + } +} +#endif + +/*! +****************************************************************************** + @Function PVRSRVEnumerateDCKM_ForEachVaCb + + @Description + + Enumerates the device node (if is of the same class as given). 
+ + @Input psDeviceNode - The device node to be enumerated + va - variable arguments list, with: + pui32DevCount - The device count pointer (to be increased) + ppui32DevID - The pointer to the device IDs pointer (to be updated and increased) +******************************************************************************/ +static IMG_VOID PVRSRVEnumerateDevicesKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va) +{ + IMG_UINT *pui32DevCount; + PVRSRV_DEVICE_IDENTIFIER **ppsDevIdList; + + pui32DevCount = va_arg(va, IMG_UINT*); + ppsDevIdList = va_arg(va, PVRSRV_DEVICE_IDENTIFIER**); + + if (psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_EXT) + { + *(*ppsDevIdList) = psDeviceNode->sDevId; + (*ppsDevIdList)++; + (*pui32DevCount)++; + } +} + + + +/*! +****************************************************************************** + + @Function PVRSRVEnumerateDevicesKM + + @Description + This function will enumerate all the devices supported by the + PowerVR services within the target system. + The function returns a list of the device ID strcutres stored either in + the services or constructed in the user mode glue component in certain + environments. The number of devices in the list is also returned. + + In a binary layered component which does not support dynamic runtime selection, + the glue code should compile to return the supported devices statically, + e.g. multiple instances of the same device if multiple devices are supported, + or the target combination of MBX and display device. 
+ + In the case of an environment (for instance) where one MBX1 may connect to two + display devices this code would enumerate all three devices and even + non-dynamic MBX1 selection code should retain the facility to parse the list + to find the index of the MBX device + + @output pui32NumDevices : On success, contains the number of devices present + in the system + + @output psDevIdList : Pointer to called supplied buffer to receive the + list of PVRSRV_DEVICE_IDENTIFIER + + @return PVRSRV_ERROR : PVRSRV_NO_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices, + PVRSRV_DEVICE_IDENTIFIER *psDevIdList) +{ + SYS_DATA *psSysData; +/* PVRSRV_DEVICE_NODE *psDeviceNode; */ + IMG_UINT32 i; + + if (!pui32NumDevices || !psDevIdList) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDevicesKM: Invalid params")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + SysAcquireData(&psSysData); + + /* + setup input buffer to be `empty' + */ + for (i=0; i<PVRSRV_MAX_DEVICES; i++) + { + psDevIdList[i].eDeviceType = PVRSRV_DEVICE_TYPE_UNKNOWN; + } + + /* and zero device count */ + *pui32NumDevices = 0; + + /* + Search through the device list for services managed devices + return id info for each device and the number of devices + available + */ + List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList, + &PVRSRVEnumerateDevicesKM_ForEachVaCb, + pui32NumDevices, + &psDevIdList); + + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVInit + + @Description Initialise services + + @Input psSysData : sysdata structure + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData) +{ + PVRSRV_ERROR eError; + +#if defined(__linux__) + eError = PVRSRVCreateSysfsEntry(); + if (eError != PVRSRV_OK) + { + goto Error; + } +#endif + + /* Initialise Resource Manager */ + eError = ResManInit(); + if (eError != PVRSRV_OK) + { + goto Error; + } + + eError = PVRSRVPerProcessDataInit(); + if(eError != PVRSRV_OK) + { + goto Error; + } + + /* Initialise handles */ + eError = PVRSRVHandleInit(); + if(eError != PVRSRV_OK) + { + goto Error; + } + + /* Initialise Power Manager Lock */ + eError = OSCreateResource(&psSysData->sPowerStateChangeResource); + if (eError != PVRSRV_OK) + { + goto Error; + } + + /* Initialise system power state */ + psSysData->eCurrentPowerState = PVRSRV_SYS_POWER_STATE_D0; + psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified; + + /* Create an event object */ + if(OSAllocMem( PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_EVENTOBJECT) , + (IMG_VOID **)&psSysData->psGlobalEventObject, 0, + "Event Object") != PVRSRV_OK) + { + + goto Error; + } + + if(OSEventObjectCreateKM("PVRSRV_GLOBAL_EVENTOBJECT", psSysData->psGlobalEventObject) != PVRSRV_OK) + { + goto Error; + } + + /* Store OS high res timer fallbacks, the system is free to overide these */ + psSysData->pfnHighResTimerCreate = OSFuncHighResTimerCreate; + psSysData->pfnHighResTimerGetus = OSFuncHighResTimerGetus; + psSysData->pfnHighResTimerDestroy = OSFuncHighResTimerDestroy; + +#if defined(TTRACE) + eError = PVRSRVTimeTraceInit(); + if (eError != PVRSRV_OK) + goto Error; + g_ui32InitFlags |= INIT_DATA_ENABLE_TTARCE; +#endif + + /* Initialise pdump */ + PDUMPINIT(); + g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT; + + 
PERFINIT(); + return eError; + +Error: + PVRSRVDeInit(psSysData); + return eError; +} + + + +/*! +****************************************************************************** + + @Function PVRSRVDeInit + + @Description De-Initialise services + + @Input psSysData : sysdata structure + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData) +{ + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(psSysData); + + if (psSysData == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed - invalid param")); + return; + } + + PERFDEINIT(); + +#if defined(TTRACE) + /* deinitialise ttrace */ + if ((g_ui32InitFlags & INIT_DATA_ENABLE_TTARCE) > 0) + { + PVRSRVTimeTraceDeinit(); + } +#endif + /* deinitialise pdump */ + if( (g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0) + { + PDUMPDEINIT(); + } + + /* destroy event object */ + if(psSysData->psGlobalEventObject) + { + OSEventObjectDestroyKM(psSysData->psGlobalEventObject); + OSFreeMem( PVRSRV_PAGEABLE_SELECT, + sizeof(PVRSRV_EVENTOBJECT), + psSysData->psGlobalEventObject, + 0); + psSysData->psGlobalEventObject = IMG_NULL; + } + + eError = PVRSRVHandleDeInit(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed")); + } + + eError = PVRSRVPerProcessDataDeInit(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVPerProcessDataDeInit failed")); + } + + ResManDeInit(); +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVRegisterDevice + + @Description + + registers a device with the system + + @Input psSysData : sysdata structure + + @Input pfnRegisterDevice : device registration function + + @Input ui32SOCInterruptBit : SoC interrupt bit for this device + + @Output pui32DeviceIndex : unique device key (for case of multiple identical devices) + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData, + PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*), + IMG_UINT32 ui32SOCInterruptBit, + IMG_UINT32 *pui32DeviceIndex) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode; + + /* Allocate device node */ + if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_DEVICE_NODE), + (IMG_VOID **)&psDeviceNode, IMG_NULL, + "Device Node") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to alloc memory for psDeviceNode")); + return (PVRSRV_ERROR_OUT_OF_MEMORY); + } + OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE)); + + eError = pfnRegisterDevice(psDeviceNode); + if (eError != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL); + /*not nulling pointer, out of scope*/ + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to register device")); + return (PVRSRV_ERROR_DEVICE_REGISTER_FAILED); + } + + /* + make the refcount 1 and test on this to initialise device + at acquiredevinfo. 
On release if refcount is 1, deinitialise + and when refcount is 0 (sysdata de-alloc) deallocate the device + structures + */ + psDeviceNode->ui32RefCount = 1; + psDeviceNode->psSysData = psSysData; + psDeviceNode->ui32SOCInterruptBit = ui32SOCInterruptBit; + + /* all devices need a unique identifier */ + AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex); + + /* and finally insert the device into the dev-list */ + List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode); + + /* and copy back index */ + *pui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function PVRSRVInitialiseDevice + + @Description + + initialises device by index + + @Input ui32DevIndex : Index to the required device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice (IMG_UINT32 ui32DevIndex) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInitialiseDevice")); + + SysAcquireData(&psSysData); + + /* Find device in the list */ + psDeviceNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DevIndex, + IMG_TRUE); + if(!psDeviceNode) + { + /* Devinfo not in the list */ + PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: requested device is not present")); + return PVRSRV_ERROR_INIT_FAILURE; + } +/* +FoundDevice: +*/ + + PVR_ASSERT (psDeviceNode->ui32RefCount > 0); + + /* + Create the device's resource manager context. 
+ */ + eError = PVRSRVResManConnect(IMG_NULL, &psDeviceNode->hResManContext); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed PVRSRVResManConnect call")); + return eError; + } + + /* Initialise the device */ + if(psDeviceNode->pfnInitDevice != IMG_NULL) + { + eError = psDeviceNode->pfnInitDevice(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed InitDevice call")); + return eError; + } + } + + return PVRSRV_OK; +} + + +static PVRSRV_ERROR PVRSRVFinaliseSystem_SetPowerState_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + + eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVPowerLock call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex)); + return eError; + } + + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE_DEFAULT); + PVRSRVPowerUnlock(KERNEL_ID); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVSetDevicePowerStateKM call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex)); + } + return eError; +} + +/*wraps the PVRSRVDevInitCompatCheck call and prints a debugging message if failed*/ +static PVRSRV_ERROR PVRSRVFinaliseSystem_CompatCheck_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + eError = PVRSRVDevInitCompatCheck(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVDevInitCompatCheck call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex)); + } + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVFinaliseSystem + + @Description + + Final part of system initialisation. 
+ + @Input ui32DevIndex : Index to the required device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccessful) +{ +/* PVRSRV_DEVICE_NODE *psDeviceNode;*/ + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVFinaliseSystem")); + + SysAcquireData(&psSysData); + + if (bInitSuccessful) + { + eError = SysFinalise(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: SysFinalise failed (%d)", eError)); + return eError; + } + + /* Place all devices into their default power state. */ + eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->psDeviceNodeList, + &PVRSRVFinaliseSystem_SetPowerState_AnyCb); + if (eError != PVRSRV_OK) + { + return eError; + } + + /* Verify microkernel compatibility for devices */ + eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->psDeviceNodeList, + &PVRSRVFinaliseSystem_CompatCheck_AnyCb); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + /* Some platforms call this too early in the boot phase. */ + PDUMPENDINITPHASE(); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + /* Only check devices which specify a compatibility check callback */ + if (psDeviceNode->pfnInitDeviceCompatCheck) + return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode); + else + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVAcquireDeviceDataKM + + @Description + + Matchs a device given a device type and a device index. + + @input psDeviceNode :The device node to be matched. + + @Input va : Variable argument list with: + eDeviceType : Required device type. 
If type is unknown use ui32DevIndex + to locate device data + + ui32DevIndex : Index to the required device obtained from the + PVRSRVEnumerateDevice function + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static IMG_VOID * PVRSRVAcquireDeviceDataKM_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va) +{ + PVRSRV_DEVICE_TYPE eDeviceType; + IMG_UINT32 ui32DevIndex; + + eDeviceType = va_arg(va, PVRSRV_DEVICE_TYPE); + ui32DevIndex = va_arg(va, IMG_UINT32); + + if ((eDeviceType != PVRSRV_DEVICE_TYPE_UNKNOWN && + psDeviceNode->sDevId.eDeviceType == eDeviceType) || + (eDeviceType == PVRSRV_DEVICE_TYPE_UNKNOWN && + psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)) + { + return psDeviceNode; + } + else + { + return IMG_NULL; + } +} + +/*! +****************************************************************************** + + @Function PVRSRVAcquireDeviceDataKM + + @Description + + Returns device information + + @Input ui32DevIndex : Index to the required device obtained from the + PVRSRVEnumerateDevice function + + @Input eDeviceType : Required device type. 
If type is unknown use ui32DevIndex + to locate device data + + @Output *phDevCookie : Dev Cookie + + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM (IMG_UINT32 ui32DevIndex, + PVRSRV_DEVICE_TYPE eDeviceType, + IMG_HANDLE *phDevCookie) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + SYS_DATA *psSysData; + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVAcquireDeviceDataKM")); + + SysAcquireData(&psSysData); + + /* Find device in the list */ + psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &PVRSRVAcquireDeviceDataKM_Match_AnyVaCb, + eDeviceType, + ui32DevIndex); + + + if (!psDeviceNode) + { + /* device can't be found in the list so it isn't in the system */ + PVR_DPF((PVR_DBG_ERROR,"PVRSRVAcquireDeviceDataKM: requested device is not present")); + return PVRSRV_ERROR_INIT_FAILURE; + } + +/*FoundDevice:*/ + + PVR_ASSERT (psDeviceNode->ui32RefCount > 0); + + /* return the dev cookie? */ + if (phDevCookie) + { + *phDevCookie = (IMG_HANDLE)psDeviceNode; + } + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVDeinitialiseDevice + + @Description + + This De-inits device + + @Input ui32DevIndex : Index to the required device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + + SysAcquireData(&psSysData); + + psDeviceNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + &MatchDeviceKM_AnyVaCb, + ui32DevIndex, + IMG_TRUE); + + if (!psDeviceNode) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: requested device %d is not present", ui32DevIndex)); + return PVRSRV_ERROR_DEVICEID_NOT_FOUND; + } + + eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVPowerLock call")); + return eError; + } + + /* + Power down the device if necessary. + */ + eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex, + PVRSRV_DEV_POWER_STATE_OFF); + PVRSRVPowerUnlock(KERNEL_ID); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVSetDevicePowerStateKM call")); + return eError; + } + + /* + Free the dissociated device memory. + */ + eError = ResManFreeResByCriteria(psDeviceNode->hResManContext, + RESMAN_CRITERIA_RESTYPE, + RESMAN_TYPE_DEVICEMEM_ALLOCATION, + IMG_NULL, 0); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed ResManFreeResByCriteria call")); + return eError; + } + + /* + De-init the device. 
+ */ + if(psDeviceNode->pfnDeInitDevice != IMG_NULL) + { + eError = psDeviceNode->pfnDeInitDevice(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed DeInitDevice call")); + return eError; + } + } + + /* + Close the device's resource manager context. + */ + PVRSRVResManDisconnect(psDeviceNode->hResManContext, IMG_TRUE); + psDeviceNode->hResManContext = IMG_NULL; + + /* remove node from list */ + List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode); + + /* deallocate id and memory */ + (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex); + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL); + /*not nulling pointer, out of scope*/ + + return (PVRSRV_OK); +} + + +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Timeoutus, + IMG_UINT32 ui32PollPeriodus, + IMG_BOOL bAllowPreemption) +{ +#if defined (EMULATOR) + { + PVR_UNREFERENCED_PARAMETER(bAllowPreemption); + #if !defined(__linux__) + PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus); + #endif + + /* For the Emulator we want the system to stop when a lock-up is detected so the state can be analysed. + * Also the Emulator is much slower than real silicon so timeouts are not valid. 
+ */ + do + { + if((*pui32LinMemAddr & ui32Mask) == ui32Value) + { + return PVRSRV_OK; + } + + #if defined(__linux__) + OSWaitus(ui32PollPeriodus); + #else + OSReleaseThreadQuanta(); + #endif + + } while (ui32Timeoutus); /* Endless loop only for the Emulator */ + } +#else + { + IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */ + + if (bAllowPreemption) + { + PVR_ASSERT(ui32PollPeriodus >= 1000); + } + + /* PRQA S 3415,4109 1 */ /* macro format critical - leave alone */ + LOOP_UNTIL_TIMEOUT(ui32Timeoutus) + { + ui32ActualValue = (*pui32LinMemAddr & ui32Mask); + if(ui32ActualValue == ui32Value) + { + return PVRSRV_OK; + } + + if (bAllowPreemption) + { + OSSleepms(ui32PollPeriodus / 1000); + } + else + { + OSWaitus(ui32PollPeriodus); + } + } END_LOOP_UNTIL_TIMEOUT(); + + PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).", + ui32Value, ui32ActualValue, ui32Mask)); + } +#endif /* #if defined (EMULATOR) */ + + return PVRSRV_ERROR_TIMEOUT; +} + + +/*Level 3 of the loop nesting*/ +static IMG_VOID PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb(BM_HEAP *psBMHeap, va_list va) +{ + IMG_CHAR **ppszStr; + IMG_UINT32 *pui32StrLen; + IMG_UINT32 ui32Mode; + PVRSRV_ERROR (*pfnGetStats)(RA_ARENA *, IMG_CHAR **, IMG_UINT32 *); + + ppszStr = va_arg(va, IMG_CHAR**); + pui32StrLen = va_arg(va, IMG_UINT32*); + ui32Mode = va_arg(va, IMG_UINT32); + + /* Would be better to pass fn pointer in the variable args list + * but MS C compiler complains with error C2066: In ANSI C, + * it is not legal to cast between a function pointer and a data pointer. 
+ */ + switch(ui32Mode) + { + case PVRSRV_MISC_INFO_MEMSTATS_PRESENT: + pfnGetStats = &RA_GetStats; + break; + case PVRSRV_MISC_INFO_FREEMEM_PRESENT: + pfnGetStats = &RA_GetStatsFreeMem; + break; + default: + return; + } + + if(psBMHeap->pImportArena) + { + pfnGetStats(psBMHeap->pImportArena, + ppszStr, + pui32StrLen); + } + + if(psBMHeap->pVMArena) + { + pfnGetStats(psBMHeap->pVMArena, + ppszStr, + pui32StrLen); + } +} + +/*Level 2 of the loop nesting*/ +static PVRSRV_ERROR PVRSRVGetMiscInfoKM_BMContext_AnyVaCb(BM_CONTEXT *psBMContext, va_list va) +{ + + IMG_UINT32 *pui32StrLen; + IMG_INT32 *pi32Count; + IMG_CHAR **ppszStr; + IMG_UINT32 ui32Mode; + + pui32StrLen = va_arg(va, IMG_UINT32*); + pi32Count = va_arg(va, IMG_INT32*); + ppszStr = va_arg(va, IMG_CHAR**); + ui32Mode = va_arg(va, IMG_UINT32); + + CHECK_SPACE(*pui32StrLen); + *pi32Count = OSSNPrintf(*ppszStr, 100, "\nApplication Context (hDevMemContext) %p:\n", + (IMG_HANDLE)psBMContext); + UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen); + + List_BM_HEAP_ForEach_va(psBMContext->psBMHeap, + &PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb, + ppszStr, + pui32StrLen, + ui32Mode); + return PVRSRV_OK; +} + + +/*level 1 of the loop nesting*/ +static PVRSRV_ERROR PVRSRVGetMiscInfoKM_Device_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va) +{ + IMG_UINT32 *pui32StrLen; + IMG_INT32 *pi32Count; + IMG_CHAR **ppszStr; + IMG_UINT32 ui32Mode; + + pui32StrLen = va_arg(va, IMG_UINT32*); + pi32Count = va_arg(va, IMG_INT32*); + ppszStr = va_arg(va, IMG_CHAR**); + ui32Mode = va_arg(va, IMG_UINT32); + + CHECK_SPACE(*pui32StrLen); + *pi32Count = OSSNPrintf(*ppszStr, 100, "\n\nDevice Type %d:\n", psDeviceNode->sDevId.eDeviceType); + UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen); + + /* kernel context: */ + if(psDeviceNode->sDevMemoryInfo.pBMKernelContext) + { + CHECK_SPACE(*pui32StrLen); + *pi32Count = OSSNPrintf(*ppszStr, 100, "\nKernel Context:\n"); + UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen); + + 
List_BM_HEAP_ForEach_va(psDeviceNode->sDevMemoryInfo.pBMKernelContext->psBMHeap, + &PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb, + ppszStr, + pui32StrLen, + ui32Mode); + } + + /* double loop app contexts:heaps */ + return List_BM_CONTEXT_PVRSRV_ERROR_Any_va(psDeviceNode->sDevMemoryInfo.pBMContext, + &PVRSRVGetMiscInfoKM_BMContext_AnyVaCb, + pui32StrLen, + pi32Count, + ppszStr, + ui32Mode); +} + + +/*! +****************************************************************************** + + @Function PVRSRVGetMiscInfoKM + + @Description + Retrieves misc. info. + + @Output PVRSRV_MISC_INFO + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO_KM *psMiscInfo) +#else +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo) +#endif +{ + SYS_DATA *psSysData; + + if(!psMiscInfo) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psMiscInfo->ui32StatePresent = 0; + + /* do a basic check for uninitialised request flag */ + if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT + |PVRSRV_MISC_INFO_CLOCKGATE_PRESENT + |PVRSRV_MISC_INFO_MEMSTATS_PRESENT + |PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT + |PVRSRV_MISC_INFO_DDKVERSION_PRESENT + |PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT + |PVRSRV_MISC_INFO_RESET_PRESENT + |PVRSRV_MISC_INFO_FREEMEM_PRESENT + |PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT + |PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT + |PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT)) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + SysAcquireData(&psSysData); + + /* return SOC Timer registers */ + if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) && + (psSysData->pvSOCTimerRegisterKM != IMG_NULL)) + { + 
psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT; + psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM; + psMiscInfo->hSOCTimerRegisterOSMemHandle = psSysData->hSOCTimerRegisterOSMemHandle; + } + else + { + psMiscInfo->pvSOCTimerRegisterKM = IMG_NULL; + psMiscInfo->hSOCTimerRegisterOSMemHandle = IMG_NULL; + } + + /* return SOC Clock Gating registers */ + if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) && + (psSysData->pvSOCClockGateRegsBase != IMG_NULL)) + { + psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT; + psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase; + psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize; + } + + /* memory stats */ + if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) && + (psMiscInfo->pszMemoryStr != IMG_NULL)) + { + RA_ARENA **ppArena; +/* BM_HEAP *psBMHeap; + BM_CONTEXT *psBMContext; + PVRSRV_DEVICE_NODE *psDeviceNode;*/ + IMG_CHAR *pszStr; + IMG_UINT32 ui32StrLen; + IMG_INT32 i32Count; + + pszStr = psMiscInfo->pszMemoryStr; + ui32StrLen = psMiscInfo->ui32MemoryStrLen; + + psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT; + + /* Local backing stores */ + ppArena = &psSysData->apsLocalDevMemArena[0]; + while(*ppArena) + { + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n"); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + RA_GetStats(*ppArena, + &pszStr, + &ui32StrLen); + /* advance through the array */ + ppArena++; + } + + /* per device */ +/* psDeviceNode = psSysData->psDeviceNodeList;*/ + + /*triple loop; devices:contexts:heaps*/ + List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList, + &PVRSRVGetMiscInfoKM_Device_AnyVaCb, + &ui32StrLen, + &i32Count, + &pszStr, + PVRSRV_MISC_INFO_MEMSTATS_PRESENT); + + /* attach a new line and string terminate */ + i32Count = OSSNPrintf(pszStr, 100, "\n"); + UPDATE_SPACE(pszStr, 
i32Count, ui32StrLen); + } + + /* Lean version of mem stats: only show free mem on each RA */ + if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0) + && psMiscInfo->pszMemoryStr) + { + IMG_CHAR *pszStr; + IMG_UINT32 ui32StrLen; + IMG_INT32 i32Count; + + pszStr = psMiscInfo->pszMemoryStr; + ui32StrLen = psMiscInfo->ui32MemoryStrLen; + + psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FREEMEM_PRESENT; + + /* triple loop over devices:contexts:heaps */ + List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList, + &PVRSRVGetMiscInfoKM_Device_AnyVaCb, + &ui32StrLen, + &i32Count, + &pszStr, + PVRSRV_MISC_INFO_FREEMEM_PRESENT); + + i32Count = OSSNPrintf(pszStr, 100, "\n"); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + } + + if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) && + (psSysData->psGlobalEventObject != IMG_NULL)) + { + psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT; + psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject; + } + + /* DDK version and memstats not supported in same call to GetMiscInfo */ + + if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL) + && ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL) + && (psMiscInfo->pszMemoryStr != IMG_NULL)) + { + IMG_CHAR *pszStr; + IMG_UINT32 ui32StrLen; + IMG_UINT32 ui32LenStrPerNum = 12; /* string length per UI32: 10 digits + '.' 
+ '\0' = 12 bytes */ + IMG_INT32 i32Count; + IMG_INT i; + psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_DDKVERSION_PRESENT; + + /* construct DDK string */ + psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ; + psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN; + psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BUILD_HI; + psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD_LO; + + pszStr = psMiscInfo->pszMemoryStr; + ui32StrLen = psMiscInfo->ui32MemoryStrLen; + + for (i=0; i<4; i++) + { + if (ui32StrLen < ui32LenStrPerNum) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%u", psMiscInfo->aui32DDKVersion[i]); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + if (i != 3) + { + i32Count = OSSNPrintf(pszStr, 2, "."); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + } + } + } + + if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT) != 0UL) + { + psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT; + + if(psMiscInfo->sCacheOpCtl.bDeferOp) + { + /* For now, assume deferred ops are "full" cache ops, + * and we don't need (or expect) a meminfo. + */ + psSysData->ePendingCacheOpType = psMiscInfo->sCacheOpCtl.eCacheOpType; + } + else + { +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = psMiscInfo->sCacheOpCtl.psKernelMemInfo; + + if(!psMiscInfo->sCacheOpCtl.psKernelMemInfo) +#else + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + PVRSRV_PER_PROCESS_DATA *psPerProc; + + if(!psMiscInfo->sCacheOpCtl.u.psKernelMemInfo) +#endif + { + PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: " + "Ignoring non-deferred cache op with no meminfo")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + if(psSysData->ePendingCacheOpType != PVRSRV_MISC_INFO_CPUCACHEOP_NONE) + { + PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: " + "Deferred cache op is pending. 
It is unlikely you want " + "to combine deferred cache ops with immediate ones")); + } + +#if defined (SUPPORT_SID_INTERFACE) + PVR_DBG_BREAK +#else + psPerProc = PVRSRVFindPerProcessData(); + + if(PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_PVOID *)&psKernelMemInfo, + psMiscInfo->sCacheOpCtl.u.psKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: " + "Can't find kernel meminfo")); + return PVRSRV_ERROR_INVALID_PARAMS; + } +#endif + + if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH) + { + if(!OSFlushCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle, + 0, + psMiscInfo->sCacheOpCtl.pvBaseVAddr, + psMiscInfo->sCacheOpCtl.ui32Length)) + { + return PVRSRV_ERROR_CACHEOP_FAILED; + } + } + else if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN) + { + if(!OSCleanCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle, + 0, + psMiscInfo->sCacheOpCtl.pvBaseVAddr, + psMiscInfo->sCacheOpCtl.ui32Length)) + { + return PVRSRV_ERROR_CACHEOP_FAILED; + } + } + } + } + + if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT) != 0UL) + { +#if !defined (SUPPORT_SID_INTERFACE) + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + PVRSRV_PER_PROCESS_DATA *psPerProc; +#endif + + psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT; + +#if defined (SUPPORT_SID_INTERFACE) + PVR_DBG_BREAK +#else + psPerProc = PVRSRVFindPerProcessData(); + + if(PVRSRVLookupHandle(psPerProc->psHandleBase, + (IMG_PVOID *)&psKernelMemInfo, + psMiscInfo->sGetRefCountCtl.u.psKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: " + "Can't find kernel meminfo")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + psMiscInfo->sGetRefCountCtl.ui32RefCount = psKernelMemInfo->ui32RefCount; +#endif + } + + if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT) != 0UL) + { + 
psMiscInfo->ui32PageSize = HOST_PAGESIZE(); + psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT; + } + +#if defined(PVRSRV_RESET_ON_HWTIMEOUT) + if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) != 0UL) + { + PVR_LOG(("User requested OS reset")); + OSPanic(); + } +#endif /* #if defined(PVRSRV_RESET_ON_HWTIMEOUT) */ + + if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT) != 0UL) + { + PVRSRVSetDCState(DC_STATE_FORCE_SWAP_TO_SYSTEM); + psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT; + } + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function PVRSRVDeviceLISR + + @Description + OS-independent Device Low-level Interrupt Service Routine + + @Input psDeviceNode + + @Return IMG_BOOL : Whether any interrupts were serviced + +******************************************************************************/ +IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + SYS_DATA *psSysData; + IMG_BOOL bStatus = IMG_FALSE; + IMG_UINT32 ui32InterruptSource; + + if(!psDeviceNode) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVDeviceLISR: Invalid params\n")); + goto out; + } + psSysData = psDeviceNode->psSysData; + + /* query the SOC/system to see whether this device was the source of the interrupt */ + ui32InterruptSource = SysGetInterruptSource(psSysData, psDeviceNode); + if(ui32InterruptSource & psDeviceNode->ui32SOCInterruptBit) + { + if(psDeviceNode->pfnDeviceISR != IMG_NULL) + { + bStatus = (*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData); + } + if(!powering_down) { + SysClearInterrupts(psSysData, psDeviceNode->ui32SOCInterruptBit); + } + } + +out: + return bStatus; +} + +static IMG_VOID PVRSRVSystemLISR_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va) +{ + + IMG_BOOL *pbStatus; + IMG_UINT32 *pui32InterruptSource; + IMG_UINT32 *pui32ClearInterrupts; + + pbStatus = va_arg(va, IMG_BOOL*); 
+ pui32InterruptSource = va_arg(va, IMG_UINT32*); + pui32ClearInterrupts = va_arg(va, IMG_UINT32*); + + + if(psDeviceNode->pfnDeviceISR != IMG_NULL) + { + if(*pui32InterruptSource & psDeviceNode->ui32SOCInterruptBit) + { + if((*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData)) + { + /* Record if serviced any interrupts. */ + *pbStatus = IMG_TRUE; + } + /* Combine the SOC clear bits. */ + *pui32ClearInterrupts |= psDeviceNode->ui32SOCInterruptBit; + } + } +} + +/*! +****************************************************************************** + + @Function PVRSRVSystemLISR + + @Description + OS-independent System Low-level Interrupt Service Routine + + @Input pvSysData + + @Return IMG_BOOL : Whether any interrupts were serviced + +******************************************************************************/ +IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = pvSysData; + IMG_BOOL bStatus = IMG_FALSE; + IMG_UINT32 ui32InterruptSource; + IMG_UINT32 ui32ClearInterrupts = 0; +/* PVRSRV_DEVICE_NODE *psDeviceNode;*/ + + if(!psSysData) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSystemLISR: Invalid params\n")); +/* goto out; */ + } + else + { + /* query SOC for source of interrupts */ + ui32InterruptSource = SysGetInterruptSource(psSysData, IMG_NULL); + + /* only proceed if PVR interrupts */ + if(ui32InterruptSource) + { + /* traverse the devices' ISR handlers */ + List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList, + &PVRSRVSystemLISR_ForEachVaCb, + &bStatus, + &ui32InterruptSource, + &ui32ClearInterrupts); + + SysClearInterrupts(psSysData, ui32ClearInterrupts); + } +/*out:*/ + } + return bStatus; +} + + +static IMG_VOID PVRSRVMISR_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if(psDeviceNode->pfnDeviceMISR != IMG_NULL) + { + (*psDeviceNode->pfnDeviceMISR)(psDeviceNode->pvISRData); + } +} + +/*! 
+****************************************************************************** + + @Function PVRSRVMISR + + @Input pvSysData + + @Description + OS-independent Medium-level Interrupt Service Routine + +******************************************************************************/ +IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = pvSysData; +/* PVRSRV_DEVICE_NODE *psDeviceNode; */ + + if(!psSysData) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVMISR: Invalid params\n")); + return; + } + + /* Traverse the devices' MISR handlers. */ + List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, + &PVRSRVMISR_ForEachCb); + + /* Process the queues. */ + if (PVRSRVProcessQueues(IMG_FALSE) == PVRSRV_ERROR_PROCESSING_BLOCKED) + { + PVRSRVProcessQueues(IMG_FALSE); + } + + /* signal global event object */ + if (psSysData->psGlobalEventObject) + { + IMG_HANDLE hOSEventKM = psSysData->psGlobalEventObject->hOSEventKM; + if(hOSEventKM) + { + OSEventObjectSignalKM(hOSEventKM); + } + } +} + + +/*! +****************************************************************************** + + @Function PVRSRVProcessConnect + + @Description Inform services that a process has connected. + + @Input ui32PID - process ID + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID, IMG_UINT32 ui32Flags) +{ + return PVRSRVPerProcessDataConnect(ui32PID, ui32Flags); +} + + +/*! +****************************************************************************** + + @Function PVRSRVProcessDisconnect + + @Description Inform services that a process has disconnected. + + @Input ui32PID - process ID + + @Return IMG_VOID + +******************************************************************************/ +IMG_EXPORT +IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID) +{ + PVRSRVPerProcessDataDisconnect(ui32PID); +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVSaveRestoreLiveSegments + + @Input pArena - the arena the segment was originally allocated from. + pbyBuffer - the system memory buffer set to null to get the size needed. + puiBufSize - size of system memory buffer. + bSave - IMG_TRUE if a save is required + + @Description + Function to save or restore Resources Live segments + +******************************************************************************/ +PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, + IMG_SIZE_T *puiBufSize, IMG_BOOL bSave) +{ + IMG_SIZE_T uiBytesSaved = 0; + IMG_PVOID pvLocalMemCPUVAddr; + RA_SEGMENT_DETAILS sSegDetails; + + if (hArena == IMG_NULL) + { + return (PVRSRV_ERROR_INVALID_PARAMS); + } + + sSegDetails.uiSize = 0; + sSegDetails.sCpuPhyAddr.uiAddr = 0; + sSegDetails.hSegment = 0; + + /* walk the arena segments and write live one to the buffer */ + while (RA_GetNextLiveSegment(hArena, &sSegDetails)) + { + if (pbyBuffer == IMG_NULL) + { + /* calc buffer required */ + uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize; + } + else + { + if ((uiBytesSaved + sizeof(sSegDetails.uiSize) + sSegDetails.uiSize) > *puiBufSize) + { + return (PVRSRV_ERROR_OUT_OF_MEMORY); + } + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVSaveRestoreLiveSegments: Base %08x size %08x", sSegDetails.sCpuPhyAddr.uiAddr, sSegDetails.uiSize)); + + /* Map the device's local memory area onto the host. 
*/ + pvLocalMemCPUVAddr = OSMapPhysToLin(sSegDetails.sCpuPhyAddr, + sSegDetails.uiSize, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + IMG_NULL); + if (pvLocalMemCPUVAddr == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Failed to map local memory to host")); + return (PVRSRV_ERROR_OUT_OF_MEMORY); + } + + if (bSave) + { + /* write segment size then segment data */ + OSMemCopy(pbyBuffer, &sSegDetails.uiSize, sizeof(sSegDetails.uiSize)); + pbyBuffer += sizeof(sSegDetails.uiSize); + + OSMemCopy(pbyBuffer, pvLocalMemCPUVAddr, sSegDetails.uiSize); + pbyBuffer += sSegDetails.uiSize; + } + else + { + IMG_UINT32 uiSize; + /* reag segment size and validate */ + OSMemCopy(&uiSize, pbyBuffer, sizeof(sSegDetails.uiSize)); + + if (uiSize != sSegDetails.uiSize) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Segment size error")); + } + else + { + pbyBuffer += sizeof(sSegDetails.uiSize); + + OSMemCopy(pvLocalMemCPUVAddr, pbyBuffer, sSegDetails.uiSize); + pbyBuffer += sSegDetails.uiSize; + } + } + + + uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize; + + OSUnMapPhysToLin(pvLocalMemCPUVAddr, + sSegDetails.uiSize, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + IMG_NULL); + } + } + + if (pbyBuffer == IMG_NULL) + { + *puiBufSize = uiBytesSaved; + } + + return (PVRSRV_OK); +} + + +/*! + ****************************************************************************** + + @Function PVRSRVGetErrorStringKM + + @Description Returns a text string relating to the PVRSRV_ERROR enum. 
+ + @Note case statement used rather than an indexed arrary to ensure text is + synchronised with the correct enum + + @Input eError : PVRSRV_ERROR enum + + @Return const IMG_CHAR * : Text string + + @Note Must be kept in sync with servicesext.h + +******************************************************************************/ + +IMG_EXPORT +const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError) +{ +/* PRQA S 5087 1 */ /* include file required here */ +#include "pvrsrv_errors.h" +} + +static IMG_VOID PVRSRVCommandCompleteCallbacks_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if(psDeviceNode->pfnDeviceCommandComplete != IMG_NULL) + { + /* Call the device's callback function. */ + (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode); + } +} + +/*! +****************************************************************************** + + @Function PVRSRVScheduleDeviceCallbacks + + @Description Schedule all device callbacks + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID PVRSRVScheduleDeviceCallbacks(IMG_VOID) +{ + SYS_DATA *psSysData; +/* PVRSRV_DEVICE_NODE *psDeviceNode;*/ + + SysAcquireData(&psSysData); + + /*for all the device, invoke the callback function*/ + List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, + &PVRSRVCommandCompleteCallbacks_ForEachCb); +} + +/*! +****************************************************************************** + + @Function PVRSRVScheduleDevices + + @Description Schedules all Services-Managed Devices to check their pending + command queues. The intention is that ScheduleDevices be called by the + 3rd party BC driver after it has finished writing new data to its output + texture. 
+ + @Return IMG_VOID + +******************************************************************************/ +IMG_EXPORT +IMG_VOID PVRSRVScheduleDevicesKM(IMG_VOID) +{ + PVRSRVScheduleDeviceCallbacks(); +} + +/***************************************************************************** + End of file (pvrsrv.c) +*****************************************************************************/ diff --git a/pvr-source/services4/srvkm/common/queue.c b/pvr-source/services4/srvkm/common/queue.c new file mode 100644 index 0000000..88b05a4 --- /dev/null +++ b/pvr-source/services4/srvkm/common/queue.c @@ -0,0 +1,1500 @@ +/*************************************************************************/ /*! +@Title Kernel side command queue functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "services_headers.h" +#include "pvr_bridge_km.h" + +#include "lists.h" +#include "ttrace.h" + +/* + * The number of commands of each type which can be in flight at once. + */ +#if defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED) +#define DC_NUM_COMMANDS_PER_TYPE 2 +#else +#define DC_NUM_COMMANDS_PER_TYPE 1 +#endif + +/* + * List of private command processing function pointer tables and command + * complete tables for a device in the system. + * Each table is allocated when the device registers its private command + * processing functions. 
+ */ +typedef struct _DEVICE_COMMAND_DATA_ +{ + PFN_CMD_PROC pfnCmdProc; + PCOMMAND_COMPLETE_DATA apsCmdCompleteData[DC_NUM_COMMANDS_PER_TYPE]; + IMG_UINT32 ui32CCBOffset; + IMG_UINT32 ui32MaxDstSyncCount; /*!< Maximum number of dest syncs */ + IMG_UINT32 ui32MaxSrcSyncCount; /*!< Maximum number of source syncs */ +} DEVICE_COMMAND_DATA; + + +#if defined(__linux__) && defined(__KERNEL__) + +#include "proc.h" + +/***************************************************************************** + FUNCTION : ProcSeqShowQueue + + PURPOSE : Print the content of queue element to /proc file + (See env/linux/proc.c:CreateProcReadEntrySeq) + + PARAMETERS : sfile - /proc seq_file + el - Element to print +*****************************************************************************/ +void ProcSeqShowQueue(struct seq_file *sfile,void* el) +{ + PVRSRV_QUEUE_INFO *psQueue = (PVRSRV_QUEUE_INFO*)el; + IMG_INT cmds = 0; + IMG_SIZE_T ui32ReadOffset; + IMG_SIZE_T ui32WriteOffset; + PVRSRV_COMMAND *psCmd; + + if(el == PVR_PROC_SEQ_START_TOKEN) + { + seq_printf( sfile, + "Command Queues\n" + "Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n"); + return; + } + + ui32ReadOffset = psQueue->ui32ReadOffset; + ui32WriteOffset = psQueue->ui32WriteOffset; + + while (ui32ReadOffset != ui32WriteOffset) + { + psCmd= (PVRSRV_COMMAND *)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + ui32ReadOffset); + + seq_printf(sfile, "%x %x %5u %6u %3u %5u %2u %2u %3u \n", + (IMG_UINTPTR_T)psQueue, + (IMG_UINTPTR_T)psCmd, + psCmd->ui32ProcessID, + psCmd->CommandType, + psCmd->uCmdSize, + psCmd->ui32DevIndex, + psCmd->ui32DstSyncCount, + psCmd->ui32SrcSyncCount, + psCmd->uDataSize); + { + IMG_UINT32 i; + for (i = 0; i < psCmd->ui32SrcSyncCount; i++) + { + PVRSRV_SYNC_DATA *psSyncData = psCmd->psSrcSync[i].psKernelSyncInfoKM->psSyncData; + seq_printf(sfile, " Sync %u: ROP/ROC: 0x%x/0x%x WOP/WOC: 0x%x/0x%x ROC-VA: 0x%x WOC-VA: 0x%x\n", + i, + psCmd->psSrcSync[i].ui32ReadOps2Pending, + psSyncData->ui32ReadOps2Complete, 
+ psCmd->psSrcSync[i].ui32WriteOpsPending, + psSyncData->ui32WriteOpsComplete, + psCmd->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCmd->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr); + } + } + + /* taken from UPDATE_QUEUE_ROFF in queue.h */ + ui32ReadOffset += psCmd->uCmdSize; + ui32ReadOffset &= psQueue->ui32QueueSize - 1; + cmds++; + } + + if (cmds == 0) + { + seq_printf(sfile, "%x <empty>\n", (IMG_UINTPTR_T)psQueue); + } +} + +/***************************************************************************** + FUNCTION : ProcSeqOff2ElementQueue + + PURPOSE : Transale offset to element (/proc stuff) + + PARAMETERS : sfile - /proc seq_file + off - the offset into the buffer + + RETURNS : element to print +*****************************************************************************/ +void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off) +{ + PVRSRV_QUEUE_INFO *psQueue = IMG_NULL; + SYS_DATA *psSysData; + + PVR_UNREFERENCED_PARAMETER(sfile); + + if(!off) + { + return PVR_PROC_SEQ_START_TOKEN; + } + + + psSysData = SysAcquireDataNoCheck(); + if (psSysData != IMG_NULL) + { + for (psQueue = psSysData->psQueueList; (((--off) > 0) && (psQueue != IMG_NULL)); psQueue = psQueue->psNextKM); + } + + return psQueue; +} +#endif /* __linux__ && __KERNEL__ */ + +/*! + * Macro to return space in given command queue + */ +#define GET_SPACE_IN_CMDQ(psQueue) \ + ((((psQueue)->ui32ReadOffset - (psQueue)->ui32WriteOffset) \ + + ((psQueue)->ui32QueueSize - 1)) & ((psQueue)->ui32QueueSize - 1)) + +/*! + * Macro to Write Offset in given command queue + */ +#define UPDATE_QUEUE_WOFF(psQueue, ui32Size) \ + (psQueue)->ui32WriteOffset = ((psQueue)->ui32WriteOffset + (ui32Size)) \ + & ((psQueue)->ui32QueueSize - 1); + +/*! + * Check if an ops complete value has gone past the pending value. + * This can happen when dummy processing multiple operations, e.g. hardware recovery. 
+ */ +#define SYNCOPS_STALE(ui32OpsComplete, ui32OpsPending) \ + ((ui32OpsComplete) >= (ui32OpsPending)) + +/*! +**************************************************************************** + @Function : PVRSRVGetWriteOpsPending + + @Description : Gets the next operation to wait for in a sync object + + @Input : psSyncInfo - pointer to sync information struct + @Input : bIsReadOp - Is this a read or write op + + @Return : Next op value +*****************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVGetWriteOpsPending) +#endif +static INLINE +IMG_UINT32 PVRSRVGetWriteOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp) +{ + IMG_UINT32 ui32WriteOpsPending; + + if(bIsReadOp) + { + ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + else + { + /* + Note: This needs to be atomic and is provided the + kernel driver is single threaded (non-rentrant) + */ + ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending++; + } + + return ui32WriteOpsPending; +} + +/*! 
+***************************************************************************** + @Function : PVRSRVGetReadOpsPending + + @Description : Gets the number of pending read ops + + @Input : psSyncInfo - pointer to sync information struct + @Input : bIsReadOp - Is this a read or write op + + @Return : Next op value +*****************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVGetReadOpsPending) +#endif +static INLINE +IMG_UINT32 PVRSRVGetReadOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp) +{ + IMG_UINT32 ui32ReadOpsPending; + + if(bIsReadOp) + { + ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOps2Pending++; + } + else + { + ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOps2Pending; + } + + return ui32ReadOpsPending; +} + +static IMG_VOID QueueDumpCmdComplete(COMMAND_COMPLETE_DATA *psCmdCompleteData, + IMG_UINT32 i, + IMG_BOOL bIsSrc) +{ + PVRSRV_SYNC_OBJECT *psSyncObject; + + psSyncObject = bIsSrc ? psCmdCompleteData->psSrcSync : psCmdCompleteData->psDstSync; + + if (psCmdCompleteData->bInUse) + { + PVR_LOG(("\t%s %u: ROC DevVAddr:0x%X ROP:0x%x ROC:0x%x, WOC DevVAddr:0x%X WOP:0x%x WOC:0x%x", + bIsSrc ? "SRC" : "DEST", i, + psSyncObject[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOps2Pending, + psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOps2Complete, + psSyncObject[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsPending, + psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete)) + } + else + { + PVR_LOG(("\t%s %u: (Not in use)", bIsSrc ? 
"SRC" : "DEST", i)) + } +} + + +static IMG_VOID QueueDumpDebugInfo_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY) + { + IMG_UINT32 ui32CmdCounter, ui32SyncCounter; + SYS_DATA *psSysData; + DEVICE_COMMAND_DATA *psDeviceCommandData; + PCOMMAND_COMPLETE_DATA psCmdCompleteData; + + SysAcquireData(&psSysData); + + psDeviceCommandData = psSysData->apsDeviceCommandData[psDeviceNode->sDevId.ui32DeviceIndex]; + + if (psDeviceCommandData != IMG_NULL) + { + for (ui32CmdCounter = 0; ui32CmdCounter < DC_NUM_COMMANDS_PER_TYPE; ui32CmdCounter++) + { + psCmdCompleteData = psDeviceCommandData[DC_FLIP_COMMAND].apsCmdCompleteData[ui32CmdCounter]; + + PVR_LOG(("Flip Command Complete Data %u for display device %u:", + ui32CmdCounter, psDeviceNode->sDevId.ui32DeviceIndex)) + + for (ui32SyncCounter = 0; + ui32SyncCounter < psCmdCompleteData->ui32SrcSyncCount; + ui32SyncCounter++) + { + QueueDumpCmdComplete(psCmdCompleteData, ui32SyncCounter, IMG_TRUE); + } + + for (ui32SyncCounter = 0; + ui32SyncCounter < psCmdCompleteData->ui32DstSyncCount; + ui32SyncCounter++) + { + QueueDumpCmdComplete(psCmdCompleteData, ui32SyncCounter, IMG_FALSE); + } + } + } + else + { + PVR_LOG(("There is no Command Complete Data for display device %u", psDeviceNode->sDevId.ui32DeviceIndex)) + } + } +} + + +IMG_VOID QueueDumpDebugInfo(IMG_VOID) +{ + SYS_DATA *psSysData; + SysAcquireData(&psSysData); + List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, &QueueDumpDebugInfo_ForEachCb); +} + + +/***************************************************************************** + Kernel-side functions of User->Kernel transitions +******************************************************************************/ + +static IMG_SIZE_T NearestPower2(IMG_SIZE_T ui32Value) +{ + IMG_SIZE_T ui32Temp, ui32Result = 1; + + if(!ui32Value) + return 0; + + ui32Temp = ui32Value - 1; + while(ui32Temp) + { + ui32Result <<= 1; + ui32Temp >>= 1; + } + + return 
ui32Result; +} + + +/*! +****************************************************************************** + + @Function PVRSRVCreateCommandQueueKM + + @Description + Creates a new command queue into which render/blt commands etc can be + inserted. + + @Input ui32QueueSize : + + @Output ppsQueueInfo : + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize, + PVRSRV_QUEUE_INFO **ppsQueueInfo) +{ + PVRSRV_QUEUE_INFO *psQueueInfo; + IMG_SIZE_T ui32Power2QueueSize = NearestPower2(ui32QueueSize); + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + IMG_HANDLE hMemBlock; + + SysAcquireData(&psSysData); + + /* allocate an internal queue info structure */ + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_QUEUE_INFO), + (IMG_VOID **)&psQueueInfo, &hMemBlock, + "Queue Info"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue struct")); + goto ErrorExit; + } + OSMemSet(psQueueInfo, 0, sizeof(PVRSRV_QUEUE_INFO)); + + psQueueInfo->hMemBlock[0] = hMemBlock; + psQueueInfo->ui32ProcessID = OSGetCurrentProcessIDKM(); + + /* allocate the command queue buffer - allow for overrun */ + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE, + &psQueueInfo->pvLinQueueKM, &hMemBlock, + "Command Queue"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue buffer")); + goto ErrorExit; + } + + psQueueInfo->hMemBlock[1] = hMemBlock; + psQueueInfo->pvLinQueueUM = psQueueInfo->pvLinQueueKM; + + /* Sanity check: Should be zeroed by OSMemSet */ + PVR_ASSERT(psQueueInfo->ui32ReadOffset == 0); + PVR_ASSERT(psQueueInfo->ui32WriteOffset == 0); + + psQueueInfo->ui32QueueSize = ui32Power2QueueSize; + + /* if this is the first q, create a lock resource for the q list */ + if (psSysData->psQueueList == 
IMG_NULL) + { + eError = OSCreateResource(&psSysData->sQProcessResource); + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + } + + /* Ensure we don't corrupt queue list, by blocking access */ + eError = OSLockResource(&psSysData->sQProcessResource, + KERNEL_ID); + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + + psQueueInfo->psNextKM = psSysData->psQueueList; + psSysData->psQueueList = psQueueInfo; + + eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID); + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + + *ppsQueueInfo = psQueueInfo; + + return PVRSRV_OK; + +ErrorExit: + + if(psQueueInfo) + { + if(psQueueInfo->pvLinQueueKM) + { + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + psQueueInfo->ui32QueueSize, + psQueueInfo->pvLinQueueKM, + psQueueInfo->hMemBlock[1]); + psQueueInfo->pvLinQueueKM = IMG_NULL; + } + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_QUEUE_INFO), + psQueueInfo, + psQueueInfo->hMemBlock[0]); + /*not nulling pointer, out of scope*/ + } + + return eError; +} + + +/*! +****************************************************************************** + + @Function PVRSRVDestroyCommandQueueKM + + @Description Destroys a command queue + + @Input psQueueInfo : + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo) +{ + PVRSRV_QUEUE_INFO *psQueue; + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + IMG_BOOL bTimeout = IMG_TRUE; + + SysAcquireData(&psSysData); + + psQueue = psSysData->psQueueList; + + /* PRQA S 3415,4109 1 */ /* macro format critical - leave alone */ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset) + { + bTimeout = IMG_FALSE; + break; + } + OSSleepms(1); + } END_LOOP_UNTIL_TIMEOUT(); + + if (bTimeout) + { + /* The command queue could not be flushed within the timeout period. 
+ Allow the queue to be destroyed before returning the error code. */ + PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyCommandQueueKM : Failed to empty queue")); + eError = PVRSRV_ERROR_CANNOT_FLUSH_QUEUE; + goto ErrorExit; + } + + /* Ensure we don't corrupt queue list, by blocking access */ + eError = OSLockResource(&psSysData->sQProcessResource, + KERNEL_ID); + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + + if(psQueue == psQueueInfo) + { + psSysData->psQueueList = psQueueInfo->psNextKM; + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + NearestPower2(psQueueInfo->ui32QueueSize) + PVRSRV_MAX_CMD_SIZE, + psQueueInfo->pvLinQueueKM, + psQueueInfo->hMemBlock[1]); + psQueueInfo->pvLinQueueKM = IMG_NULL; + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_QUEUE_INFO), + psQueueInfo, + psQueueInfo->hMemBlock[0]); + /* PRQA S 3199 1 */ /* see note */ + psQueueInfo = IMG_NULL; /*it's a copy on stack, but null it because the function doesn't end right here*/ + } + else + { + while(psQueue) + { + if(psQueue->psNextKM == psQueueInfo) + { + psQueue->psNextKM = psQueueInfo->psNextKM; + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + psQueueInfo->ui32QueueSize, + psQueueInfo->pvLinQueueKM, + psQueueInfo->hMemBlock[1]); + psQueueInfo->pvLinQueueKM = IMG_NULL; + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_QUEUE_INFO), + psQueueInfo, + psQueueInfo->hMemBlock[0]); + /* PRQA S 3199 1 */ /* see note */ + psQueueInfo = IMG_NULL; /*it's a copy on stack, but null it because the function doesn't end right here*/ + break; + } + psQueue = psQueue->psNextKM; + } + + if(!psQueue) + { + eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID); + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto ErrorExit; + } + } + + /* unlock the Q list lock resource */ + eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID); + if (eError != PVRSRV_OK) + { + goto ErrorExit; + } + + /* if the Q list is now empty, destroy the Q list 
lock resource */
+ if (psSysData->psQueueList == IMG_NULL)
+ {
+ eError = OSDestroyResource(&psSysData->sQProcessResource);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+ }
+
+ErrorExit:
+
+ return eError;
+}
+
+
+/*!
+*****************************************************************************
+
+ @Function : PVRSRVGetQueueSpaceKM
+
+ @Description : Waits (polling, up to MAX_HW_TIME_US) for enough free space
+ in the command queue for a task param structure of the given
+ size. The size is first rounded up to a 4-byte multiple and
+ rejected if it exceeds PVRSRV_MAX_CMD_SIZE.
+
+ @Input : psQueue - pointer to queue information struct
+ @Input : ui32ParamSize - size of task data structure
+ @Output : ppvSpace - receives a userspace-relative pointer to the free
+ space at the current write offset, or IMG_NULL on timeout
+
+ @Return : PVRSRV_ERROR
+*****************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
+ IMG_SIZE_T ui32ParamSize,
+ IMG_VOID **ppvSpace)
+{
+ IMG_BOOL bTimeout = IMG_TRUE;
+
+ /* round to 4byte units */
+ /* NOTE(review): the mask constant is 32-bit while IMG_SIZE_T may be
+ 64-bit, and the masking happens BEFORE the size check below, so an
+ oversized 64-bit value would be truncated and could pass the check -
+ confirm whether callers can ever pass such sizes */
+ ui32ParamSize = (ui32ParamSize+3) & 0xFFFFFFFC;
+
+ if (ui32ParamSize > PVRSRV_MAX_CMD_SIZE)
+ {
+ PVR_DPF((PVR_DBG_WARNING,"PVRSRVGetQueueSpace: max command size is %d bytes", PVRSRV_MAX_CMD_SIZE));
+ return PVRSRV_ERROR_CMD_TOO_BIG;
+ }
+
+ /* PRQA S 3415,4109 1 */ /* macro format critical - leave alone */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ /* strictly greater-than: one byte is kept unused so a full queue is
+ distinguishable from an empty one */
+ if (GET_SPACE_IN_CMDQ(psQueue) > ui32ParamSize)
+ {
+ bTimeout = IMG_FALSE;
+ break;
+ }
+ OSSleepms(1);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (bTimeout == IMG_TRUE)
+ {
+ *ppvSpace = IMG_NULL;
+
+ return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE;
+ }
+ else
+ {
+ /* note: the returned pointer is based on pvLinQueueUM (the
+ user-mode mapping), at the current write offset */
+ *ppvSpace = (IMG_VOID *)((IMG_UINTPTR_T)psQueue->pvLinQueueUM + psQueue->ui32WriteOffset);
+ }
+
+ return PVRSRV_OK;
+}
+
+
+/*!
+*****************************************************************************
+ @Function PVRSRVInsertCommandKM
+
+ @Description :
+ command insertion utility
+ - waits for space in the queue for a new command
+ - fills in generic command information
+ - returns a pointer to the caller who's expected to then fill
+ in the private data. 
+ The caller should follow PVRSRVInsertCommand with PVRSRVSubmitCommand + which will update the queue's write offset so the command can be + executed. + + @Input psQueue : pointer to queue information struct + + @Output ppvCmdData : holds pointer to space in queue for private cmd data + + @Return PVRSRV_ERROR +*****************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue, + PVRSRV_COMMAND **ppsCommand, + IMG_UINT32 ui32DevIndex, + IMG_UINT16 CommandType, + IMG_UINT32 ui32DstSyncCount, + PVRSRV_KERNEL_SYNC_INFO *apsDstSync[], + IMG_UINT32 ui32SrcSyncCount, + PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[], + IMG_SIZE_T ui32DataByteSize, + PFN_QUEUE_COMMAND_COMPLETE pfnCommandComplete, + IMG_HANDLE hCallbackData) +{ + PVRSRV_ERROR eError; + PVRSRV_COMMAND *psCommand; + IMG_SIZE_T ui32CommandSize; + IMG_UINT32 i; + SYS_DATA *psSysData; + DEVICE_COMMAND_DATA *psDeviceCommandData; + + /* Check that we've got enough space in our command complete data for this command */ + SysAcquireData(&psSysData); + psDeviceCommandData = psSysData->apsDeviceCommandData[ui32DevIndex]; + + if ((psDeviceCommandData[CommandType].ui32MaxDstSyncCount < ui32DstSyncCount) || + (psDeviceCommandData[CommandType].ui32MaxSrcSyncCount < ui32SrcSyncCount)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVInsertCommandKM: Too many syncs")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Round up to nearest 32 bit size so pointer arithmetic works */ + ui32DataByteSize = (ui32DataByteSize + 3UL) & ~3UL; + + /* calc. 
command size */ + ui32CommandSize = sizeof(PVRSRV_COMMAND) + + ((ui32DstSyncCount + ui32SrcSyncCount) * sizeof(PVRSRV_SYNC_OBJECT)) + + ui32DataByteSize; + + /* wait for space in queue */ + eError = PVRSRVGetQueueSpaceKM (psQueue, ui32CommandSize, (IMG_VOID**)&psCommand); + if(eError != PVRSRV_OK) + { + return eError; + } + + psCommand->ui32ProcessID = OSGetCurrentProcessIDKM(); + + /* setup the command */ + psCommand->uCmdSize = ui32CommandSize; /* this may change if cmd shrinks */ + psCommand->ui32DevIndex = ui32DevIndex; + psCommand->CommandType = CommandType; + psCommand->ui32DstSyncCount = ui32DstSyncCount; + psCommand->ui32SrcSyncCount = ui32SrcSyncCount; + /* override QAC warning about stricter pointers */ + /* PRQA S 3305 END_PTR_ASSIGNMENTS */ + psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand) + sizeof(PVRSRV_COMMAND)); + + + psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psDstSync) + + (ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT))); + + psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psSrcSync) + + (ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT))); +/* PRQA L:END_PTR_ASSIGNMENTS */ + + psCommand->uDataSize = ui32DataByteSize;/* this may change if cmd shrinks */ + + psCommand->pfnCommandComplete = pfnCommandComplete; + psCommand->hCallbackData = hCallbackData; + + PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_START, QUEUE_TOKEN_INSERTKM); + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_NONE, + QUEUE_TOKEN_COMMAND_TYPE, CommandType); + + /* setup dst sync objects and their sync dependencies */ + for (i=0; i<ui32DstSyncCount; i++) + { + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_DST_SYNC, + apsDstSync[i], PVRSRV_SYNCOP_SAMPLE); + + psCommand->psDstSync[i].psKernelSyncInfoKM = apsDstSync[i]; + psCommand->psDstSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsDstSync[i], IMG_FALSE); + psCommand->psDstSync[i].ui32ReadOps2Pending = 
PVRSRVGetReadOpsPending(apsDstSync[i], IMG_FALSE); + + PVRSRVKernelSyncInfoIncRef(apsDstSync[i], IMG_NULL); + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Dst %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x", + i, psCommand->psDstSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCommand->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psCommand->psDstSync[i].ui32ReadOps2Pending, + psCommand->psDstSync[i].ui32WriteOpsPending)); + } + + /* setup src sync objects and their sync dependencies */ + for (i=0; i<ui32SrcSyncCount; i++) + { + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_DST_SYNC, + apsSrcSync[i], PVRSRV_SYNCOP_SAMPLE); + + psCommand->psSrcSync[i].psKernelSyncInfoKM = apsSrcSync[i]; + psCommand->psSrcSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsSrcSync[i], IMG_TRUE); + psCommand->psSrcSync[i].ui32ReadOps2Pending = PVRSRVGetReadOpsPending(apsSrcSync[i], IMG_TRUE); + + PVRSRVKernelSyncInfoIncRef(apsSrcSync[i], IMG_NULL); + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Src %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x", + i, psCommand->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCommand->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psCommand->psSrcSync[i].ui32ReadOps2Pending, + psCommand->psSrcSync[i].ui32WriteOpsPending)); + } + PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_END, QUEUE_TOKEN_INSERTKM); + + /* return pointer to caller to fill out private data */ + *ppsCommand = psCommand; + + return PVRSRV_OK; +} + + +/*! +******************************************************************************* + @Function : PVRSRVSubmitCommandKM + + @Description : + updates the queue's write offset so the command can be executed. 
+
+ @Input : psQueue - queue command is in
+ @Input : psCommand - command previously set up by PVRSRVInsertCommandKM
+
+ @Return : PVRSRV_ERROR
+******************************************************************************/
+IMG_EXPORT
+PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
+ PVRSRV_COMMAND *psCommand)
+{
+ /* override QAC warnings about stricter pointers */
+ /* PRQA S 3305 END_PTR_ASSIGNMENTS2 */
+ /* patch pointers in the command to be kernel pointers: the command was
+ written through the user-mode mapping, so rebase psDstSync/psSrcSync/
+ pvData onto the kernel linear address (pvLinQueueKM) at the same
+ write offset, using the same layout PVRSRVInsertCommandKM set up:
+ [PVRSRV_COMMAND][dst syncs][src syncs][private data] */
+ if (psCommand->ui32DstSyncCount > 0)
+ {
+ psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
+ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND));
+ }
+
+ if (psCommand->ui32SrcSyncCount > 0)
+ {
+ psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
+ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
+ + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
+ }
+
+ /* pvData is patched unconditionally - it always follows both sync
+ arrays, even when those arrays are empty */
+ psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
+ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
+ + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT))
+ + (psCommand->ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
+
+/* PRQA L:END_PTR_ASSIGNMENTS2 */
+
+ /* update write offset before releasing access lock */
+ UPDATE_QUEUE_WOFF(psQueue, psCommand->uCmdSize);
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function CheckIfSyncIsQueued
+
+ @Description Check if the specified sync object is already queued and
+ can safely be given to the display controller.
+ This check is required as a 3rd party displayclass device can
+ have several flips "in flight" and we need to ensure that we
+ keep their pipeline full and don't deadlock waiting for them
+ to complete an operation on a surface. 
+
+ @Input psSync : sync object to look for among the packet's src syncs
+ @Input psCmdData : COMMAND_COMPLETE_DATA structure
+
+ @Return PVRSRV_ERROR - PVRSRV_OK if the sync is queued (or its syncops
+ are stale) and may be handed to the display controller,
+ PVRSRV_ERROR_FAILED_DEPENDENCIES otherwise
+
+******************************************************************************/
+static
+PVRSRV_ERROR CheckIfSyncIsQueued(PVRSRV_SYNC_OBJECT *psSync, COMMAND_COMPLETE_DATA *psCmdData)
+{
+ IMG_UINT32 k;
+
+ if (psCmdData->bInUse)
+ {
+ /* only the packet's SRC syncs are searched */
+ for (k=0;k<psCmdData->ui32SrcSyncCount;k++)
+ {
+ if (psSync->psKernelSyncInfoKM == psCmdData->psSrcSync[k].psKernelSyncInfoKM)
+ {
+ PVRSRV_SYNC_DATA *psSyncData = psSync->psKernelSyncInfoKM->psSyncData;
+ IMG_UINT32 ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
+
+ /*
+ We still need to ensure that we don't give a command
+ to the display controller if writes are outstanding on it
+ */
+ if (ui32WriteOpsComplete == psSync->ui32WriteOpsPending)
+ {
+ return PVRSRV_OK;
+ }
+ else
+ {
+ /* writes outstanding - but if the counters have gone
+ stale, warn and treat as satisfied anyway */
+ if (SYNCOPS_STALE(ui32WriteOpsComplete, psSync->ui32WriteOpsPending))
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "CheckIfSyncIsQueued: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x",
+ (IMG_UINTPTR_T)psSyncData, ui32WriteOpsComplete, psSync->ui32WriteOpsPending));
+ return PVRSRV_OK;
+ }
+ }
+ }
+ }
+ }
+ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
+}
+
+/*! 
+****************************************************************************** + + @Function PVRSRVProcessCommand + + @Description Tries to process a command + + @Input psSysData : system data + @Input psCommand : PVRSRV_COMMAND structure + @Input bFlush : Check for stale dependencies (only used for HW recovery) + + @Return PVRSRV_ERROR + +******************************************************************************/ +static +PVRSRV_ERROR PVRSRVProcessCommand(SYS_DATA *psSysData, + PVRSRV_COMMAND *psCommand, + IMG_BOOL bFlush) +{ + PVRSRV_SYNC_OBJECT *psWalkerObj; + PVRSRV_SYNC_OBJECT *psEndObj; + IMG_UINT32 i; + COMMAND_COMPLETE_DATA *psCmdCompleteData; + PVRSRV_ERROR eError = PVRSRV_OK; + IMG_UINT32 ui32WriteOpsComplete; + IMG_UINT32 ui32ReadOpsComplete; + DEVICE_COMMAND_DATA *psDeviceCommandData; + IMG_UINT32 ui32CCBOffset; + + /* satisfy sync dependencies on the DST(s) */ + psWalkerObj = psCommand->psDstSync; + psEndObj = psWalkerObj + psCommand->ui32DstSyncCount; + while (psWalkerObj < psEndObj) + { + PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData; + + ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete; + ui32ReadOpsComplete = psSyncData->ui32ReadOps2Complete; + /* fail if reads or writes are not up to date */ + if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending) + || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOps2Pending)) + { + if (!bFlush || + !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) || + !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOps2Pending)) + { + return PVRSRV_ERROR_FAILED_DEPENDENCIES; + } + } + + psWalkerObj++; + } + + /* satisfy sync dependencies on the SRC(s) */ + psWalkerObj = psCommand->psSrcSync; + psEndObj = psWalkerObj + psCommand->ui32SrcSyncCount; + while (psWalkerObj < psEndObj) + { + PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData; + + ui32ReadOpsComplete = psSyncData->ui32ReadOps2Complete; + ui32WriteOpsComplete = 
psSyncData->ui32WriteOpsComplete; + /* fail if writes are not up to date */ + if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending) + || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOps2Pending)) + { + if (!bFlush && + SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) && + SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOps2Pending)) + { + PVR_DPF((PVR_DBG_WARNING, + "PVRSRVProcessCommand: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x", + (IMG_UINTPTR_T)psSyncData, ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending)); + } + + if (!bFlush || + !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) || + !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOps2Pending)) + { + IMG_UINT32 j; + PVRSRV_ERROR eError; + IMG_BOOL bFound = IMG_FALSE; + + psDeviceCommandData = psSysData->apsDeviceCommandData[psCommand->ui32DevIndex]; + for (j=0;j<DC_NUM_COMMANDS_PER_TYPE;j++) + { + eError = CheckIfSyncIsQueued(psWalkerObj, psDeviceCommandData[psCommand->CommandType].apsCmdCompleteData[j]); + + if (eError == PVRSRV_OK) + { + bFound = IMG_TRUE; + } + } + if (!bFound) + return PVRSRV_ERROR_FAILED_DEPENDENCIES; + } + } + psWalkerObj++; + } + + /* validate device type */ + if (psCommand->ui32DevIndex >= SYS_DEVICE_COUNT) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVProcessCommand: invalid DeviceType 0x%x", + psCommand->ui32DevIndex)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* fish out the appropriate storage structure for the duration of the command */ + psDeviceCommandData = psSysData->apsDeviceCommandData[psCommand->ui32DevIndex]; + ui32CCBOffset = psDeviceCommandData[psCommand->CommandType].ui32CCBOffset; + psCmdCompleteData = psDeviceCommandData[psCommand->CommandType].apsCmdCompleteData[ui32CCBOffset]; + if (psCmdCompleteData->bInUse) + { + /* can use this to protect against concurrent execution of same command */ + return PVRSRV_ERROR_FAILED_DEPENDENCIES; + } + + /* mark the 
structure as in use */ + psCmdCompleteData->bInUse = IMG_TRUE; + + /* copy src updates over */ + psCmdCompleteData->ui32DstSyncCount = psCommand->ui32DstSyncCount; + for (i=0; i<psCommand->ui32DstSyncCount; i++) + { + psCmdCompleteData->psDstSync[i] = psCommand->psDstSync[i]; + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Dst %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x (CCB:%u)", + i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psCmdCompleteData->psDstSync[i].ui32ReadOps2Pending, + psCmdCompleteData->psDstSync[i].ui32WriteOpsPending, + ui32CCBOffset)); + } + + psCmdCompleteData->pfnCommandComplete = psCommand->pfnCommandComplete; + psCmdCompleteData->hCallbackData = psCommand->hCallbackData; + + /* copy dst updates over */ + psCmdCompleteData->ui32SrcSyncCount = psCommand->ui32SrcSyncCount; + for (i=0; i<psCommand->ui32SrcSyncCount; i++) + { + psCmdCompleteData->psSrcSync[i] = psCommand->psSrcSync[i]; + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Src %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x (CCB:%u)", + i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psCmdCompleteData->psSrcSync[i].ui32ReadOps2Pending, + psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending, + ui32CCBOffset)); + } + + /* + call the cmd specific handler: + it should: + - check the cmd specific dependencies + - setup private cmd complete structure + - execute cmd on HW + - store psCmdCompleteData `cookie' and later pass as + argument to Generic Command Complete Callback + + n.b. 
ui32DataSize (packet size) is useful for packet validation + */ + if (psDeviceCommandData[psCommand->CommandType].pfnCmdProc((IMG_HANDLE)psCmdCompleteData, + (IMG_UINT32)psCommand->uDataSize, + psCommand->pvData) == IMG_FALSE) + { + /* + clean-up: + free cmd complete structure + */ + psCmdCompleteData->bInUse = IMG_FALSE; + eError = PVRSRV_ERROR_CMD_NOT_PROCESSED; + } + + /* Increment the CCB offset */ + psDeviceCommandData[psCommand->CommandType].ui32CCBOffset = (ui32CCBOffset + 1) % DC_NUM_COMMANDS_PER_TYPE; + + return eError; +} + + +static IMG_VOID PVRSRVProcessQueues_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + if (psDeviceNode->bReProcessDeviceCommandComplete && + psDeviceNode->pfnDeviceCommandComplete != IMG_NULL) + { + (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode); + } +} + +/*! +****************************************************************************** + + @Function PVRSRVProcessQueues + + @Description Tries to process a command from each Q + + @input ui32CallerID - used to distinguish between async ISR/DPC type calls + the synchronous services driver + @input bFlush - flush commands with stale dependencies (only used for HW recovery) + + @Return PVRSRV_ERROR + +******************************************************************************/ + +IMG_EXPORT +PVRSRV_ERROR PVRSRVProcessQueues(IMG_BOOL bFlush) +{ + PVRSRV_QUEUE_INFO *psQueue; + SYS_DATA *psSysData; + PVRSRV_COMMAND *psCommand; +/* PVRSRV_DEVICE_NODE *psDeviceNode;*/ + + SysAcquireData(&psSysData); + + /* Ensure we don't corrupt queue list, by blocking access. 
This is required for OSs where + multiple ISR threads may exist simultaneously (eg WinXP DPC routines) + */ + while (OSLockResource(&psSysData->sQProcessResource, ISR_ID) != PVRSRV_OK) + { + OSWaitus(1); + }; + + psQueue = psSysData->psQueueList; + + if(!psQueue) + { + PVR_DPF((PVR_DBG_MESSAGE,"No Queues installed - cannot process commands")); + } + + if (bFlush) + { + PVRSRVSetDCState(DC_STATE_FLUSH_COMMANDS); + } + + while (psQueue) + { + while (psQueue->ui32ReadOffset != psQueue->ui32WriteOffset) + { + psCommand = (PVRSRV_COMMAND*)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + psQueue->ui32ReadOffset); + + if (PVRSRVProcessCommand(psSysData, psCommand, bFlush) == PVRSRV_OK) + { + /* processed cmd so update queue */ + UPDATE_QUEUE_ROFF(psQueue, psCommand->uCmdSize) + continue; + } + + break; + } + psQueue = psQueue->psNextKM; + } + + if (bFlush) + { + PVRSRVSetDCState(DC_STATE_NO_FLUSH_COMMANDS); + } + + /* Re-process command complete handlers if necessary. */ + List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, + &PVRSRVProcessQueues_ForEachCb); + + OSUnlockResource(&psSysData->sQProcessResource, ISR_ID); + + return PVRSRV_OK; +} + +#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS) +/*! 
+
+******************************************************************************
+
+ @Function PVRSRVFreeCommandCompletePacketKM
+
+ @Description Releases a command complete data packet WITHOUT updating its
+ sync objects (contrast with PVRSRVCommandCompleteKM), then
+ re-schedules device callbacks and the MISR so queue
+ processing continues.
+
+ @Input hCmdCookie : command cookie
+ @Input bScheduleMISR : obsolete parameter (ignored - the MISR is
+ always scheduled)
+
+ @Return None
+
+******************************************************************************/
+IMG_INTERNAL
+IMG_VOID PVRSRVFreeCommandCompletePacketKM(IMG_HANDLE hCmdCookie,
+ IMG_BOOL bScheduleMISR)
+{
+ COMMAND_COMPLETE_DATA *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie;
+ SYS_DATA *psSysData;
+
+ PVR_UNREFERENCED_PARAMETER(bScheduleMISR);
+
+ SysAcquireData(&psSysData);
+
+ /* free command complete storage */
+ psCmdCompleteData->bInUse = IMG_FALSE;
+
+ /* FIXME: This may cause unrelated devices to be woken up. */
+ PVRSRVScheduleDeviceCallbacks();
+
+ /* the MISR is always scheduled, regardless of bScheduleMISR */
+ OSScheduleMISR(psSysData);
+}
+
+#endif /* (SUPPORT_CUSTOM_SWAP_OPERATIONS) */
+
+#if defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK)
+extern void sgxfreq_notif_sgx_frame_done(void);
+#endif /* (SYS_OMAP4_HAS_DVFS_FRAMEWORK) */
+
+/*! 
+****************************************************************************** + + @Function PVRSRVCommandCompleteKM + + @Description Updates non-private command complete sync objects + + @Input hCmdCookie : command cookie + @Input bScheduleMISR : boolean to schedule MISR + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, + IMG_BOOL bScheduleMISR) +{ + IMG_UINT32 i; + COMMAND_COMPLETE_DATA *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie; + SYS_DATA *psSysData; + +#if defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK) + sgxfreq_notif_sgx_frame_done(); +#endif /* (SYS_OMAP4_HAS_DVFS_FRAMEWORK) */ + + SysAcquireData(&psSysData); + + PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_COMP_START, + QUEUE_TOKEN_COMMAND_COMPLETE); + + /* update DST(s) syncs */ + for (i=0; i<psCmdCompleteData->ui32DstSyncCount; i++) + { + psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete++; + + PVRSRVKernelSyncInfoDecRef(psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM, IMG_NULL); + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_UPDATE_DST, + psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM, + PVRSRV_SYNCOP_COMPLETE); + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Dst %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x", + i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psCmdCompleteData->psDstSync[i].ui32ReadOps2Pending, + psCmdCompleteData->psDstSync[i].ui32WriteOpsPending)); + } + + /* update SRC(s) syncs */ + for (i=0; i<psCmdCompleteData->ui32SrcSyncCount; i++) + { + psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->psSyncData->ui32ReadOps2Complete++; + + PVRSRVKernelSyncInfoDecRef(psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM, IMG_NULL); + + 
PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_UPDATE_SRC, + psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM, + PVRSRV_SYNCOP_COMPLETE); + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Src %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x", + i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr, + psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr, + psCmdCompleteData->psSrcSync[i].ui32ReadOps2Pending, + psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending)); + } + + PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_COMP_END, + QUEUE_TOKEN_COMMAND_COMPLETE); + + if (psCmdCompleteData->pfnCommandComplete) + { + psCmdCompleteData->pfnCommandComplete(psCmdCompleteData->hCallbackData); + } + + /* free command complete storage */ + psCmdCompleteData->bInUse = IMG_FALSE; + + /* FIXME: This may cause unrelated devices to be woken up. */ + PVRSRVScheduleDeviceCallbacks(); + + if(bScheduleMISR) + { + OSScheduleMISR(psSysData); + } +} + + + + +/*! 
+****************************************************************************** + + @Function PVRSRVRegisterCmdProcListKM + + @Description + + registers a list of private command processing functions with the Command + Queue Manager + + @Input ui32DevIndex : device index + + @Input ppfnCmdProcList : function ptr table of private command processors + + @Input ui32MaxSyncsPerCmd : max number of syncobjects used by command + + @Input ui32CmdCount : number of entries in function ptr table + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex, + PFN_CMD_PROC *ppfnCmdProcList, + IMG_UINT32 ui32MaxSyncsPerCmd[][2], + IMG_UINT32 ui32CmdCount) +{ + SYS_DATA *psSysData; + PVRSRV_ERROR eError; + IMG_UINT32 ui32CmdCounter, ui32CmdTypeCounter; + IMG_SIZE_T ui32AllocSize; + DEVICE_COMMAND_DATA *psDeviceCommandData; + COMMAND_COMPLETE_DATA *psCmdCompleteData; + + /* validate device type */ + if(ui32DevIndex >= SYS_DEVICE_COUNT) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRegisterCmdProcListKM: invalid DeviceType 0x%x", + ui32DevIndex)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* acquire system data structure */ + SysAcquireData(&psSysData); + + /* array of pointers for each command store */ + ui32AllocSize = ui32CmdCount * sizeof(*psDeviceCommandData); + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + ui32AllocSize, + (IMG_VOID **)&psDeviceCommandData, IMG_NULL, + "Array of Pointers for Command Store"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc CC data")); + goto ErrorExit; + } + + psSysData->apsDeviceCommandData[ui32DevIndex] = psDeviceCommandData; + + for (ui32CmdTypeCounter = 0; ui32CmdTypeCounter < ui32CmdCount; ui32CmdTypeCounter++) + { + psDeviceCommandData[ui32CmdTypeCounter].pfnCmdProc = ppfnCmdProcList[ui32CmdTypeCounter]; + psDeviceCommandData[ui32CmdTypeCounter].ui32CCBOffset 
= 0; + psDeviceCommandData[ui32CmdTypeCounter].ui32MaxDstSyncCount = ui32MaxSyncsPerCmd[ui32CmdTypeCounter][0]; + psDeviceCommandData[ui32CmdTypeCounter].ui32MaxSrcSyncCount = ui32MaxSyncsPerCmd[ui32CmdTypeCounter][1]; + for (ui32CmdCounter = 0; ui32CmdCounter < DC_NUM_COMMANDS_PER_TYPE; ui32CmdCounter++) + { + /* + allocate storage for the sync update on command complete + */ + ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA) /* space for one GENERIC_CMD_COMPLETE */ + + ((ui32MaxSyncsPerCmd[ui32CmdTypeCounter][0] + + ui32MaxSyncsPerCmd[ui32CmdTypeCounter][1]) + * sizeof(PVRSRV_SYNC_OBJECT)); /* space for max sync objects */ + + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + ui32AllocSize, + (IMG_VOID **)&psCmdCompleteData, + IMG_NULL, + "Command Complete Data"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc cmd %d", ui32CmdTypeCounter)); + goto ErrorExit; + } + + psDeviceCommandData[ui32CmdTypeCounter].apsCmdCompleteData[ui32CmdCounter] = psCmdCompleteData; + + /* clear memory */ + OSMemSet(psCmdCompleteData, 0x00, ui32AllocSize); + + /* setup sync pointers */ + psCmdCompleteData->psDstSync = (PVRSRV_SYNC_OBJECT*) + (((IMG_UINTPTR_T)psCmdCompleteData) + + sizeof(COMMAND_COMPLETE_DATA)); + psCmdCompleteData->psSrcSync = (PVRSRV_SYNC_OBJECT*) + (((IMG_UINTPTR_T)psCmdCompleteData->psDstSync) + + (sizeof(PVRSRV_SYNC_OBJECT) * ui32MaxSyncsPerCmd[ui32CmdTypeCounter][0])); + + psCmdCompleteData->ui32AllocSize = (IMG_UINT32)ui32AllocSize; + } + } + + return PVRSRV_OK; + +ErrorExit: + + /* clean-up if things went wrong */ + if (PVRSRVRemoveCmdProcListKM(ui32DevIndex, ui32CmdCount) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "PVRSRVRegisterCmdProcListKM: Failed to clean up after error, device 0x%x", + ui32DevIndex)); + } + + return eError; +} + + +/*! 
******************************************************************************

 @Function	PVRSRVRemoveCmdProcListKM

 @Description

 Removes a list of private command processing functions and frees the
 associated command-complete storage registered by
 PVRSRVRegisterCmdProcListKM.

 @Input	ui32DevIndex : device index

 @Input	ui32CmdCount : number of entries in function ptr table

 @Return	PVRSRV_ERROR

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32	ui32DevIndex,
									   IMG_UINT32	ui32CmdCount)
{
	SYS_DATA				*psSysData;
	IMG_UINT32				ui32CmdTypeCounter, ui32CmdCounter;
	DEVICE_COMMAND_DATA		*psDeviceCommandData;
	COMMAND_COMPLETE_DATA	*psCmdCompleteData;
	IMG_SIZE_T				ui32AllocSize;

	/* validate device type */
	if(ui32DevIndex >= SYS_DEVICE_COUNT)
	{
		PVR_DPF((PVR_DBG_ERROR,
					"PVRSRVRemoveCmdProcListKM: invalid DeviceType 0x%x",
					ui32DevIndex));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* acquire system data structure */
	SysAcquireData(&psSysData);

	psDeviceCommandData = psSysData->apsDeviceCommandData[ui32DevIndex];
	if(psDeviceCommandData != IMG_NULL)
	{
		for (ui32CmdTypeCounter = 0; ui32CmdTypeCounter < ui32CmdCount; ui32CmdTypeCounter++)
		{
			for (ui32CmdCounter = 0; ui32CmdCounter < DC_NUM_COMMANDS_PER_TYPE; ui32CmdCounter++)
			{
				psCmdCompleteData = psDeviceCommandData[ui32CmdTypeCounter].apsCmdCompleteData[ui32CmdCounter];

				/* free the cmd complete structure array entries;
				   a slot may be IMG_NULL (nothing to free) */
				if (psCmdCompleteData != IMG_NULL)
				{
					/* must not free a block still recording a pending command */
					PVR_ASSERT(psCmdCompleteData->bInUse == IMG_FALSE);
					OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, psCmdCompleteData->ui32AllocSize,
							  psCmdCompleteData, IMG_NULL);
					psDeviceCommandData[ui32CmdTypeCounter].apsCmdCompleteData[ui32CmdCounter] = IMG_NULL;
				}
			}
		}

		/* free the cmd complete structure array for the device */
		ui32AllocSize = ui32CmdCount * sizeof(*psDeviceCommandData);
		OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psDeviceCommandData, IMG_NULL);

		psSysData->apsDeviceCommandData[ui32DevIndex] = IMG_NULL;
	}

	return PVRSRV_OK;
}

/******************************************************************************
 End of file (queue.c)
******************************************************************************/
diff --git a/pvr-source/services4/srvkm/common/ra.c b/pvr-source/services4/srvkm/common/ra.c
new file mode 100644
index 0000000..da48939
--- /dev/null
+++ b/pvr-source/services4/srvkm/common/ra.c
@@ -0,0 +1,2427 @@
/*************************************************************************/ /*!
@Title          Resource Allocator
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@Description
	Implements generic resource allocation. The resource
	allocator was originally intended to manage address spaces; in
	practice the resource allocator is generic and can manage arbitrary
	sets of integers.

	Resources are allocated from arenas. Arenas can be created with an
	initial span of resources. Further resource spans can be added to
	arenas. A callback mechanism allows an arena to request further
	resource spans on demand.

	Each arena maintains an ordered list of resource segments each
	described by a boundary tag. Each boundary tag describes a segment
	of resources which are either 'free', available for allocation, or
	'busy', currently allocated. Adjacent 'free' segments are always
	coalesced to avoid fragmentation.

	For allocation, all 'free' segments are kept on lists of 'free'
	segments in a table indexed by pvr_log2(segment size), i.e. each table
	index n holds 'free' segments in the size range 2**(n-1) -> 2**n.

	Allocation policy is based on an *almost* best fit
	strategy. Choosing any segment from the appropriate table entry
	guarantees that we choose a segment which is within a power of 2 of
	the size we are allocating.
+ + Allocated segments are inserted into a self scaling hash table which + maps the base resource of the span to the relevant boundary + tag. This allows the code to get back to the bounary tag without + exporting explicit boundary tag references through the API. + + Each arena has an associated quantum size, all allocations from the + arena are made in multiples of the basic quantum. + + On resource exhaustion in an arena, a callback if provided will be + used to request further resources. Resouces spans allocated by the + callback mechanism are delimited by special boundary tag markers of + zero span, 'span' markers. Span markers are never coallesced. Span + markers are used to detect when an imported span is completely free + and can be deallocated by the callback mechanism. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +/* Issues: + * - flags, flags are passed into the resource allocator but are not currently used. + * - determination, of import size, is currently braindead. + * - debug code should be moved out to own module and #ifdef'd + */ + +#include "services_headers.h" +#include "hash.h" +#include "ra.h" +#include "buffer_manager.h" +#include "osfunc.h" + +#if defined(__linux__) && defined(__KERNEL__) +#include <linux/kernel.h> +#include "pvr_uaccess.h" +#include "proc.h" +#include <linux/sched.h> +#endif + +#ifdef USE_BM_FREESPACE_CHECK +#include <stdio.h> +#endif + +/* The initial, and minimum size of the live address -> boundary tag + structure hash table. The value 64 is a fairly arbitrary + choice. 
   The hash table resizes on demand so the value chosen is
   not critical. */
#define MINIMUM_HASH_SIZE (64)

#if defined(VALIDATE_ARENA_TEST)

/* This test validates the doubly linked ordered list of boundary tags, by
checking that adjacent members of the list have compatible eResourceSpan
and eResourceType values. */

typedef enum RESOURCE_DESCRIPTOR_TAG {

	RESOURCE_SPAN_LIVE = 10,
	RESOURCE_SPAN_FREE,
	IMPORTED_RESOURCE_SPAN_START,
	IMPORTED_RESOURCE_SPAN_LIVE,
	IMPORTED_RESOURCE_SPAN_FREE,
	IMPORTED_RESOURCE_SPAN_END,

} RESOURCE_DESCRIPTOR;

typedef enum RESOURCE_TYPE_TAG {

	IMPORTED_RESOURCE_TYPE = 20,
	NON_IMPORTED_RESOURCE_TYPE

} RESOURCE_TYPE;


/* monotonically increasing id handed to each new boundary tag; used only to
   identify tags in validation debug messages */
static IMG_UINT32 ui32BoundaryTagID = 0;

IMG_UINT32 ValidateArena(RA_ARENA *pArena);
#endif

/* boundary tags, used to describe a resource segment */
struct _BT_
{
	enum bt_type
	{
		btt_span,	/* span markers */
		btt_free,	/* free resource segment */
		btt_live	/* allocated resource segment */
	} type;

	/* The base resource and extent of this segment */
	IMG_UINTPTR_T base;
	IMG_SIZE_T uSize;

	/* doubly linked ordered list of all segments within the arena */
	struct _BT_ *pNextSegment;
	struct _BT_ *pPrevSegment;
	/* doubly linked un-ordered list of free segments. */
	struct _BT_ *pNextFree;
	struct _BT_ *pPrevFree;
	/* a user reference associated with this span, user references are
	 * currently only provided in the callback mechanism */
	BM_MAPPING *psMapping;

#if defined(VALIDATE_ARENA_TEST)
	RESOURCE_DESCRIPTOR eResourceSpan;
	RESOURCE_TYPE eResourceType;

	/* This variable provides a reference (used in debug messages) to incompatible
	   boundary tags within the doubly linked ordered list. */
	IMG_UINT32 ui32BoundaryTagID;
#endif

};
typedef struct _BT_ BT;


/* resource allocation arena */
struct _RA_ARENA_
{
	/* arena name for diagnostics output */
	IMG_CHAR *name;

	/* allocations within this arena are quantum sized */
	IMG_SIZE_T uQuantum;

	/* import interface, if provided */
	IMG_BOOL (*pImportAlloc)(IMG_VOID *,
							 IMG_SIZE_T uSize,
							 IMG_SIZE_T *pActualSize,
							 BM_MAPPING **ppsMapping,
							 IMG_UINT32 uFlags,
							 IMG_PVOID pvPrivData,
							 IMG_UINT32 ui32PrivDataLength,
							 IMG_UINTPTR_T *pBase);
	IMG_VOID (*pImportFree) (IMG_VOID *,
							 IMG_UINTPTR_T,
							 BM_MAPPING *psMapping);
	IMG_VOID (*pBackingStoreFree) (IMG_VOID *, IMG_SIZE_T, IMG_SIZE_T, IMG_HANDLE);

	/* arbitrary handle provided by arena owner to be passed into the
	 * import alloc and free hooks */
	IMG_VOID *pImportHandle;

	/* head of list of free boundary tags indexed by pvr_log2 of the
	   boundary tag size */
#define FREE_TABLE_LIMIT 32

	/* power-of-two table of free lists */
	BT *aHeadFree [FREE_TABLE_LIMIT];

	/* resource ordered segment list */
	BT *pHeadSegment;
	BT *pTailSegment;

	/* segment address to boundary tag hash table */
	HASH_TABLE *pSegmentHash;

#ifdef RA_STATS
	RA_STATISTICS sStatistics;
#endif

#if defined(CONFIG_PROC_FS) && defined(CONFIG_PVR_PROC_FS)
#define PROC_NAME_SIZE 64

	struct proc_dir_entry* pProcInfo;
	struct proc_dir_entry* pProcSegs;

	IMG_BOOL bInitProcEntry;

#if defined(CONFIG_PVR_PROC_FS_HEAP_ALLOC_DEBUG)
	struct proc_dir_entry* pProcAllocFailThreshold;

	/* /proc-driven artificial allocation-failure injection state,
	   consumed by RA_TestAllocationFail below */
	IMG_BOOL bFailAllocationOnce;
	IMG_BOOL bFailAllocationPersist;
	IMG_SIZE_T uAllocFailThreshold;
	IMG_UINT32 uAllocFailMask;
#endif //defined(CONFIG_PVR_PROC_FS_HEAP_ALLOC_DEBUG)

#endif
};
/* #define ENABLE_RA_DUMP	1 */
#if defined(ENABLE_RA_DUMP)
IMG_VOID RA_Dump (RA_ARENA *pArena);
#endif

/* Debug hook: returns IMG_TRUE when a /proc-configured artificial allocation
   failure should be injected for this size/buffer type; always IMG_FALSE when
   the heap-alloc debug interface is not compiled in. */
static INLINE IMG_BOOL RA_TestAllocationFail(RA_ARENA *pArena, IMG_SIZE_T size, IMG_UINT32 buff_type)
{
	#if defined (CONFIG_PVR_PROC_FS_HEAP_ALLOC_DEBUG)
	if(pArena->bFailAllocationOnce == IMG_TRUE)
	{
		if((size > pArena->uAllocFailThreshold) && (pArena->uAllocFailMask & buff_type))
		{
			/* one-shot unless persistence was requested */
			if(pArena->bFailAllocationPersist == IMG_FALSE)
				pArena->bFailAllocationOnce = IMG_FALSE;
			return IMG_TRUE;
		}
	}
	#endif //CONFIG_PVR_PROC_FS_HEAP_ALLOC_DEBUG
	return IMG_FALSE;
}

#if defined(CONFIG_PROC_FS) && defined(CONFIG_PVR_PROC_FS)

static void RA_ProcSeqShowInfo(struct seq_file *sfile, void* el);
static void* RA_ProcSeqOff2ElementInfo(struct seq_file * sfile, loff_t off);

static void RA_ProcSeqShowRegs(struct seq_file *sfile, void* el);
static void* RA_ProcSeqOff2ElementRegs(struct seq_file * sfile, loff_t off);

#if defined(CONFIG_PVR_PROC_FS_HEAP_ALLOC_DEBUG)
static int RA_ProcSetAllocFailThreshold(struct file *file, const char __user *buffer, unsigned long count, void *data);
static void* RA_ProcSeqOff2AllocFailThreshold(struct seq_file * sfile, loff_t off);
static void RA_ProcSeqShowAllocFailThreshold(struct seq_file *sfile,void* el);
#endif //defined(CONFIG_PVR_PROC_FS_HEAP_ALLOC_DEBUG)

#endif /* defined(CONFIG_PROC_FS) && defined(CONFIG_PVR_PROC_FS) */

static PVRSRV_ERROR RA_DumpHeapInfo(RA_ARENA *pArena, IMG_UINT32 ui32DebugLevel);

#ifdef USE_BM_FREESPACE_CHECK
IMG_VOID CheckBMFreespace(IMG_VOID);
#endif

#if defined(CONFIG_PROC_FS) && defined(CONFIG_PVR_PROC_FS)
/* /proc entry names must not contain whitespace; rewrite spaces/tabs to
   underscores in place and return the same buffer */
static IMG_CHAR *ReplaceSpaces(IMG_CHAR * const pS)
{
	IMG_CHAR *pT;

	for(pT = pS; *pT != 0; pT++)
	{
		if (*pT == ' ' || *pT == '\t')
		{
			*pT = '_';
		}
	}

	return pS;
}
#endif

/*!
******************************************************************************
 @Function	_RequestAllocFail

 @Description	Default callback allocator used if no callback is
 specified, always fails to allocate further resources to the
 arena.
+ + @Input _h - callback handle + @Input _uSize - requested allocation size + @Output _pActualSize - actual allocation size + @Input _pRef - user reference + @Input _uflags - allocation flags + @Input _pvPrivData - private data + @Input _ui32PrivDataLength - private data length + @Input _pBase - receives allocated base + + @Return IMG_FALSE, this function always fails to allocate. +******************************************************************************/ +static IMG_BOOL +_RequestAllocFail (IMG_VOID *_h, + IMG_SIZE_T _uSize, + IMG_SIZE_T *_pActualSize, + BM_MAPPING **_ppsMapping, + IMG_UINT32 _uFlags, + IMG_PVOID _pvPrivData, + IMG_UINT32 _ui32PrivDataLength, + IMG_UINTPTR_T *_pBase) +{ + PVR_UNREFERENCED_PARAMETER (_h); + PVR_UNREFERENCED_PARAMETER (_uSize); + PVR_UNREFERENCED_PARAMETER (_pActualSize); + PVR_UNREFERENCED_PARAMETER (_ppsMapping); + PVR_UNREFERENCED_PARAMETER (_uFlags); + PVR_UNREFERENCED_PARAMETER (_pBase); + PVR_UNREFERENCED_PARAMETER (_pvPrivData); + PVR_UNREFERENCED_PARAMETER (_ui32PrivDataLength); + + return IMG_FALSE; +} + +/*! +****************************************************************************** + @Function pvr_log2 + + @Description Computes the floor of the log base 2 of a unsigned integer + + @Input n - unsigned integer + + @Return Floor(Log2(n)) +******************************************************************************/ +static IMG_UINT32 +pvr_log2 (IMG_SIZE_T n) +{ + IMG_UINT32 l = 0; + n>>=1; + while (n>0) + { + n>>=1; + l++; + } + return l; +} + +/*! +****************************************************************************** + @Function _SegmentListInsertAfter + + @Description Insert a boundary tag into an arena segment list after a + specified boundary tag. + + @Input pArena - the arena. + @Input pInsertionPoint - the insertion point. + @Input pBT - the boundary tag to insert. 
 @Return	PVRSRV_ERROR
******************************************************************************/
static PVRSRV_ERROR
_SegmentListInsertAfter (RA_ARENA *pArena,
						 BT *pInsertionPoint,
						 BT *pBT)
{
	PVR_ASSERT (pArena != IMG_NULL);
	PVR_ASSERT (pInsertionPoint != IMG_NULL);

	if ((pInsertionPoint == IMG_NULL) || (pArena == IMG_NULL))
	{
		PVR_DPF ((PVR_DBG_ERROR,"_SegmentListInsertAfter: invalid parameters"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* splice pBT in between pInsertionPoint and its successor, updating the
	   arena tail pointer when inserting at the end of the list */
	pBT->pNextSegment = pInsertionPoint->pNextSegment;
	pBT->pPrevSegment = pInsertionPoint;
	if (pInsertionPoint->pNextSegment == IMG_NULL)
		pArena->pTailSegment = pBT;
	else
		pInsertionPoint->pNextSegment->pPrevSegment = pBT;
	pInsertionPoint->pNextSegment = pBT;

	return PVRSRV_OK;
}

/*!
******************************************************************************
 @Function	_SegmentListInsert

 @Description	Insert a boundary tag into an arena segment list at the
 appropriate point, keeping the list ordered by base address.

 @Input	pArena - the arena.
 @Input	pBT - the boundary tag to insert.

 @Return	PVRSRV_ERROR
******************************************************************************/
static PVRSRV_ERROR
_SegmentListInsert (RA_ARENA *pArena, BT *pBT)
{
	PVRSRV_ERROR eError = PVRSRV_OK;

	/* insert into the segment chain */
	if (pArena->pHeadSegment == IMG_NULL)
	{
		/* empty arena: pBT becomes both head and tail */
		pArena->pHeadSegment = pArena->pTailSegment = pBT;
		pBT->pNextSegment = pBT->pPrevSegment = IMG_NULL;
	}
	else
	{
		BT *pBTScan;

		if (pBT->base < pArena->pHeadSegment->base)
		{
			/* The base address of pBT is less than the base address of the boundary tag
			   at the head of the list - so insert this boundary tag at the head. */
			pBT->pNextSegment = pArena->pHeadSegment;
			pArena->pHeadSegment->pPrevSegment = pBT;
			pArena->pHeadSegment = pBT;
			pBT->pPrevSegment = IMG_NULL;
		}
		else
		{
			/* Linear scan for the insertion point: pBT must be inserted
			   before the first boundary tag with a greater base value - or
			   at the end of the list. */
			pBTScan = pArena->pHeadSegment;

			while ((pBTScan->pNextSegment != IMG_NULL) && (pBT->base >= pBTScan->pNextSegment->base))
			{
				pBTScan = pBTScan->pNextSegment;
			}

			eError = _SegmentListInsertAfter (pArena, pBTScan, pBT);
			if (eError != PVRSRV_OK)
			{
				return eError;
			}
		}
	}
	return eError;
}

/*!
******************************************************************************
 @Function	_SegmentListRemove

 @Description	Remove a boundary tag from an arena segment list, fixing up
 the arena head/tail pointers when the tag is at either end.

 @Input	pArena - the arena.
 @Input	pBT - the boundary tag to remove.

 @Return	None
******************************************************************************/
static IMG_VOID
_SegmentListRemove (RA_ARENA *pArena, BT *pBT)
{
	if (pBT->pPrevSegment == IMG_NULL)
		pArena->pHeadSegment = pBT->pNextSegment;
	else
		pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;

	if (pBT->pNextSegment == IMG_NULL)
		pArena->pTailSegment = pBT->pPrevSegment;
	else
		pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
}

/*!
******************************************************************************
 @Function	_SegmentSplit

 @Description	Split a segment into two, maintain the arena segment list. The
 boundary tag should not be in the free table. Neither the
 original or the new neighbour boundary tag will be in the free
 table.

 @Input	pArena - the arena.
 @Input	pBT - the boundary tag to split.
 @Input	uSize - the required segment size of boundary tag after
 splitting.

 @Return	New neighbour boundary tag.
+ +******************************************************************************/ +static BT * +_SegmentSplit (RA_ARENA *pArena, BT *pBT, IMG_SIZE_T uSize) +{ + BT *pNeighbour; + + PVR_ASSERT (pArena != IMG_NULL); + + if (pArena == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"_SegmentSplit: invalid parameter - pArena")); + return IMG_NULL; + } + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(BT), + (IMG_VOID **)&pNeighbour, IMG_NULL, + "Boundary Tag") != PVRSRV_OK) + { + return IMG_NULL; + } + + OSMemSet(pNeighbour, 0, sizeof(BT)); + +#if defined(VALIDATE_ARENA_TEST) + pNeighbour->ui32BoundaryTagID = ++ui32BoundaryTagID; +#endif + + pNeighbour->pPrevSegment = pBT; + pNeighbour->pNextSegment = pBT->pNextSegment; + if (pBT->pNextSegment == IMG_NULL) + pArena->pTailSegment = pNeighbour; + else + pBT->pNextSegment->pPrevSegment = pNeighbour; + pBT->pNextSegment = pNeighbour; + + pNeighbour->type = btt_free; + pNeighbour->uSize = pBT->uSize - uSize; + pNeighbour->base = pBT->base + uSize; + pNeighbour->psMapping = pBT->psMapping; + pBT->uSize = uSize; + +#if defined(VALIDATE_ARENA_TEST) + if (pNeighbour->pPrevSegment->eResourceType == IMPORTED_RESOURCE_TYPE) + { + pNeighbour->eResourceType = IMPORTED_RESOURCE_TYPE; + pNeighbour->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE; + } + else if (pNeighbour->pPrevSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE) + { + pNeighbour->eResourceType = NON_IMPORTED_RESOURCE_TYPE; + pNeighbour->eResourceSpan = RESOURCE_SPAN_FREE; + } + else + { + PVR_DPF ((PVR_DBG_ERROR,"_SegmentSplit: pNeighbour->pPrevSegment->eResourceType unrecognized")); + PVR_DBG_BREAK; + } +#endif + + return pNeighbour; +} + +/*! +****************************************************************************** + @Function _FreeListInsert + + @Description Insert a boundary tag into an arena free table. + + @Input pArena - the arena. + @Input pBT - the boundary tag. 
+ + @Return None + +******************************************************************************/ +static IMG_VOID +_FreeListInsert (RA_ARENA *pArena, BT *pBT) +{ + IMG_UINT32 uIndex; + uIndex = pvr_log2 (pBT->uSize); + pBT->type = btt_free; + pBT->pNextFree = pArena->aHeadFree [uIndex]; + pBT->pPrevFree = IMG_NULL; + if (pArena->aHeadFree[uIndex] != IMG_NULL) + pArena->aHeadFree[uIndex]->pPrevFree = pBT; + pArena->aHeadFree [uIndex] = pBT; +} + +/*! +****************************************************************************** + @Function _FreeListRemove + + @Description Remove a boundary tag from an arena free table. + + @Input pArena - the arena. + @Input pBT - the boundary tag. + + @Return None + +******************************************************************************/ +static IMG_VOID +_FreeListRemove (RA_ARENA *pArena, BT *pBT) +{ + IMG_UINT32 uIndex; + uIndex = pvr_log2 (pBT->uSize); + if (pBT->pNextFree != IMG_NULL) + pBT->pNextFree->pPrevFree = pBT->pPrevFree; + if (pBT->pPrevFree == IMG_NULL) + pArena->aHeadFree[uIndex] = pBT->pNextFree; + else + pBT->pPrevFree->pNextFree = pBT->pNextFree; +} + +/*! +****************************************************************************** + @Function _BuildSpanMarker + + @Description Construct a span marker boundary tag. + + @Input pArena - arena to contain span marker + @Input base - the base of the bounary tag. + + @Return span marker boundary tag + +******************************************************************************/ +static BT * +_BuildSpanMarker (IMG_UINTPTR_T base, IMG_SIZE_T uSize) +{ + BT *pBT; + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(BT), + (IMG_VOID **)&pBT, IMG_NULL, + "Boundary Tag") != PVRSRV_OK) + { + return IMG_NULL; + } + + OSMemSet(pBT, 0, sizeof(BT)); + +#if defined(VALIDATE_ARENA_TEST) + pBT->ui32BoundaryTagID = ++ui32BoundaryTagID; +#endif + + pBT->type = btt_span; + pBT->base = base; + pBT->uSize = uSize; + pBT->psMapping = IMG_NULL; + + return pBT; +} + +/*! 
+****************************************************************************** + @Function _BuildBT + + @Description Construct a boundary tag for a free segment. + + @Input base - the base of the resource segment. + @Input uSize - the extent of the resouce segment. + + @Return boundary tag + +******************************************************************************/ +static BT * +_BuildBT (IMG_UINTPTR_T base, IMG_SIZE_T uSize) +{ + BT *pBT; + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(BT), + (IMG_VOID **)&pBT, IMG_NULL, + "Boundary Tag") != PVRSRV_OK) + { + return IMG_NULL; + } + + OSMemSet(pBT, 0, sizeof(BT)); + +#if defined(VALIDATE_ARENA_TEST) + pBT->ui32BoundaryTagID = ++ui32BoundaryTagID; +#endif + + pBT->type = btt_free; + pBT->base = base; + pBT->uSize = uSize; + + return pBT; +} + +/*! +****************************************************************************** + @Function _InsertResource + + @Description Add a free resource segment to an arena. + + @Input pArena - the arena. + @Input base - the base of the resource segment. + @Input uSize - the extent of the resource segment. 
+ + @Return New bucket pointer + IMG_NULL failure + +******************************************************************************/ +static BT * +_InsertResource (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize) +{ + BT *pBT; + PVR_ASSERT (pArena!=IMG_NULL); + if (pArena == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"_InsertResource: invalid parameter - pArena")); + return IMG_NULL; + } + + pBT = _BuildBT (base, uSize); + if (pBT != IMG_NULL) + { + +#if defined(VALIDATE_ARENA_TEST) + pBT->eResourceSpan = RESOURCE_SPAN_FREE; + pBT->eResourceType = NON_IMPORTED_RESOURCE_TYPE; +#endif + + if (_SegmentListInsert (pArena, pBT) != PVRSRV_OK) + { + PVR_DPF ((PVR_DBG_ERROR,"_InsertResource: call to _SegmentListInsert failed")); + return IMG_NULL; + } + _FreeListInsert (pArena, pBT); +#ifdef RA_STATS + pArena->sStatistics.uTotalResourceCount+=uSize; + pArena->sStatistics.uFreeResourceCount+=uSize; + pArena->sStatistics.uSpanCount++; +#endif + } + return pBT; +} + +/*! +****************************************************************************** + @Function _InsertResourceSpan + + @Description Add a free resource span to an arena, complete with span markers. + + @Input pArena - the arena. + @Input base - the base of the resource segment. + @Input uSize - the extent of the resource segment. + + @Return the boundary tag representing the free resource segment, + or IMG_NULL on failure. 
******************************************************************************/
static BT *
_InsertResourceSpan (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
{
	PVRSRV_ERROR eError;
	BT *pSpanStart;
	BT *pSpanEnd;
	BT *pBT;

	PVR_ASSERT (pArena != IMG_NULL);
	if (pArena == IMG_NULL)
	{
		PVR_DPF ((PVR_DBG_ERROR,"_InsertResourceSpan: invalid parameter - pArena"));
		return IMG_NULL;
	}

	PVR_DPF ((PVR_DBG_MESSAGE,
			  "RA_InsertResourceSpan: arena='%s', base=0x%x, size=0x%x",
			  pArena->name, base, uSize));

	/* an imported span is delimited by a start marker and a zero-sized end
	   marker with the free segment between them; the markers let _FreeBT
	   detect when the whole import is free again */
	pSpanStart = _BuildSpanMarker (base, uSize);
	if (pSpanStart == IMG_NULL)
	{
		goto fail_start;
	}

#if defined(VALIDATE_ARENA_TEST)
	pSpanStart->eResourceSpan = IMPORTED_RESOURCE_SPAN_START;
	pSpanStart->eResourceType = IMPORTED_RESOURCE_TYPE;
#endif

	pSpanEnd = _BuildSpanMarker (base + uSize, 0);
	if (pSpanEnd == IMG_NULL)
	{
		goto fail_end;
	}

#if defined(VALIDATE_ARENA_TEST)
	pSpanEnd->eResourceSpan = IMPORTED_RESOURCE_SPAN_END;
	pSpanEnd->eResourceType = IMPORTED_RESOURCE_TYPE;
#endif

	pBT = _BuildBT (base, uSize);
	if (pBT == IMG_NULL)
	{
		goto fail_bt;
	}

#if defined(VALIDATE_ARENA_TEST)
	pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
	pBT->eResourceType = IMPORTED_RESOURCE_TYPE;
#endif

	/* resulting segment list order: start marker, free segment, end marker */
	eError = _SegmentListInsert (pArena, pSpanStart);
	if (eError != PVRSRV_OK)
	{
		goto fail_SegListInsert;
	}

	eError = _SegmentListInsertAfter (pArena, pSpanStart, pBT);
	if (eError != PVRSRV_OK)
	{
		goto fail_SegListInsert;
	}

	_FreeListInsert (pArena, pBT);

	eError = _SegmentListInsertAfter (pArena, pBT, pSpanEnd);
	if (eError != PVRSRV_OK)
	{
		goto fail_SegListInsert;
	}

#ifdef RA_STATS
	pArena->sStatistics.uTotalResourceCount+=uSize;
/*	pArena->sStatistics.uFreeResourceCount+=uSize;
	This has got to be wrong as uFreeResourceCount ends
	up larger than uTotalResourceCount by uTotalResourceCount
	- allocated memory
*/
#endif
	return pBT;

	/* unwind in reverse order of construction; earlier failures fall
	   through to free only what was built.
	   NOTE(review): the fail_SegListInsert path frees tags that may already
	   be linked into the segment/free lists without unlinking them first -
	   confirm the insert helpers can only fail before any linking occurs. */
	fail_SegListInsert:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
	/*not nulling pointer, out of scope*/
	fail_bt:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanEnd, IMG_NULL);
	/*not nulling pointer, out of scope*/
	fail_end:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanStart, IMG_NULL);
	/*not nulling pointer, out of scope*/
	fail_start:
	return IMG_NULL;
}

/*!
******************************************************************************
 @Function	_FreeBT

 @Description	Free a boundary tag taking care of the segment list and the
 boundary tag free table. Coalesces with free neighbours, optionally
 releases backing store for the freed range, and hands a fully-free
 import span back to the import callback.

 @Input	pArena - the arena.
 @Input	pBT - the boundary tag to free.
 @Input	bFreeBackingStore - Should backing for the memory be freed
 as well.
 @Return	None
******************************************************************************/
static IMG_VOID
_FreeBT (RA_ARENA *pArena, BT *pBT, IMG_BOOL bFreeBackingStore)
{
	BT *pNeighbour;
	IMG_UINTPTR_T uOrigBase;
	IMG_SIZE_T uOrigSize;

	PVR_ASSERT (pArena!=IMG_NULL);
	PVR_ASSERT (pBT!=IMG_NULL);

	if ((pArena == IMG_NULL) || (pBT == IMG_NULL))
	{
		PVR_DPF ((PVR_DBG_ERROR,"_FreeBT: invalid parameter"));
		return;
	}

#ifdef RA_STATS
	pArena->sStatistics.uLiveSegmentCount--;
	pArena->sStatistics.uFreeSegmentCount++;
	pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
#endif

	/* remember the pre-coalesce extent for the backing store range below */
	uOrigBase = pBT->base;
	uOrigSize = pBT->uSize;

	/* try and coalesce with left neighbour */
	pNeighbour = pBT->pPrevSegment;
	if (pNeighbour!=IMG_NULL
		&& pNeighbour->type == btt_free
		&& pNeighbour->base + pNeighbour->uSize == pBT->base)
	{
		_FreeListRemove (pArena, pNeighbour);
		_SegmentListRemove (pArena, pNeighbour);
		pBT->base = pNeighbour->base;
		pBT->uSize += pNeighbour->uSize;
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
		/*not nulling original pointer, already overwritten*/
#ifdef RA_STATS
		pArena->sStatistics.uFreeSegmentCount--;
#endif
	}

	/* try to coalesce with right neighbour */
	pNeighbour = pBT->pNextSegment;
	if (pNeighbour!=IMG_NULL
		&& pNeighbour->type == btt_free
		&& pBT->base + pBT->uSize == pNeighbour->base)
	{
		_FreeListRemove (pArena, pNeighbour);
		_SegmentListRemove (pArena, pNeighbour);
		pBT->uSize += pNeighbour->uSize;
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
		/*not nulling original pointer, already overwritten*/
#ifdef RA_STATS
		pArena->sStatistics.uFreeSegmentCount--;
#endif
	}

	/* try to free backing store memory. */
	if (pArena->pBackingStoreFree != IMG_NULL && bFreeBackingStore)
	{
		IMG_UINTPTR_T uRoundedStart, uRoundedEnd;

		/* Work out the first (quantum-aligned) address we might be able to free. */
		uRoundedStart = (uOrigBase / pArena->uQuantum) * pArena->uQuantum;
		/* If a span is still using that address then leave it. */
		if (uRoundedStart < pBT->base)
		{
			uRoundedStart += pArena->uQuantum;
		}

		/* Work out the last address we might be able to free. */
		uRoundedEnd = ((uOrigBase + uOrigSize + pArena->uQuantum - 1) / pArena->uQuantum) * pArena->uQuantum;
		/* If a span is still using that address then leave it. */
		if (uRoundedEnd > (pBT->base + pBT->uSize))
		{
			uRoundedEnd -= pArena->uQuantum;
		}

		if (uRoundedStart < uRoundedEnd)
		{
			pArena->pBackingStoreFree(pArena->pImportHandle, (IMG_SIZE_T)uRoundedStart, (IMG_SIZE_T)uRoundedEnd, (IMG_HANDLE)0);
		}
	}

	/* if the (coalesced) segment now sits directly between two span markers
	   the whole import is free: unlink all three tags and return the span
	   to the import callback */
	if (pBT->pNextSegment!=IMG_NULL && pBT->pNextSegment->type == btt_span
		&& pBT->pPrevSegment!=IMG_NULL && pBT->pPrevSegment->type == btt_span)
	{
		BT *next = pBT->pNextSegment;
		BT *prev = pBT->pPrevSegment;
		_SegmentListRemove (pArena, next);
		_SegmentListRemove (pArena, prev);
		_SegmentListRemove (pArena, pBT);
		pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->psMapping);
#ifdef RA_STATS
		pArena->sStatistics.uSpanCount--;
		pArena->sStatistics.uExportCount++;
		pArena->sStatistics.uFreeSegmentCount--;
		pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
		pArena->sStatistics.uTotalResourceCount-=pBT->uSize;
#endif
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), next, IMG_NULL);
		/*not nulling original pointer, already overwritten*/
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), prev, IMG_NULL);
		/*not nulling original pointer, already overwritten*/
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
		/*not nulling pointer, copy on stack*/
	}
	else
		_FreeListInsert (pArena, pBT);
}


/*!
******************************************************************************
 @Function	_AttemptAllocAligned

 @Description	Attempt an allocation from an arena.

 @Input	pArena - the arena.
 @Input	uSize - the requested allocation size.
 @Output	ppsMapping - the user references associated with
 the allocated segment.
 @Input	flags - allocation flags
 @Input	uAlignment - required uAlignment, or 0
 @Input	uAlignmentOffset
 @Output	base - allocated resource base

 @Return	IMG_FALSE failure
			IMG_TRUE success
******************************************************************************/
static IMG_BOOL
_AttemptAllocAligned (RA_ARENA *pArena,
					  IMG_SIZE_T uSize,
					  BM_MAPPING **ppsMapping,
					  IMG_UINT32 uFlags,
					  IMG_UINT32 uAlignment,
					  IMG_UINT32 uAlignmentOffset,
					  IMG_UINTPTR_T *base)
{
	IMG_UINT32 uIndex;
	PVR_ASSERT (pArena!=IMG_NULL);
	if (pArena == IMG_NULL)
	{
		PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: invalid parameter - pArena"));
		return IMG_FALSE;
	}

	/* only the offset modulo the alignment matters */
	if (uAlignment>1)
		uAlignmentOffset %= uAlignment;

	/* search for a near fit free boundary tag, start looking at the
	   pvr_log2 free table for our required size and work on up the
	   table. */
	uIndex = pvr_log2 (uSize);

	/* skip empty buckets */
	while (uIndex < FREE_TABLE_LIMIT && pArena->aHeadFree[uIndex]==IMG_NULL)
		uIndex++;

	while (uIndex < FREE_TABLE_LIMIT)
	{
		if (pArena->aHeadFree[uIndex]!=IMG_NULL)
		{
			/* we have a cached free boundary tag */
			BT *pBT;

			pBT = pArena->aHeadFree [uIndex];
			while (pBT!=IMG_NULL)
			{
				IMG_UINTPTR_T aligned_base;

				/* round the segment base up so that (base + offset) is aligned */
				if (uAlignment>1)
					aligned_base = (pBT->base + uAlignmentOffset + uAlignment - 1) / uAlignment * uAlignment - uAlignmentOffset;
				else
					aligned_base = pBT->base;
				PVR_DPF ((PVR_DBG_MESSAGE,
						  "RA_AttemptAllocAligned: pBT-base=0x%x "
						  "pBT-size=0x%x alignedbase=0x%x size=0x%x",
						  pBT->base, pBT->uSize, aligned_base, uSize));

				/* does the aligned request still fit inside this segment? */
				if (pBT->base + pBT->uSize >= aligned_base + uSize)
				{
					/* only reuse an imported segment whose flags match the request */
					if(!pBT->psMapping || pBT->psMapping->ui32Flags == uFlags)
					{
						_FreeListRemove (pArena, pBT);

						PVR_ASSERT (pBT->type == btt_free);

#ifdef RA_STATS
						pArena->sStatistics.uLiveSegmentCount++;
						pArena->sStatistics.uFreeSegmentCount--;
						pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
#endif

						/* with uAlignment we might need to discard the front of this segment */
						if (aligned_base > pBT->base)
						{
							BT *pNeighbour;
							pNeighbour = _SegmentSplit (pArena, pBT, (IMG_SIZE_T)(aligned_base - pBT->base));
							/* partition the buffer, create a new boundary tag */
							if (pNeighbour==IMG_NULL)
							{
								PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Front split failed"));
								/* Put pBT back in the list */
								_FreeListInsert (pArena, pBT);
								return IMG_FALSE;
							}

							/* the discarded front part goes back on the free list */
							_FreeListInsert (pArena, pBT);
							#ifdef RA_STATS
							pArena->sStatistics.uFreeSegmentCount++;
							pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
							#endif
							pBT = pNeighbour;
						}

						/* the segment might be too big, if so, discard the back of the segment */
						if (pBT->uSize > uSize)
						{
							BT *pNeighbour;
							pNeighbour = _SegmentSplit (pArena, pBT, uSize);
							/* partition the buffer, create a new boundary tag */
							if (pNeighbour==IMG_NULL)
							{
								PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Back split failed"));
								/* Put pBT back in the list */
								_FreeListInsert (pArena, pBT);
								return IMG_FALSE;
							}

							_FreeListInsert (pArena, pNeighbour);
							#ifdef RA_STATS
							pArena->sStatistics.uFreeSegmentCount++;
							pArena->sStatistics.uFreeResourceCount+=pNeighbour->uSize;
							#endif
						}

						pBT->type = btt_live;

#if defined(VALIDATE_ARENA_TEST)
						if (pBT->eResourceType == IMPORTED_RESOURCE_TYPE)
						{
							pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_LIVE;
						}
						else if (pBT->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
						{
							pBT->eResourceSpan = RESOURCE_SPAN_LIVE;
						}
						else
						{
							PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned ERROR: pBT->eResourceType unrecognized"));
							PVR_DBG_BREAK;
						}
#endif
						/* record the live segment so the tag can later be
						   found again by its base address */
						if (!HASH_Insert (pArena->pSegmentHash, pBT->base, (IMG_UINTPTR_T) pBT))
						{
							_FreeBT (pArena, pBT, IMG_FALSE);
							return IMG_FALSE;
						}

						if (ppsMapping!=IMG_NULL)
							*ppsMapping = pBT->psMapping;

						*base = pBT->base;

						return IMG_TRUE;
					}
					else
					{
						PVR_DPF ((PVR_DBG_MESSAGE,
								  "AttemptAllocAligned: mismatch in flags. Import has %x, request was %x", pBT->psMapping->ui32Flags, uFlags));

					}
				}
				pBT = pBT->pNextFree;
			}

		}
		uIndex++;
	}

	return IMG_FALSE;
}



/*!
******************************************************************************
 @Function	RA_Create

 @Description	To create a resource arena.

 @Input	name - the name of the arena for diagnostic purposes.
 @Input	base - the base of an initial resource span or 0.
 @Input	uSize - the size of an initial resource span or 0.
 @Input	uQuantum - the arena allocation quantum.
 @Input	alloc - a resource allocation callback or 0.
 @Input	free - a resource de-allocation callback or 0.
 @Input	backingstore_free - a callback to free resources for spans or 0.
 @Input	pImportHandle - handle passed to alloc and free or 0.

 @Return	arena handle, or IMG_NULL.
******************************************************************************/
RA_ARENA *
RA_Create (IMG_CHAR *name,
		   IMG_UINTPTR_T base,
		   IMG_SIZE_T uSize,
		   BM_MAPPING *psMapping,
		   IMG_SIZE_T uQuantum,
		   IMG_BOOL (*imp_alloc)(IMG_VOID *, IMG_SIZE_T uSize, IMG_SIZE_T *pActualSize,
								 BM_MAPPING **ppsMapping, IMG_UINT32 _flags,
								 IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength,
								 IMG_UINTPTR_T *pBase),
		   IMG_VOID (*imp_free) (IMG_VOID *, IMG_UINTPTR_T, BM_MAPPING *),
		   IMG_VOID (*backingstore_free) (IMG_VOID*, IMG_SIZE_T, IMG_SIZE_T, IMG_HANDLE),
		   IMG_VOID *pImportHandle)
{
	RA_ARENA *pArena;
	BT *pBT;
	IMG_INT i;

	PVR_DPF ((PVR_DBG_MESSAGE,
			  "RA_Create: name='%s', base=0x%x, uSize=0x%x, alloc=0x%x, free=0x%x",
			  name, base, uSize, (IMG_UINTPTR_T)imp_alloc, (IMG_UINTPTR_T)imp_free));


	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
				   sizeof (*pArena),
				   (IMG_VOID **)&pArena, IMG_NULL,
				   "Resource Arena") != PVRSRV_OK)
	{
		goto arena_fail;
	}

	pArena->name = name;
	pArena->pImportAlloc = (imp_alloc!=IMG_NULL) ?
imp_alloc : &_RequestAllocFail; + pArena->pImportFree = imp_free; + pArena->pBackingStoreFree = backingstore_free; + pArena->pImportHandle = pImportHandle; + for (i=0; i<FREE_TABLE_LIMIT; i++) + pArena->aHeadFree[i] = IMG_NULL; + pArena->pHeadSegment = IMG_NULL; + pArena->pTailSegment = IMG_NULL; + pArena->uQuantum = uQuantum; + +#ifdef RA_STATS + OSMemSet(&pArena->sStatistics, 0x00, sizeof(pArena->sStatistics)); +#endif + +#if defined(CONFIG_PROC_FS) && defined(CONFIG_PVR_PROC_FS) + if(strcmp(pArena->name,"") != 0) + { + IMG_INT ret; + IMG_CHAR szProcInfoName[PROC_NAME_SIZE]; + IMG_CHAR szProcSegsName[PROC_NAME_SIZE]; + struct proc_dir_entry* (*pfnCreateProcEntrySeq)(const IMG_CHAR *, + IMG_VOID*, + pvr_next_proc_seq_t, + pvr_show_proc_seq_t, + pvr_off2element_proc_seq_t, + pvr_startstop_proc_seq_t, + write_proc_t); + + pArena->bInitProcEntry = !PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL); + + /* Don't put shared heap info into a per process /proc subdirectory */ + pfnCreateProcEntrySeq = pArena->bInitProcEntry ? 
CreateProcEntrySeq : CreatePerProcessProcEntrySeq; + + ret = snprintf(szProcInfoName, sizeof(szProcInfoName), "ra_info_%s", pArena->name); + if (ret > 0 && ret < sizeof(szProcInfoName)) + { + pArena->pProcInfo = pfnCreateProcEntrySeq(ReplaceSpaces(szProcInfoName), pArena, NULL, + RA_ProcSeqShowInfo, RA_ProcSeqOff2ElementInfo, NULL, NULL); + } + else + { + pArena->pProcInfo = 0; + PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_info proc entry for arena %s", pArena->name)); + } + + ret = snprintf(szProcSegsName, sizeof(szProcSegsName), "ra_segs_%s", pArena->name); + if (ret > 0 && ret < sizeof(szProcSegsName)) + { + pArena->pProcSegs = pfnCreateProcEntrySeq(ReplaceSpaces(szProcSegsName), pArena, NULL, + RA_ProcSeqShowRegs, RA_ProcSeqOff2ElementRegs, NULL, NULL); + } + else + { + pArena->pProcSegs = 0; + PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_segs proc entry for arena %s", pArena->name)); + } + +#if defined(CONFIG_PVR_PROC_FS_HEAP_ALLOC_DEBUG) + pArena->uAllocFailThreshold = ~0; + pArena->uAllocFailMask = ~0; + pArena->bFailAllocationOnce = IMG_FALSE; + pArena->bFailAllocationPersist = IMG_FALSE; + + ret = snprintf(szProcSegsName, sizeof(szProcSegsName), "ra_fail_alloc_thld_%s", pArena->name); + if (ret > 0 && ret < sizeof(szProcSegsName)) + { + pArena->pProcAllocFailThreshold = pfnCreateProcEntrySeq(ReplaceSpaces(szProcSegsName), pArena, NULL, + RA_ProcSeqShowAllocFailThreshold, RA_ProcSeqOff2AllocFailThreshold, NULL, RA_ProcSetAllocFailThreshold); + } + else + { + pArena->pProcAllocFailThreshold = 0; + PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_fail_alloc_thld proc entry for arena %s", pArena->name)); + } +#endif //defined(CONFIG_PVR_PROC_FS_HEAP_ALLOC_DEBUG) + } +#endif /* defined(CONFIG_PROC_FS) && defined(CONFIG_PVR_PROC_FS) */ + + pArena->pSegmentHash = HASH_Create (MINIMUM_HASH_SIZE); + if (pArena->pSegmentHash==IMG_NULL) + { + goto hash_fail; + } + if (uSize>0) + { + uSize = (uSize + uQuantum - 1) / uQuantum * uQuantum; + 
pBT = _InsertResource (pArena, base, uSize); + if (pBT == IMG_NULL) + { + goto insert_fail; + } + pBT->psMapping = psMapping; + + } + return pArena; + +insert_fail: + HASH_Delete (pArena->pSegmentHash); +hash_fail: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL); + /*not nulling pointer, out of scope*/ +arena_fail: + return IMG_NULL; +} + +/*! +****************************************************************************** + @Function RA_Delete + + @Description To delete a resource arena. All resources allocated from + the arena must be freed before deleting the arena. + + @Input pArena - the arena to delete. + + @Return None +******************************************************************************/ +IMG_VOID +RA_Delete (RA_ARENA *pArena) +{ + IMG_UINT32 uIndex; + + PVR_ASSERT(pArena != IMG_NULL); + + if (pArena == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: invalid parameter - pArena")); + return; + } + + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Delete: name='%s'", pArena->name)); + + for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++) + pArena->aHeadFree[uIndex] = IMG_NULL; + + while (pArena->pHeadSegment != IMG_NULL) + { + BT *pBT = pArena->pHeadSegment; + + if (pBT->type != btt_free) + { + PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: allocations still exist in the arena that is being destroyed")); + PVR_DPF ((PVR_DBG_ERROR,"Likely Cause: client drivers not freeing allocations before destroying devmemcontext")); + PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: base = 0x%x size=0x%x", pBT->base, pBT->uSize)); + } + + _SegmentListRemove (pArena, pBT); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL); + /*not nulling original pointer, it has changed*/ +#ifdef RA_STATS + pArena->sStatistics.uSpanCount--; +#endif + } +#if defined(CONFIG_PROC_FS) && defined(CONFIG_PVR_PROC_FS) + { + IMG_VOID (*pfnRemoveProcEntrySeq)(struct proc_dir_entry*); + + pfnRemoveProcEntrySeq = pArena->bInitProcEntry ? 
RemoveProcEntrySeq : RemovePerProcessProcEntrySeq; + + if (pArena->pProcInfo != 0) + { + pfnRemoveProcEntrySeq( pArena->pProcInfo ); + } + + if (pArena->pProcSegs != 0) + { + pfnRemoveProcEntrySeq( pArena->pProcSegs ); + } + +#if defined(CONFIG_PVR_PROC_FS_HEAP_ALLOC_DEBUG) + if(pArena->pProcAllocFailThreshold != 0) + { + pfnRemoveProcEntrySeq( pArena->pProcAllocFailThreshold ); + } +#endif //defined(CONFIG_PVR_PROC_FS_HEAP_ALLOC_DEBUG) + } +#endif + HASH_Delete (pArena->pSegmentHash); + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL); + /*not nulling pointer, copy on stack*/ +} + +/*! +****************************************************************************** + @Function RA_TestDelete + + @Description To test whether it is safe to delete a resource arena. If any + allocations have not been freed, the RA must not be deleted. + + @Input pArena - the arena to test. + + @Return IMG_BOOL - IMG_TRUE if is safe to go on and call RA_Delete. +******************************************************************************/ +IMG_BOOL +RA_TestDelete (RA_ARENA *pArena) +{ + PVR_ASSERT(pArena != IMG_NULL); + + if (pArena != IMG_NULL) + { + while (pArena->pHeadSegment != IMG_NULL) + { + BT *pBT = pArena->pHeadSegment; + if (pBT->type != btt_free) + { + PVR_DPF ((PVR_DBG_ERROR,"RA_TestDelete: detected resource leak!")); + PVR_DPF ((PVR_DBG_ERROR,"RA_TestDelete: base = 0x%x size=0x%x", pBT->base, pBT->uSize)); + return IMG_FALSE; + } + } + } + + return IMG_TRUE; +} + +/*! +****************************************************************************** + @Function RA_Add + + @Description To add a resource span to an arena. The span must not + overlapp with any span previously added to the arena. + + @Input pArena - the arena to add a span into. + @Input base - the base of the span. + @Input uSize - the extent of the span. 
+ + @Return IMG_TRUE - Success + IMG_FALSE - failure +******************************************************************************/ +IMG_BOOL +RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize) +{ + PVR_ASSERT (pArena != IMG_NULL); + + if (pArena == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"RA_Add: invalid parameter - pArena")); + return IMG_FALSE; + } + + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Add: name='%s', base=0x%x, size=0x%x", pArena->name, base, uSize)); + + uSize = (uSize + pArena->uQuantum - 1) / pArena->uQuantum * pArena->uQuantum; + return ((IMG_BOOL)(_InsertResource (pArena, base, uSize) != IMG_NULL)); +} + +/*! +****************************************************************************** + @Function RA_Alloc + + @Description To allocate resource from an arena. + + @Input pArena - the arena + @Input uRequestSize - the size of resource segment requested. + @Output pActualSize - the actual size of resource segment + allocated, typcially rounded up by quantum. + @Output ppsMapping - the user reference associated with allocated resource span. + @Input uFlags - flags influencing allocation policy. + @Input uAlignment - the uAlignment constraint required for the + allocated segment, use 0 if uAlignment not required. 
+ @Input uAlignmentOffset + @Input pvPrivData - opaque private data passed through to allocator + @Input ui32PrivDataLength - length of opaque private data + + @Output base - allocated base resource + + @Return IMG_TRUE - success + IMG_FALSE - failure +******************************************************************************/ +IMG_BOOL +RA_Alloc (RA_ARENA *pArena, + IMG_SIZE_T uRequestSize, + IMG_SIZE_T *pActualSize, + BM_MAPPING **ppsMapping, + IMG_UINT32 uFlags, + IMG_UINT32 uAlignment, + IMG_UINT32 uAlignmentOffset, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINTPTR_T *base) +{ + IMG_BOOL bResult = IMG_FALSE; + IMG_BOOL bTestAllocFail = IMG_FALSE; + IMG_SIZE_T uSize = uRequestSize; + + PVR_ASSERT (pArena!=IMG_NULL); + + if (pArena == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"RA_Alloc: invalid parameter - pArena")); + return IMG_FALSE; + } + +#if defined(VALIDATE_ARENA_TEST) + ValidateArena(pArena); +#endif + +#ifdef USE_BM_FREESPACE_CHECK + CheckBMFreespace(); +#endif + + if (pActualSize != IMG_NULL) + { + *pActualSize = uSize; + } + + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Alloc: arena='%s', size=0x%x(0x%x), alignment=0x%x, offset=0x%x", + pArena->name, uSize, uRequestSize, uAlignment, uAlignmentOffset)); + + bTestAllocFail = RA_TestAllocationFail(pArena, uSize, ~0); + if(!bTestAllocFail) + { + /* if allocation failed then we might have an import source which + can provide more resource, else we will have to fail the + allocation to the caller. 
*/ + bResult = _AttemptAllocAligned (pArena, uSize, ppsMapping, uFlags, + uAlignment, uAlignmentOffset, base); + if (!bResult) + { + BM_MAPPING *psImportMapping; + IMG_UINTPTR_T import_base; + IMG_SIZE_T uImportSize = uSize; + + /* + Ensure that we allocate sufficient space to meet the uAlignment + constraint + */ + if (uAlignment > pArena->uQuantum) + { + uImportSize += (uAlignment - 1); + } + + /* ensure that we import according to the quanta of this arena */ + uImportSize = ((uImportSize + pArena->uQuantum - 1)/pArena->uQuantum)*pArena->uQuantum; + + bResult = + pArena->pImportAlloc (pArena->pImportHandle, uImportSize, &uImportSize, + &psImportMapping, uFlags, + pvPrivData, ui32PrivDataLength, &import_base); + if (bResult) + { + BT *pBT; + pBT = _InsertResourceSpan (pArena, import_base, uImportSize); + /* successfully import more resource, create a span to + represent it and retry the allocation attempt */ + if (pBT == IMG_NULL) + { + /* insufficient resources to insert the newly acquired span, + so free it back again */ + pArena->pImportFree(pArena->pImportHandle, import_base, + psImportMapping); + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Alloc: name='%s', size=0x%x failed!", + pArena->name, uSize)); + /* RA_Dump (arena); */ + return IMG_FALSE; + } + pBT->psMapping = psImportMapping; + #ifdef RA_STATS + pArena->sStatistics.uFreeSegmentCount++; + pArena->sStatistics.uFreeResourceCount += uImportSize; + pArena->sStatistics.uImportCount++; + pArena->sStatistics.uSpanCount++; + #endif + bResult = _AttemptAllocAligned(pArena, uSize, ppsMapping, uFlags, + uAlignment, uAlignmentOffset, + base); + if (!bResult) + { + PVR_DPF ((PVR_DBG_ERROR, + "RA_Alloc: name='%s' uAlignment failed!", + pArena->name)); + } + } + } + #ifdef RA_STATS + if (bResult) + pArena->sStatistics.uCumulativeAllocs++; + #endif + } + + PVR_DPF((PVR_DBG_MESSAGE, + "RA_Alloc: arena=%s, size=0x%x(0x%x), alignment=0x%x, "\ + "offset=0x%x, result=%d", + pArena->name, + uSize, uRequestSize, uAlignment, 
uAlignmentOffset, + bResult)); + + /* RA_Dump (pArena); + ra_stats (pArena); + */ + + if (!bResult) { + PVR_LOG(("RA_Alloc %s %s: arena=%s, size=0x%x(0x%x), "\ + "alignment=0x%x, offset=0x%x", + (bResult ? "SUCCESS" : "FAILED"), + (bTestAllocFail ? "in TEST_MODE!" : " "), + pArena->name, + uSize, uRequestSize, uAlignment, uAlignmentOffset)); + RA_DumpHeapInfo(pArena, ~0); + } +#if defined(VALIDATE_ARENA_TEST) + ValidateArena(pArena); +#endif + + return bResult; +} + + +#if defined(VALIDATE_ARENA_TEST) + +/*! +****************************************************************************** + @Function ValidateArena + + @Description Validate an arena by checking that adjacent members of the + double linked ordered list are compatible. PVR_DBG_BREAK and + PVR_DPF messages are used when an error is detected. + NOTE: A DEBUG build is required for PVR_DBG_BREAK and PVR_DPF + to operate. + + @Input pArena - the arena + + @Return 0 +******************************************************************************/ +IMG_UINT32 ValidateArena(RA_ARENA *pArena) +{ + BT* pSegment; + RESOURCE_DESCRIPTOR eNextSpan; + + pSegment = pArena->pHeadSegment; + + if (pSegment == IMG_NULL) + { + return 0; + } + + if (pSegment->eResourceType == IMPORTED_RESOURCE_TYPE) + { + PVR_ASSERT(pSegment->eResourceSpan == IMPORTED_RESOURCE_SPAN_START); + + while (pSegment->pNextSegment) + { + eNextSpan = pSegment->pNextSegment->eResourceSpan; + + switch (pSegment->eResourceSpan) + { + case IMPORTED_RESOURCE_SPAN_LIVE: + + if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) || + (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE) || + (eNextSpan == IMPORTED_RESOURCE_SPAN_END))) + { + /* error - next span must be live, free or end */ + PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name)); + + PVR_DBG_BREAK; 
+ } + break; + + case IMPORTED_RESOURCE_SPAN_FREE: + + if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) || + (eNextSpan == IMPORTED_RESOURCE_SPAN_END))) + { + /* error - next span must be live or end */ + PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name)); + + PVR_DBG_BREAK; + } + break; + + case IMPORTED_RESOURCE_SPAN_END: + + if ((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) || + (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE) || + (eNextSpan == IMPORTED_RESOURCE_SPAN_END)) + { + /* error - next span cannot be live, free or end */ + PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name)); + + PVR_DBG_BREAK; + } + break; + + + case IMPORTED_RESOURCE_SPAN_START: + + if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) || + (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE))) + { + /* error - next span must be live or free */ + PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name)); + + PVR_DBG_BREAK; + } + break; + + default: + PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name)); + + PVR_DBG_BREAK; + break; + } + pSegment = pSegment->pNextSegment; + } + } + else if (pSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE) + { + 
PVR_ASSERT((pSegment->eResourceSpan == RESOURCE_SPAN_FREE) || (pSegment->eResourceSpan == RESOURCE_SPAN_LIVE)); + + while (pSegment->pNextSegment) + { + eNextSpan = pSegment->pNextSegment->eResourceSpan; + + switch (pSegment->eResourceSpan) + { + case RESOURCE_SPAN_LIVE: + + if (!((eNextSpan == RESOURCE_SPAN_FREE) || + (eNextSpan == RESOURCE_SPAN_LIVE))) + { + /* error - next span must be free or live */ + PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name)); + + PVR_DBG_BREAK; + } + break; + + case RESOURCE_SPAN_FREE: + + if (!((eNextSpan == RESOURCE_SPAN_FREE) || + (eNextSpan == RESOURCE_SPAN_LIVE))) + { + /* error - next span must be free or live */ + PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name)); + + PVR_DBG_BREAK; + } + break; + + default: + PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)", + pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name)); + + PVR_DBG_BREAK; + break; + } + pSegment = pSegment->pNextSegment; + } + + } + else + { + PVR_DPF ((PVR_DBG_ERROR,"ValidateArena ERROR: pSegment->eResourceType unrecognized")); + + PVR_DBG_BREAK; + } + + return 0; +} + +#endif + + +/*! +****************************************************************************** + @Function RA_Free + + @Description To free a resource segment. + + @Input pArena - the arena the segment was originally allocated from. + @Input base - the base of the resource span to free. 
+ @Input bFreeBackingStore - Should backing store memory be freed. + + @Return None +******************************************************************************/ +IMG_VOID +RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore) +{ + BT *pBT; + + PVR_ASSERT (pArena != IMG_NULL); + + if (pArena == IMG_NULL) + { + PVR_DPF ((PVR_DBG_ERROR,"RA_Free: invalid parameter - pArena")); + return; + } + +#ifdef USE_BM_FREESPACE_CHECK + CheckBMFreespace(); +#endif + + PVR_DPF ((PVR_DBG_MESSAGE, + "RA_Free: name='%s', base=0x%x", pArena->name, base)); + + pBT = (BT *) HASH_Remove (pArena->pSegmentHash, base); + PVR_ASSERT (pBT != IMG_NULL); + + if (pBT) + { + PVR_ASSERT (pBT->base == base); + +#ifdef RA_STATS + pArena->sStatistics.uCumulativeFrees++; +#endif + +#ifdef USE_BM_FREESPACE_CHECK +{ + IMG_BYTE* p; + IMG_BYTE* endp; + + p = (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset(); + endp = (IMG_BYTE*)((IMG_UINT32)(p + pBT->uSize)); + while ((IMG_UINT32)p & 3) + { + *p++ = 0xAA; + } + while (p < (IMG_BYTE*)((IMG_UINT32)endp & 0xfffffffc)) + { + *(IMG_UINT32*)p = 0xAAAAAAAA; + p += sizeof(IMG_UINT32); + } + while (p < endp) + { + *p++ = 0xAA; + } + PVR_DPF((PVR_DBG_MESSAGE,"BM_FREESPACE_CHECK: RA_Free Cleared %08X to %08X (size=0x%x)",(IMG_BYTE*)pBT->base + SysGetDevicePhysOffset(),endp-1,pBT->uSize)); +} +#endif + _FreeBT (pArena, pBT, bFreeBackingStore); + } +} + + +/*! +****************************************************************************** + @Function RA_GetNextLiveSegment + + @Description Returns details of the next live resource segments + + @Input pArena - the arena the segment was originally allocated from. 
+ @InOut psSegDetails - rtn details of segments + + @Return IMG_TRUE if operation succeeded +******************************************************************************/ +IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails) +{ + BT *pBT; + + if (psSegDetails->hSegment) + { + pBT = (BT *)psSegDetails->hSegment; + } + else + { + RA_ARENA *pArena = (RA_ARENA *)hArena; + + pBT = pArena->pHeadSegment; + } + /* walk the arena segments and write live one to the buffer */ + while (pBT != IMG_NULL) + { + if (pBT->type == btt_live) + { + psSegDetails->uiSize = pBT->uSize; + psSegDetails->sCpuPhyAddr.uiAddr = pBT->base; + psSegDetails->hSegment = (IMG_HANDLE)pBT->pNextSegment; + + return IMG_TRUE; + } + + pBT = pBT->pNextSegment; + } + + psSegDetails->uiSize = 0; + psSegDetails->sCpuPhyAddr.uiAddr = 0; + psSegDetails->hSegment = (IMG_HANDLE)IMG_UNDEF; + + return IMG_FALSE; +} + + +#ifdef USE_BM_FREESPACE_CHECK +RA_ARENA* pJFSavedArena = IMG_NULL; + +IMG_VOID CheckBMFreespace(IMG_VOID) +{ + BT *pBT; + IMG_BYTE* p; + IMG_BYTE* endp; + + if (pJFSavedArena != IMG_NULL) + { + for (pBT=pJFSavedArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment) + { + if (pBT->type == btt_free) + { + p = (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset(); + endp = (IMG_BYTE*)((IMG_UINT32)(p + pBT->uSize) & 0xfffffffc); + + while ((IMG_UINT32)p & 3) + { + if (*p++ != 0xAA) + { + fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(IMG_UINT32*)p); + for (;;); + break; + } + } + while (p < endp) + { + if (*(IMG_UINT32*)p != 0xAAAAAAAA) + { + fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(IMG_UINT32*)p); + for (;;); + break; + } + p += 4; + } + } + } + } +} +#endif + + +#if (defined(CONFIG_PROC_FS) && defined(CONFIG_PVR_PROC_FS)) || defined (RA_STATS) +static IMG_CHAR * +_BTType (IMG_INT eType) +{ + switch (eType) + { + case btt_span: return "span"; + case btt_free: return "free"; + case btt_live: 
return "live"; + } + return "junk"; +} +#endif /*defined(CONFIG_PROC_FS) && defined(DEBUG)*/ + +#if defined(ENABLE_RA_DUMP) +/*! +****************************************************************************** + @Function RA_Dump + + @Description To dump a readable description of an arena. Diagnostic only. + + @Input pArena - the arena to dump. + + @Return None +******************************************************************************/ +IMG_VOID +RA_Dump (RA_ARENA *pArena) +{ + BT *pBT; + PVR_ASSERT (pArena != IMG_NULL); + PVR_DPF ((PVR_DBG_MESSAGE,"Arena '%s':", pArena->name)); + PVR_DPF ((PVR_DBG_MESSAGE," alloc=%08X free=%08X handle=%08X quantum=%d", + pArena->pImportAlloc, pArena->pImportFree, pArena->pImportHandle, + pArena->uQuantum)); + PVR_DPF ((PVR_DBG_MESSAGE," segment Chain:")); + if (pArena->pHeadSegment != IMG_NULL && + pArena->pHeadSegment->pPrevSegment != IMG_NULL) + PVR_DPF ((PVR_DBG_MESSAGE," error: head boundary tag has invalid pPrevSegment")); + if (pArena->pTailSegment != IMG_NULL && + pArena->pTailSegment->pNextSegment != IMG_NULL) + PVR_DPF ((PVR_DBG_MESSAGE," error: tail boundary tag has invalid pNextSegment")); + + for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment) + { + PVR_DPF ((PVR_DBG_MESSAGE,"\tbase=0x%x size=0x%x type=%s", + (IMG_UINT32) pBT->base, pBT->uSize, _BTType (pBT->type))); + } + +#ifdef HASH_TRACE + HASH_Dump (pArena->pSegmentHash); +#endif +} +#endif /* #if defined(ENABLE_RA_DUMP) */ + +static PVRSRV_ERROR RA_DumpHeapInfo(RA_ARENA *pArena, IMG_UINT32 ui32DebugLevel) +{ + BT *pBT; + + { + IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); + IMG_CHAR dirname_buffer[256]; + IMG_CHAR dirname[256]; + const IMG_CHAR *proc_basename = dirname_buffer; + dirname_buffer[255] = dirname[255] = '\0'; + + OSGetProcCmdline(ui32PID, dirname_buffer, sizeof(dirname_buffer)); + PVR_LOG(("\nCommand Line of the current process with ID %u is %s", ui32PID, dirname_buffer)); + + proc_basename = OSGetPathBaseName(dirname_buffer, 
sizeof(dirname_buffer)); + PVR_LOG(("Base Name of the current process with ID %u is %s", ui32PID, proc_basename)); + + } + + PVR_LOG(("Arena '%s':", pArena->name)); + + PVR_LOG(( " allocCB=%p freeCB=%p handle=%p quantum=%d", + pArena->pImportAlloc, + pArena->pImportFree, + pArena->pImportHandle, + pArena->uQuantum)); + + PVR_LOG(( "span count\t\t%u", pArena->sStatistics.uSpanCount)); + + PVR_LOG(( "live segment count\t%u", pArena->sStatistics.uLiveSegmentCount)); + + PVR_LOG(( "free segment count\t%u", pArena->sStatistics.uFreeSegmentCount)); + + PVR_LOG(( "free resource count\t%u (0x%x)", + pArena->sStatistics.uFreeResourceCount, + (IMG_UINT)pArena->sStatistics.uFreeResourceCount)); + + PVR_LOG(( "total allocs\t\t%u", pArena->sStatistics.uCumulativeAllocs)); + + PVR_LOG(( "total failed allocs\t%u", pArena->sStatistics.uFailedAllocCount)); + + PVR_LOG(( "total frees\t\t%u", pArena->sStatistics.uCumulativeFrees)); + + PVR_LOG(( "import count\t\t%u", pArena->sStatistics.uImportCount)); + + PVR_LOG(( "export count\t\t%u", pArena->sStatistics.uExportCount)); + + PVR_LOG(( " segment Chain:")); + + if (pArena->pHeadSegment != IMG_NULL && + pArena->pHeadSegment->pPrevSegment != IMG_NULL) + { + PVR_LOG(( " error: head boundary tag has invalid pPrevSegment")); + } + + if (pArena->pTailSegment != IMG_NULL && + pArena->pTailSegment->pNextSegment != IMG_NULL) + { + PVR_LOG(( " error: tail boundary tag has invalid pNextSegment")); + } + + for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment) + { + PVR_LOG(( "%s base=0x%08x size=%08d(0x%08x) type=%s ref=%p", + ((pBT->type == btt_span) ? 
"\t\t" : "\t"), + (IMG_UINT32) pBT->base, + pBT->uSize, pBT->uSize, + _BTType(pBT->type), + pBT->psMapping)); + if(pBT->psMapping) + { + BM_MAPPING *psImportMapping = pBT->psMapping; + PVR_LOG(( "\t %p: mapping type %s, mapping count=%d, size=%08d(0x%08x), flags=0x%08x, align=0x%04x", + psImportMapping, + _BMMappingType(psImportMapping->eCpuMemoryOrigin), + psImportMapping->ui32MappingCount, + psImportMapping->uSize, psImportMapping->uSize, + psImportMapping->ui32Flags, + psImportMapping->ui32DevVAddrAlignment)); + } + } + + return PVRSRV_OK; +} + +#if defined(CONFIG_PROC_FS) && defined(CONFIG_PVR_PROC_FS) + +#if defined(CONFIG_PVR_PROC_FS_HEAP_ALLOC_DEBUG) +#define _PROC_SET_ALLOC_TH_BUFFER_SZ 32 +static int RA_ProcSetAllocFailThreshold(struct file *file, const char __user *buffer, unsigned long count, void *data) +{ + PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)data; + RA_ARENA *pArena; + IMG_CHAR data_buffer[_PROC_SET_ALLOC_TH_BUFFER_SZ]; + IMG_INT32 value = ~0; + IMG_UINT32 mask = ~0; + IMG_INT32 format_ret; + + if ((handlers == NULL) || (handlers->data == NULL) || (count > sizeof(data_buffer))) + { + return -EINVAL; + } + + pArena = (RA_ARENA *)handlers->data; + + count = MIN(count, sizeof(data_buffer)); + + if (pvr_copy_from_user(data_buffer, buffer, count)) + return -EINVAL; + + if (data_buffer[count - 1] != '\n') + return -EINVAL; + + data_buffer[(sizeof(data_buffer) - 1)] = '\0'; + if((sizeof(data_buffer) -1) <= count) + data_buffer[count] = '\0'; + + PVR_LOG(("Buffer from the user is %s\n", data_buffer)); + format_ret = sscanf(data_buffer, "%i:0x%x", &value, &mask); + PVR_LOG(("Value set is %i, type is %x, format %i\n", value, mask, format_ret)); + if(format_ret <= 0) + return -EINVAL; + +/* + Heap Allocation Buffer Threshold Setting - for testing purposes only + Causes allocation of a GFX buffer of type MASK for the respective heap to + fail. + Format is <threshold value number>:<buffer type mask hex value> + for example: 1000:0x01. 
+ Value of -1 disables the allocation fail test + Value bigger than and eq. to 0 enables the allocation fail test for + the first buffer only. + Value smaller than -1 enables the buffer allocation failure for this + heap until the test disables it. +*/ + if(value < 0) + { + if(value == -1) + { + pArena->bFailAllocationPersist = pArena->bFailAllocationOnce = IMG_FALSE; + } + else if(value == -2) + { + RA_DumpHeapInfo(pArena, ~0); + } + else + { + pArena->bFailAllocationPersist = pArena->bFailAllocationOnce = IMG_TRUE; + pArena->uAllocFailThreshold = -value; + } + } + else + { + pArena->bFailAllocationPersist = 0; + pArena->bFailAllocationOnce = 1; + pArena->uAllocFailThreshold = value; + } + + if(format_ret > 1) + { + if((pArena->bFailAllocationOnce == IMG_TRUE) && (mask == 0)) + pArena->uAllocFailMask = ~0; + else + pArena->uAllocFailMask = mask; + } + PVR_LOG(("*************** User Fail Heap Allocation Settings for %s *******************************\n", + pArena->name)); + PVR_LOG(("Fail Heap Allocation is %s in %s mode\n", (pArena->bFailAllocationOnce ? "Enabled": "Disabled"), + (pArena->bFailAllocationPersist ? 
"Persistent": "One-Shot"))); + PVR_LOG(("Fail Heap Allocation Buffer Size Threshold is %u with a Mask of 0x%x\n", + pArena->uAllocFailThreshold, pArena->uAllocFailMask)); + PVR_LOG(("*******************************************************************************************\n")); + return (count); +} + +static void* RA_ProcSeqOff2AllocFailThreshold(struct seq_file * sfile, loff_t off) +{ + + if(off <= 1) + return (void*)(IMG_INT)(off+1); + + return 0; +} + +static void RA_ProcSeqShowAllocFailThreshold(struct seq_file *sfile,void* el) +{ + PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private; + RA_ARENA *pArena = (RA_ARENA *)handlers->data; + IMG_INT off = (IMG_INT)el; + + switch (off) + { + case 1: + seq_printf(sfile, "Heap Allocation Buffer Threshold Setting - for testing purposes only\n"); + seq_printf(sfile, "Format is <threshold value number>:<buffer type mask hex value> for example: 1000:0x01\n"); + seq_printf(sfile, "Value of -1 disables the allocation fail test\n"); + seq_printf(sfile, "Value of -2 dumps the heap entries to the kernel log\n"); + seq_printf(sfile, "Value => 0 enables the allocation fail test for the first buffer with the met threshold only\n"); + seq_printf(sfile, "Value < -2 enables the buffer allocation failure for this heap until the test disables it\n"); + break; + case 2: + seq_printf(sfile, "*********** Current Settings: ********************\n"); + seq_printf(sfile,"Fail Heap Allocation is %s in %s mode\n", (pArena->bFailAllocationOnce ? "Enabled": "Disabled"), + (pArena->bFailAllocationPersist ? 
"Persistent": "One-Shot")); + seq_printf(sfile, "Fail Heap Allocation Buffer Size Threshold is %u with a Mask of 0x%x\n", + pArena->uAllocFailThreshold, pArena->uAllocFailMask); + break; + } +} +#endif //defined(CONFIG_PVR_PROC_FS_HEAP_ALLOC_DEBUG) + +static void RA_ProcSeqShowInfo(struct seq_file *sfile, void* el) +{ + PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private; + RA_ARENA *pArena = (RA_ARENA *)handlers->data; + IMG_INT off = (IMG_INT)el; + + switch (off) + { + case 1: + seq_printf(sfile, "quantum\t\t\t%u\n", pArena->uQuantum); + break; + case 2: + seq_printf(sfile, "import_handle\t\t%08X\n", (IMG_UINT)pArena->pImportHandle); + break; +#ifdef RA_STATS + case 3: + seq_printf(sfile,"span count\t\t%u\n", pArena->sStatistics.uSpanCount); + break; + case 4: + seq_printf(sfile, "live segment count\t%u\n", pArena->sStatistics.uLiveSegmentCount); + break; + case 5: + seq_printf(sfile, "free segment count\t%u\n", pArena->sStatistics.uFreeSegmentCount); + break; + case 6: + seq_printf(sfile, "free resource count\t%u (0x%x)\n", + pArena->sStatistics.uFreeResourceCount, + (IMG_UINT)pArena->sStatistics.uFreeResourceCount); + break; + case 7: + seq_printf(sfile, "total allocs\t\t%u\n", pArena->sStatistics.uCumulativeAllocs); + break; + case 8: + seq_printf(sfile, "total frees\t\t%u\n", pArena->sStatistics.uCumulativeFrees); + break; + case 9: + seq_printf(sfile, "import count\t\t%u\n", pArena->sStatistics.uImportCount); + break; + case 10: + seq_printf(sfile, "export count\t\t%u\n", pArena->sStatistics.uExportCount); + break; +#endif + } + +} + +static void* RA_ProcSeqOff2ElementInfo(struct seq_file * sfile, loff_t off) +{ +#ifdef RA_STATS + if(off <= 9) +#else + if(off <= 1) +#endif + return (void*)(IMG_INT)(off+1); + return 0; +} + +static void RA_ProcSeqShowRegs(struct seq_file *sfile, void* el) +{ + PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private; + RA_ARENA *pArena = (RA_ARENA *)handlers->data; + BT *pBT = (BT*)el; + + 
if (el == PVR_PROC_SEQ_START_TOKEN) + { + seq_printf(sfile, "Arena \"%s\"\nBase Size Type Ref\n", pArena->name); + return; + } + + if (pBT) + { + seq_printf(sfile, "%08x %8x %4s %08x\n", + (IMG_UINT)pBT->base, (IMG_UINT)pBT->uSize, _BTType (pBT->type), + (IMG_UINT)pBT->psMapping); + } +} + +static void* RA_ProcSeqOff2ElementRegs(struct seq_file * sfile, loff_t off) +{ + PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private; + RA_ARENA *pArena = (RA_ARENA *)handlers->data; + BT *pBT = 0; + + if(off == 0) + return PVR_PROC_SEQ_START_TOKEN; + + for (pBT=pArena->pHeadSegment; --off && pBT; pBT=pBT->pNextSegment); + + return (void*)pBT; +} + +#endif /* defined(CONFIG_PROC_FS) && defined(DEBUG) */ + + +#ifdef RA_STATS +/*! +****************************************************************************** + @Function RA_GetStats + + @Description Gets the arena stats and places in client buffer + + @Input pArena - the arena to print statistics for. + @Input ppszStr - caller string to fill + @Input pui32StrLen - length of caller string + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena, + IMG_CHAR **ppszStr, + IMG_UINT32 *pui32StrLen) +{ + IMG_CHAR *pszStr = *ppszStr; + IMG_UINT32 ui32StrLen = *pui32StrLen; + IMG_INT32 i32Count; + BT *pBT; + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "\nArena '%s':\n", pArena->name); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, " allocCB=%p freeCB=%p handle=%p quantum=%d\n", + pArena->pImportAlloc, + pArena->pImportFree, + pArena->pImportHandle, + pArena->uQuantum); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "span count\t\t%u\n", pArena->sStatistics.uSpanCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, 
"live segment count\t%u\n", pArena->sStatistics.uLiveSegmentCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "free segment count\t%u\n", pArena->sStatistics.uFreeSegmentCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "free resource count\t%u (0x%x)\n", + pArena->sStatistics.uFreeResourceCount, + (IMG_UINT)pArena->sStatistics.uFreeResourceCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "total allocs\t\t%u\n", pArena->sStatistics.uCumulativeAllocs); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "total frees\t\t%u\n", pArena->sStatistics.uCumulativeFrees); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "import count\t\t%u\n", pArena->sStatistics.uImportCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "export count\t\t%u\n", pArena->sStatistics.uExportCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, " segment Chain:\n"); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + + if (pArena->pHeadSegment != IMG_NULL && + pArena->pHeadSegment->pPrevSegment != IMG_NULL) + { + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, " error: head boundary tag has invalid pPrevSegment\n"); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + } + + if (pArena->pTailSegment != IMG_NULL && + pArena->pTailSegment->pNextSegment != IMG_NULL) + { + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, " error: tail boundary tag has invalid pNextSegment\n"); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + } + + for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment) + { + 
CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "\tbase=0x%x size=0x%x type=%s ref=%p\n", + (IMG_UINT32) pBT->base, + pBT->uSize, + _BTType(pBT->type), + pBT->psMapping); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + } + + *ppszStr = pszStr; + *pui32StrLen = ui32StrLen; + + return PVRSRV_OK; +} + +PVRSRV_ERROR RA_GetStatsFreeMem(RA_ARENA *pArena, + IMG_CHAR **ppszStr, + IMG_UINT32 *pui32StrLen) +{ + IMG_CHAR *pszStr = *ppszStr; + IMG_UINT32 ui32StrLen = *pui32StrLen; + IMG_INT32 i32Count; + CHECK_SPACE(ui32StrLen); + i32Count = OSSNPrintf(pszStr, 100, "Bytes free: Arena %-30s: %u (0x%x)\n", pArena->name, + pArena->sStatistics.uFreeResourceCount, + pArena->sStatistics.uFreeResourceCount); + UPDATE_SPACE(pszStr, i32Count, ui32StrLen); + *ppszStr = pszStr; + *pui32StrLen = ui32StrLen; + + return PVRSRV_OK; +} +#endif + +/****************************************************************************** + End of file (ra.c) +******************************************************************************/ + + + + diff --git a/pvr-source/services4/srvkm/common/refcount.c b/pvr-source/services4/srvkm/common/refcount.c new file mode 100644 index 0000000..fa64b23 --- /dev/null +++ b/pvr-source/services4/srvkm/common/refcount.c @@ -0,0 +1,588 @@ +/*************************************************************************/ /*! +@Title Services reference count debugging +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined(PVRSRV_REFCOUNT_DEBUG) + +#include "services_headers.h" + +#ifndef __linux__ +#warning Reference count debugging is not thread-safe on this platform +#define PVRSRV_LOCK_CCB() +#define PVRSRV_UNLOCK_CCB() +#else /* __linux__ */ +#include <linux/spinlock.h> +static DEFINE_SPINLOCK(gsCCBLock); +#define PVRSRV_LOCK_CCB() \ + { \ + unsigned long flags; \ + spin_lock_irqsave(&gsCCBLock, flags); +#define PVRSRV_UNLOCK_CCB() \ + spin_unlock_irqrestore(&gsCCBLock, flags); \ + } +#endif /* __linux__ */ + +#define PVRSRV_REFCOUNT_CCB_MAX 512 +#define PVRSRV_REFCOUNT_CCB_MESG_MAX 80 + +#define PVRSRV_REFCOUNT_CCB_DEBUG_SYNCINFO (1U << 0) +#define PVRSRV_REFCOUNT_CCB_DEBUG_MEMINFO (1U << 1) +#define PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF (1U << 2) +#define PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF2 (1U << 3) +#define PVRSRV_REFCOUNT_CCB_DEBUG_BM_XPROC (1U << 4) + +#if defined(__linux__) +#define PVRSRV_REFCOUNT_CCB_DEBUG_MMAP (1U << 16) +#define PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2 (1U << 17) +#else +#define PVRSRV_REFCOUNT_CCB_DEBUG_MMAP 0 +#define PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2 0 +#endif + +#define PVRSRV_REFCOUNT_CCB_DEBUG_ALL ~0U + +/*static const IMG_UINT guiDebugMask = PVRSRV_REFCOUNT_CCB_DEBUG_ALL;*/ +static const IMG_UINT guiDebugMask = + PVRSRV_REFCOUNT_CCB_DEBUG_SYNCINFO | + PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2; + +typedef struct +{ + const IMG_CHAR *pszFile; + IMG_INT iLine; + IMG_UINT32 ui32PID; + IMG_CHAR pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX]; +} +PVRSRV_REFCOUNT_CCB; + +static PVRSRV_REFCOUNT_CCB gsRefCountCCB[PVRSRV_REFCOUNT_CCB_MAX]; +static IMG_UINT giOffset; + +static const IMG_CHAR gszHeader[] = + /* 10 20 30 40 50 60 70 + * 345678901234567890123456789012345678901234567890123456789012345678901 + */ + "TYPE SYNCINFO MEMINFO MEMHANDLE OTHER REF REF' SIZE PID"; + /* NCINFO deadbeef deadbeef deadbeef deadbeef 1234 1234 deadbeef */ + +#define PVRSRV_REFCOUNT_CCB_FMT_STRING "%8.8s %8p %8p %8p 
%8p %.4d %.4d %.8x" + +IMG_INTERNAL +void PVRSRVDumpRefCountCCB(void) +{ + int i; + + PVRSRV_LOCK_CCB(); + + PVR_LOG(("%s", gszHeader)); + + for(i = 0; i < PVRSRV_REFCOUNT_CCB_MAX; i++) + { + PVRSRV_REFCOUNT_CCB *psRefCountCCBEntry = + &gsRefCountCCB[(giOffset + i) % PVRSRV_REFCOUNT_CCB_MAX]; + + /* Early on, we won't have MAX_REFCOUNT_CCB_SIZE messages */ + if(!psRefCountCCBEntry->pszFile) + break; + + PVR_LOG(("%s %d %s:%d", psRefCountCCBEntry->pcMesg, + psRefCountCCBEntry->ui32PID, + psRefCountCCBEntry->pszFile, + psRefCountCCBEntry->iLine)); + } + + PVRSRV_UNLOCK_CCB(); +} + +IMG_INTERNAL +void PVRSRVKernelSyncInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + IMG_UINT32 ui32RefValue = OSAtomicRead(psKernelSyncInfo->pvRefCount); + + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_SYNCINFO)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "SYNCINFO", + psKernelSyncInfo, + psKernelMemInfo, + NULL, + (psKernelMemInfo) ? psKernelMemInfo->sMemBlk.hOSMemHandle : NULL, + ui32RefValue, + ui32RefValue + 1, + (psKernelMemInfo) ? 
psKernelMemInfo->uAllocSize : 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + PVRSRVAcquireSyncInfoKM(psKernelSyncInfo); +} + +IMG_INTERNAL +void PVRSRVKernelSyncInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + IMG_UINT32 ui32RefValue = OSAtomicRead(psKernelSyncInfo->pvRefCount); + + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_SYNCINFO)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "SYNCINFO", + psKernelSyncInfo, + psKernelMemInfo, + (psKernelMemInfo) ? psKernelMemInfo->sMemBlk.hOSMemHandle : NULL, + NULL, + ui32RefValue, + ui32RefValue - 1, + (psKernelMemInfo) ? 
psKernelMemInfo->uAllocSize : 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + PVRSRVReleaseSyncInfoKM(psKernelSyncInfo); +} + +IMG_INTERNAL +void PVRSRVKernelMemInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MEMINFO)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "MEMINFO", + psKernelMemInfo->psKernelSyncInfo, + psKernelMemInfo, + psKernelMemInfo->sMemBlk.hOSMemHandle, + NULL, + psKernelMemInfo->ui32RefCount, + psKernelMemInfo->ui32RefCount + 1, + psKernelMemInfo->uAllocSize); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + psKernelMemInfo->ui32RefCount++; +} + +IMG_INTERNAL +void PVRSRVKernelMemInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MEMINFO)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "MEMINFO", + psKernelMemInfo->psKernelSyncInfo, + psKernelMemInfo, + psKernelMemInfo->sMemBlk.hOSMemHandle, + NULL, + psKernelMemInfo->ui32RefCount, + psKernelMemInfo->ui32RefCount - 1, + psKernelMemInfo->uAllocSize); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + 
PVRSRV_UNLOCK_CCB(); + +skip: + psKernelMemInfo->ui32RefCount--; +} + +IMG_INTERNAL +void PVRSRVBMBufIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, BM_BUF *pBuf) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "BM_BUF", + NULL, + NULL, + BM_HandleToOSMemHandle(pBuf), + pBuf, + pBuf->ui32RefCount, + pBuf->ui32RefCount + 1, + (pBuf->pMapping) ? pBuf->pMapping->uSize : 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + pBuf->ui32RefCount++; +} + +IMG_INTERNAL +void PVRSRVBMBufDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, BM_BUF *pBuf) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "BM_BUF", + NULL, + NULL, + BM_HandleToOSMemHandle(pBuf), + pBuf, + pBuf->ui32RefCount, + pBuf->ui32RefCount - 1, + (pBuf->pMapping) ? 
pBuf->pMapping->uSize : 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + pBuf->ui32RefCount--; +} + +IMG_INTERNAL +void PVRSRVBMBufIncExport2(const IMG_CHAR *pszFile, IMG_INT iLine, BM_BUF *pBuf) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF2)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "BM_BUF2", + NULL, + NULL, + BM_HandleToOSMemHandle(pBuf), + pBuf, + pBuf->ui32ExportCount, + pBuf->ui32ExportCount + 1, + (pBuf->pMapping) ? pBuf->pMapping->uSize : 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + pBuf->ui32ExportCount++; +} + +IMG_INTERNAL +void PVRSRVBMBufDecExport2(const IMG_CHAR *pszFile, IMG_INT iLine, BM_BUF *pBuf) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_BUF2)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "BM_BUF2", + NULL, + NULL, + BM_HandleToOSMemHandle(pBuf), + pBuf, + pBuf->ui32ExportCount, + pBuf->ui32ExportCount - 1, + (pBuf->pMapping) ? 
pBuf->pMapping->uSize : 0); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + pBuf->ui32ExportCount--; +} + +IMG_INTERNAL +void PVRSRVBMXProcIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_XPROC)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "BM_XPROC", + NULL, + NULL, + gXProcWorkaroundShareData[ui32Index].hOSMemHandle, + (IMG_VOID *) ui32Index, + gXProcWorkaroundShareData[ui32Index].ui32RefCount, + gXProcWorkaroundShareData[ui32Index].ui32RefCount + 1, + gXProcWorkaroundShareData[ui32Index].ui32Size); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + gXProcWorkaroundShareData[ui32Index].ui32RefCount++; +} + +IMG_INTERNAL +void PVRSRVBMXProcDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_BM_XPROC)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "BM_XPROC", + NULL, + NULL, + gXProcWorkaroundShareData[ui32Index].hOSMemHandle, + (IMG_VOID *) ui32Index, + gXProcWorkaroundShareData[ui32Index].ui32RefCount, + gXProcWorkaroundShareData[ui32Index].ui32RefCount - 1, + gXProcWorkaroundShareData[ui32Index].ui32Size); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = 
(giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + gXProcWorkaroundShareData[ui32Index].ui32RefCount--; +} + +#if defined(__linux__) + +/* mmap refcounting is Linux specific */ + +IMG_INTERNAL +void PVRSRVOffsetStructIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MMAP)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "MMAP", + NULL, + NULL, + psOffsetStruct->psLinuxMemArea, + psOffsetStruct, + psOffsetStruct->ui32RefCount, + psOffsetStruct->ui32RefCount + 1, + psOffsetStruct->ui32RealByteSize); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + psOffsetStruct->ui32RefCount++; +} + +IMG_INTERNAL +void PVRSRVOffsetStructDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MMAP)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "MMAP", + NULL, + NULL, + psOffsetStruct->psLinuxMemArea, + psOffsetStruct, + psOffsetStruct->ui32RefCount, + psOffsetStruct->ui32RefCount - 1, + psOffsetStruct->ui32RealByteSize); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + psOffsetStruct->ui32RefCount--; +} + +IMG_INTERNAL +void PVRSRVOffsetStructIncMapped2(const IMG_CHAR 
*pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "MMAP2", + NULL, + NULL, + psOffsetStruct->psLinuxMemArea, + psOffsetStruct, + psOffsetStruct->ui32Mapped, + psOffsetStruct->ui32Mapped + 1, + psOffsetStruct->ui32RealByteSize); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + psOffsetStruct->ui32Mapped++; +} + +IMG_INTERNAL +void PVRSRVOffsetStructDecMapped2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct) +{ + if(!(guiDebugMask & PVRSRV_REFCOUNT_CCB_DEBUG_MMAP2)) + goto skip; + + PVRSRV_LOCK_CCB(); + + gsRefCountCCB[giOffset].pszFile = pszFile; + gsRefCountCCB[giOffset].iLine = iLine; + gsRefCountCCB[giOffset].ui32PID = OSGetCurrentProcessIDKM(); + snprintf(gsRefCountCCB[giOffset].pcMesg, + PVRSRV_REFCOUNT_CCB_MESG_MAX - 1, + PVRSRV_REFCOUNT_CCB_FMT_STRING, + "MMAP2", + NULL, + NULL, + psOffsetStruct->psLinuxMemArea, + psOffsetStruct, + psOffsetStruct->ui32Mapped, + psOffsetStruct->ui32Mapped - 1, + psOffsetStruct->ui32RealByteSize); + gsRefCountCCB[giOffset].pcMesg[PVRSRV_REFCOUNT_CCB_MESG_MAX - 1] = 0; + giOffset = (giOffset + 1) % PVRSRV_REFCOUNT_CCB_MAX; + + PVRSRV_UNLOCK_CCB(); + +skip: + psOffsetStruct->ui32Mapped--; +} + +#endif /* defined(__linux__) */ + +#endif /* defined(PVRSRV_REFCOUNT_DEBUG) */ diff --git a/pvr-source/services4/srvkm/common/resman.c b/pvr-source/services4/srvkm/common/resman.c new file mode 100644 index 0000000..aef102f --- /dev/null +++ b/pvr-source/services4/srvkm/common/resman.c @@ -0,0 +1,985 @@ 
+/*************************************************************************/ /*! +@Title Resource Manager +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide resource management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#include "services_headers.h" +#include "resman.h" + +#ifdef __linux__ +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#endif + +#include <linux/sched.h> +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) +#include <linux/hardirq.h> +#else +#include <asm/hardirq.h> +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) +#include <linux/mutex.h> +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27) +#include <linux/semaphore.h> +#else +#include <asm/semaphore.h> +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) +static DEFINE_MUTEX(lock); +#define DOWN(m) mutex_lock(m) +#define UP(m) mutex_unlock(m) +#else +static DECLARE_MUTEX(lock); +#define DOWN(m) down(m) +#define UP(m) up(m) +#endif + +#define ACQUIRE_SYNC_OBJ do { \ + if (in_interrupt()) { \ + printk("ISR cannot take RESMAN mutex\n"); \ + BUG(); \ + } \ + else DOWN(&lock); \ +} while (0) +#define RELEASE_SYNC_OBJ UP(&lock) + +#else + +#define ACQUIRE_SYNC_OBJ +#define RELEASE_SYNC_OBJ + +#endif + +#define RESMAN_SIGNATURE 0x12345678 + +/****************************************************************************** + * resman structures + *****************************************************************************/ + +/* resman item structure */ +typedef struct _RESMAN_ITEM_ +{ 
+#ifdef DEBUG + IMG_UINT32 ui32Signature; +#endif + struct _RESMAN_ITEM_ **ppsThis; /*!< list navigation */ + struct _RESMAN_ITEM_ *psNext; /*!< list navigation */ + + IMG_UINT32 ui32Flags; /*!< flags */ + IMG_UINT32 ui32ResType;/*!< res type */ + + IMG_PVOID pvParam; /*!< param1 for callback */ + IMG_UINT32 ui32Param; /*!< param2 for callback */ + + RESMAN_FREE_FN pfnFreeResource;/*!< resman item free callback */ +} RESMAN_ITEM; + + +/* resman context structure */ +typedef struct _RESMAN_CONTEXT_ +{ +#ifdef DEBUG + IMG_UINT32 ui32Signature; +#endif + struct _RESMAN_CONTEXT_ **ppsThis;/*!< list navigation */ + struct _RESMAN_CONTEXT_ *psNext;/*!< list navigation */ + + PVRSRV_PER_PROCESS_DATA *psPerProc; /* owner of resources */ + + RESMAN_ITEM *psResItemList;/*!< res item list for context */ + +} RESMAN_CONTEXT; + + +/* resman list structure */ +typedef struct +{ + RESMAN_CONTEXT *psContextList; /*!< resman context list */ + +} RESMAN_LIST, *PRESMAN_LIST; /* PRQA S 3205 */ + + +PRESMAN_LIST gpsResList = IMG_NULL; + +#include "lists.h" /* PRQA S 5087 */ /* include lists.h required here */ + +static IMPLEMENT_LIST_ANY_VA(RESMAN_ITEM) +static IMPLEMENT_LIST_ANY_VA_2(RESMAN_ITEM, IMG_BOOL, IMG_FALSE) +static IMPLEMENT_LIST_INSERT(RESMAN_ITEM) +static IMPLEMENT_LIST_REMOVE(RESMAN_ITEM) +static IMPLEMENT_LIST_REVERSE(RESMAN_ITEM) + +static IMPLEMENT_LIST_REMOVE(RESMAN_CONTEXT) +static IMPLEMENT_LIST_INSERT(RESMAN_CONTEXT) + + +#define PRINT_RESLIST(x, y, z) + +/******************************************************** Forword references */ + +static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem, IMG_BOOL bExecuteCallback, IMG_BOOL bForceCleanup); + +static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psContext, + IMG_UINT32 ui32SearchCriteria, + IMG_UINT32 ui32ResType, + IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bExecuteCallback); + + +#ifdef DEBUG + static IMG_VOID ValidateResList(PRESMAN_LIST psResList); + #define VALIDATERESLIST() 
ValidateResList(gpsResList) +#else + #define VALIDATERESLIST() +#endif + + + + + + +/*! +****************************************************************************** + + @Function ResManInit + + @Description initialises the resman + + @Return none + +******************************************************************************/ +PVRSRV_ERROR ResManInit(IMG_VOID) +{ + if (gpsResList == IMG_NULL) + { + /* If not already initialised */ + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(*gpsResList), + (IMG_VOID **)&gpsResList, IMG_NULL, + "Resource Manager List") != PVRSRV_OK) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Init list, the linked list has dummy entries at both ends */ + gpsResList->psContextList = IMG_NULL; + + /* Check resource list */ + VALIDATERESLIST(); + } + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function ResManDeInit + + @Description de-initialises the resman + + @Return none + +******************************************************************************/ +IMG_VOID ResManDeInit(IMG_VOID) +{ + if (gpsResList != IMG_NULL) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*gpsResList), gpsResList, IMG_NULL); + gpsResList = IMG_NULL; + } +} + + +/*! +****************************************************************************** + + @Function PVRSRVResManConnect + + @Description Opens a connection to the Resource Manager + + @input hPerProc - Per-process data (if applicable) + @output phResManContext - Resman context + + @Return error code or PVRSRV_OK + +******************************************************************************/ +PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc, + PRESMAN_CONTEXT *phResManContext) +{ + PVRSRV_ERROR eError; + PRESMAN_CONTEXT psResManContext; + + /*Acquire resource list sync object*/ + ACQUIRE_SYNC_OBJ; + + /*Check resource list*/ + VALIDATERESLIST(); + + /* Allocate memory for the new context. 
*/ + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psResManContext), + (IMG_VOID **)&psResManContext, IMG_NULL, + "Resource Manager Context"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVResManConnect: ERROR allocating new RESMAN context struct")); + + /* Check resource list */ + VALIDATERESLIST(); + + /* Release resource list sync object */ + RELEASE_SYNC_OBJ; + + return eError; + } + +#ifdef DEBUG + psResManContext->ui32Signature = RESMAN_SIGNATURE; +#endif /* DEBUG */ + psResManContext->psResItemList = IMG_NULL; + psResManContext->psPerProc = hPerProc; + + /* Insert new context struct after the dummy first entry */ + List_RESMAN_CONTEXT_Insert(&gpsResList->psContextList, psResManContext); + + /* Check resource list */ + VALIDATERESLIST(); + + /* Release resource list sync object */ + RELEASE_SYNC_OBJ; + + *phResManContext = psResManContext; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function PVRSRVResManDisconnect + + @Description Closes a Resource Manager connection and frees all resources + + @input hResManContext - Resman context + @input bKernelContext - IMG_TRUE for kernel contexts + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT psResManContext, + IMG_BOOL bKernelContext) +{ + /* Acquire resource list sync object */ + ACQUIRE_SYNC_OBJ; + + /* Check resource list */ + VALIDATERESLIST(); + + /* Print and validate resource list */ + PRINT_RESLIST(gpsResList, psResManContext, IMG_TRUE); + + /* Free all auto-freed resources in order */ + + if (!bKernelContext) + { + /* OS specific User-mode Mappings: */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_OS_USERMODE_MAPPING, 0, 0, IMG_TRUE); + + /* VGX types: */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DMA_CLIENT_FIFO_DATA, 0, 0, IMG_TRUE); + + 
/* Event Object */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_EVENT_OBJECT, 0, 0, IMG_TRUE); + + /* syncobject state (Read/Write Complete values) */ + /* Must be FIFO, so we reverse the list, twice */ + List_RESMAN_ITEM_Reverse(&psResManContext->psResItemList); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_MODIFY_SYNC_OPS, 0, 0, IMG_TRUE); + List_RESMAN_ITEM_Reverse(&psResManContext->psResItemList); // (could survive without this - all following items would be cleared up "fifo" too) + + /* SGX types: */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_RENDER_CONTEXT, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_TRANSFER_CONTEXT, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_2D_CONTEXT, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_TRANSFER_CONTEXT, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC, 0, 0, IMG_TRUE); + + /* COMMON types: */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SYNC_INFO, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICECLASSMEM_MAPPING, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_WRAP, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_MAPPING, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE); + 
FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_CONTEXT, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_MEM_INFO, 0, 0, IMG_TRUE); +#if defined(SUPPORT_ION) + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_ION, 0, 0, IMG_TRUE); +#endif + /* DISPLAY CLASS types: */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF, 0, 0, IMG_TRUE); + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_DEVICE, 0, 0, IMG_TRUE); + + /* BUFFER CLASS types: */ + FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_BUFFERCLASS_DEVICE, 0, 0, IMG_TRUE); + } + + /* Ensure that there are no resources left */ + PVR_ASSERT(psResManContext->psResItemList == IMG_NULL); + + /* Remove the context struct from the list */ + List_RESMAN_CONTEXT_Remove(psResManContext); + + /* Free the context struct */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_CONTEXT), psResManContext, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + + /* Check resource list */ + VALIDATERESLIST(); + + /* Print and validate resource list */ + PRINT_RESLIST(gpsResList, psResManContext, IMG_FALSE); + + /* Release resource list sync object */ + RELEASE_SYNC_OBJ; +} + + +/*! 
+****************************************************************************** + @Function ResManRegisterRes + + @Description : Inform the resource manager that the given resource has + been alloacted and freeing of it will be the responsibility + of the resource manager + + @input psResManContext - resman context + @input ui32ResType - identify what kind of resource it is + @input pvParam - address of resource + @input ui32Param - size of resource + @input pfnFreeResource - pointer to function that frees this resource + + @Return On success a pointer to an opaque data structure that represents + the allocated resource, else NULL + +**************************************************************************/ +PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT psResManContext, + IMG_UINT32 ui32ResType, + IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + RESMAN_FREE_FN pfnFreeResource) +{ + PRESMAN_ITEM psNewResItem; + + PVR_ASSERT(psResManContext != IMG_NULL); + PVR_ASSERT(ui32ResType != 0); + + if (psResManContext == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: invalid parameter - psResManContext")); + return (PRESMAN_ITEM) IMG_NULL; + } + + /* Acquire resource list sync object */ + ACQUIRE_SYNC_OBJ; + + /* Check resource list */ + VALIDATERESLIST(); + + PVR_DPF((PVR_DBG_MESSAGE, "ResManRegisterRes: register resource " + "Context 0x%x, ResType 0x%x, pvParam 0x%x, ui32Param 0x%x, " + "FreeFunc %08X", + (IMG_UINTPTR_T)psResManContext, + ui32ResType, + (IMG_UINTPTR_T)pvParam, + ui32Param, + (IMG_UINTPTR_T)pfnFreeResource)); + + /* Allocate memory for the new resource structure */ + if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(RESMAN_ITEM), (IMG_VOID **)&psNewResItem, + IMG_NULL, + "Resource Manager Item") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: " + "ERROR allocating new resource item")); + + /* Release resource list sync object */ + RELEASE_SYNC_OBJ; + + return((PRESMAN_ITEM)IMG_NULL); + } + + /* Fill in details about this resource */ 
+#ifdef DEBUG + psNewResItem->ui32Signature = RESMAN_SIGNATURE; +#endif /* DEBUG */ + psNewResItem->ui32ResType = ui32ResType; + psNewResItem->pvParam = pvParam; + psNewResItem->ui32Param = ui32Param; + psNewResItem->pfnFreeResource = pfnFreeResource; + psNewResItem->ui32Flags = 0; + + /* Insert new structure after dummy first entry */ + List_RESMAN_ITEM_Insert(&psResManContext->psResItemList, psNewResItem); + + /* Check resource list */ + VALIDATERESLIST(); + + /* Release resource list sync object */ + RELEASE_SYNC_OBJ; + + return(psNewResItem); +} + +/*! +****************************************************************************** + @Function ResManFreeResByPtr + + @Description frees a resource by matching on pointer type + + @inputs psResItem - pointer to resource item to free + bForceCleanup - ignored uKernel re-sync + + @Return PVRSRV_ERROR +**************************************************************************/ +PVRSRV_ERROR ResManFreeResByPtr(RESMAN_ITEM *psResItem, IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(psResItem != IMG_NULL); + + if (psResItem == IMG_NULL) + { + PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: NULL ptr - nothing to do")); + return PVRSRV_OK; + } + + PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: freeing resource at %08X", + (IMG_UINTPTR_T)psResItem)); + + /*Acquire resource list sync object*/ + ACQUIRE_SYNC_OBJ; + + /*Check resource list*/ + VALIDATERESLIST(); + + /*Free resource*/ + eError = FreeResourceByPtr(psResItem, IMG_TRUE, bForceCleanup); + + /*Check resource list*/ + VALIDATERESLIST(); + + /*Release resource list sync object*/ + RELEASE_SYNC_OBJ; + + return(eError); +} + + +/*! 
+****************************************************************************** + @Function ResManFreeResByCriteria + + @Description frees a resource by matching on criteria + + @inputs hResManContext - handle for resman context + @inputs ui32SearchCriteria - indicates which parameters should be + used in search for resources to free + @inputs ui32ResType - identify what kind of resource to free + @inputs pvParam - address of resource to be free + @inputs ui32Param - size of resource to be free + + @Return PVRSRV_ERROR +**************************************************************************/ +PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT psResManContext, + IMG_UINT32 ui32SearchCriteria, + IMG_UINT32 ui32ResType, + IMG_PVOID pvParam, + IMG_UINT32 ui32Param) +{ + PVRSRV_ERROR eError; + + PVR_ASSERT(psResManContext != IMG_NULL); + + /* Acquire resource list sync object */ + ACQUIRE_SYNC_OBJ; + + /* Check resource list */ + VALIDATERESLIST(); + + PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByCriteria: " + "Context 0x%x, Criteria 0x%x, Type 0x%x, Addr 0x%x, Param 0x%x", + (IMG_UINTPTR_T)psResManContext, ui32SearchCriteria, ui32ResType, + (IMG_UINTPTR_T)pvParam, ui32Param)); + + /* Free resources by criteria for this context */ + eError = FreeResourceByCriteria(psResManContext, ui32SearchCriteria, + ui32ResType, pvParam, ui32Param, + IMG_TRUE); + + /* Check resource list */ + VALIDATERESLIST(); + + /* Release resource list sync object */ + RELEASE_SYNC_OBJ; + + return eError; +} + + +/*! +****************************************************************************** + @Function ResManDissociateRes + + @Description Moves a resource from one context to another. 
+ + @inputs psResItem - pointer to resource item to dissociate + @inputs psNewResManContext - new resman context for the resource + + @Return IMG_VOID +**************************************************************************/ +PVRSRV_ERROR ResManDissociateRes(RESMAN_ITEM *psResItem, + PRESMAN_CONTEXT psNewResManContext) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(psResItem != IMG_NULL); + + if (psResItem == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "ResManDissociateRes: invalid parameter - psResItem")); + PVR_DBG_BREAK; + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#ifdef DEBUG /* QAC fix */ + PVR_ASSERT(psResItem->ui32Signature == RESMAN_SIGNATURE); +#endif + + if (psNewResManContext != IMG_NULL) + { + /* Remove this item from its old resource list */ + List_RESMAN_ITEM_Remove(psResItem); + + /* Re-insert into new list */ + List_RESMAN_ITEM_Insert(&psNewResManContext->psResItemList, psResItem); + + } + else + { + eError = FreeResourceByPtr(psResItem, IMG_FALSE, CLEANUP_WITH_POLL); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "ResManDissociateRes: failed to free resource by pointer")); + return eError; + } + } + + return eError; +} + +/*! +****************************************************************************** + @Function ResManFindResourceByPtr_AnyVaCb + + @Description + Compares the resman item with a given pointer. + + @inputs psCurItem - theThe item to check + @inputs va - Variable argument list with: + psItem - pointer to resource item to find + + @Return IMG_BOOL +**************************************************************************/ +static IMG_BOOL ResManFindResourceByPtr_AnyVaCb(RESMAN_ITEM *psCurItem, va_list va) +{ + RESMAN_ITEM *psItem; + + psItem = va_arg(va, RESMAN_ITEM*); + + return (IMG_BOOL)(psCurItem == psItem); +} + + +/*! 
+****************************************************************************** + @Function ResManFindResourceByPtr + + @Description + Attempts to find a resource in the list for this context + + @inputs hResManContext - handle for resman context + @inputs psItem - pointer to resource item to find + + @Return PVRSRV_ERROR +**************************************************************************/ +IMG_INTERNAL PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT psResManContext, + RESMAN_ITEM *psItem) +{ +/* RESMAN_ITEM *psCurItem;*/ + + PVRSRV_ERROR eResult; + + PVR_ASSERT(psResManContext != IMG_NULL); + PVR_ASSERT(psItem != IMG_NULL); + + if ((psItem == IMG_NULL) || (psResManContext == IMG_NULL)) + { + PVR_DPF((PVR_DBG_ERROR, "ResManFindResourceByPtr: invalid parameter")); + PVR_DBG_BREAK; + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#ifdef DEBUG /* QAC fix */ + PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE); +#endif + + /* Acquire resource list sync object */ + ACQUIRE_SYNC_OBJ; + + PVR_DPF((PVR_DBG_MESSAGE, + "FindResourceByPtr: psItem=%08X, psItem->psNext=%08X", + (IMG_UINTPTR_T)psItem, (IMG_UINTPTR_T)psItem->psNext)); + + PVR_DPF((PVR_DBG_MESSAGE, + "FindResourceByPtr: Resource Ctx 0x%x, Type 0x%x, Addr 0x%x, " + "Param 0x%x, FnCall %08X, Flags 0x%x", + (IMG_UINTPTR_T)psResManContext, + psItem->ui32ResType, + (IMG_UINTPTR_T)psItem->pvParam, + psItem->ui32Param, + (IMG_UINTPTR_T)psItem->pfnFreeResource, + psItem->ui32Flags)); + + /* Search resource items starting at after the first dummy item */ + if(List_RESMAN_ITEM_IMG_BOOL_Any_va(psResManContext->psResItemList, + &ResManFindResourceByPtr_AnyVaCb, + psItem)) + { + eResult = PVRSRV_OK; + } + else + { + eResult = PVRSRV_ERROR_NOT_OWNER; + } + + /* Release resource list sync object */ + RELEASE_SYNC_OBJ; + +/* return PVRSRV_ERROR_NOT_OWNER;*/ + return eResult; +} + +/*! 
+****************************************************************************** + @Function FreeResourceByPtr + + @Description + Frees a resource and move it from the list + NOTE : this function must be called with the resource + list sync object held + + @inputs psItem - pointer to resource item to free + bExecuteCallback - execute callback? + bForceCleanup - skips uKernel re-sync + + @Return PVRSRV_ERROR +**************************************************************************/ +static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem, + IMG_BOOL bExecuteCallback, + IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + PVR_ASSERT(psItem != IMG_NULL); + + if (psItem == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: invalid parameter")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + +#ifdef DEBUG /* QAC fix */ + PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE); +#endif + + PVR_DPF((PVR_DBG_MESSAGE, + "FreeResourceByPtr: psItem=%08X, psItem->psNext=%08X", + (IMG_UINTPTR_T)psItem, (IMG_UINTPTR_T)psItem->psNext)); + + PVR_DPF((PVR_DBG_MESSAGE, + "FreeResourceByPtr: Type 0x%x, Addr 0x%x, " + "Param 0x%x, FnCall %08X, Flags 0x%x", + psItem->ui32ResType, + (IMG_UINTPTR_T)psItem->pvParam, psItem->ui32Param, + (IMG_UINTPTR_T)psItem->pfnFreeResource, psItem->ui32Flags)); + + /* Release resource list sync object just in case the free routine calls the resource manager */ + RELEASE_SYNC_OBJ; + + /* Call the freeing routine */ + if (bExecuteCallback) + { + eError = psItem->pfnFreeResource(psItem->pvParam, psItem->ui32Param, bForceCleanup); + if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) + { + PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR calling FreeResource function")); + } + } + + /* Acquire resource list sync object */ + ACQUIRE_SYNC_OBJ; + + if (eError != PVRSRV_ERROR_RETRY) + { + /* Remove this item from the resource list */ + List_RESMAN_ITEM_Remove(psItem); + + /* Free memory for the resource item */ + 
OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_ITEM), psItem, IMG_NULL); + } + + return(eError); +} + +/*! +****************************************************************************** + @Function FreeResourceByCriteria_AnyVaCb + + @Description + Matches a resource manager item with a given criteria. + + @inputs psCuItem - the item to be matched + @inputs va - a variable argument list with:. + ui32SearchCriteria - indicates which parameters should be used + search for resources to free + ui32ResType - identify what kind of resource to free + pvParam - address of resource to be free + ui32Param - size of resource to be free + + + @Return psCurItem if matched, IMG_NULL otherwise. +**************************************************************************/ +static IMG_VOID* FreeResourceByCriteria_AnyVaCb(RESMAN_ITEM *psCurItem, va_list va) +{ + IMG_UINT32 ui32SearchCriteria; + IMG_UINT32 ui32ResType; + IMG_PVOID pvParam; + IMG_UINT32 ui32Param; + + ui32SearchCriteria = va_arg(va, IMG_UINT32); + ui32ResType = va_arg(va, IMG_UINT32); + pvParam = va_arg(va, IMG_PVOID); + ui32Param = va_arg(va, IMG_UINT32); + + /*check that for all conditions are either disabled or eval to true*/ + if( + /* Check resource type */ + (((ui32SearchCriteria & RESMAN_CRITERIA_RESTYPE) == 0UL) || + (psCurItem->ui32ResType == ui32ResType)) + && + /* Check address */ + (((ui32SearchCriteria & RESMAN_CRITERIA_PVOID_PARAM) == 0UL) || + (psCurItem->pvParam == pvParam)) + && + /* Check size */ + (((ui32SearchCriteria & RESMAN_CRITERIA_UI32_PARAM) == 0UL) || + (psCurItem->ui32Param == ui32Param)) + ) + { + return psCurItem; + } + else + { + return IMG_NULL; + } +} + +/*! +****************************************************************************** + @Function FreeResourceByCriteria + + @Description + Frees all resources that match the given criteria for the + context. 
+ NOTE : this function must be called with the resource + list sync object held + + @inputs psResManContext - pointer to resman context + @inputs ui32SearchCriteria - indicates which parameters should be used + @inputs search for resources to free + @inputs ui32ResType - identify what kind of resource to free + @inputs pvParam - address of resource to be free + @inputs ui32Param - size of resource to be free + @inputs ui32AutoFreeLev - auto free level to free + @inputs bExecuteCallback - execute callback? + + @Return PVRSRV_ERROR +**************************************************************************/ +static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psResManContext, + IMG_UINT32 ui32SearchCriteria, + IMG_UINT32 ui32ResType, + IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bExecuteCallback) +{ + PRESMAN_ITEM psCurItem; + PVRSRV_ERROR eError = PVRSRV_OK; + + /* Search resource items starting at after the first dummy item */ + /*while we get a match and not an error*/ + while((psCurItem = (PRESMAN_ITEM) + List_RESMAN_ITEM_Any_va(psResManContext->psResItemList, + &FreeResourceByCriteria_AnyVaCb, + ui32SearchCriteria, + ui32ResType, + pvParam, + ui32Param)) != IMG_NULL + && eError == PVRSRV_OK) + { + do + { + eError = FreeResourceByPtr(psCurItem, bExecuteCallback, CLEANUP_WITH_POLL); + if (eError == PVRSRV_ERROR_RETRY) + { + RELEASE_SYNC_OBJ; + OSReleaseBridgeLock(); + /* Give a chance for other threads to come in and SGX to do more work */ + OSSleepms(MAX_CLEANUP_TIME_WAIT_US/1000); + OSReacquireBridgeLock(); + ACQUIRE_SYNC_OBJ; + } + } while (eError == PVRSRV_ERROR_RETRY); + } + + return eError; +} + + +#ifdef DEBUG +/*! 
+****************************************************************************** + @Function ValidateResList + + @Description + Walks the resource list check the pointers + NOTE : this function must be called with the resource + list sync object held + + @Return none +**************************************************************************/ +static IMG_VOID ValidateResList(PRESMAN_LIST psResList) +{ + PRESMAN_ITEM psCurItem, *ppsThisItem; + PRESMAN_CONTEXT psCurContext, *ppsThisContext; + + /* check we're initialised */ + if (psResList == IMG_NULL) + { + PVR_DPF((PVR_DBG_MESSAGE, "ValidateResList: resman not initialised yet")); + return; + } + + psCurContext = psResList->psContextList; + ppsThisContext = &psResList->psContextList; + + /* Walk the context list */ + while(psCurContext != IMG_NULL) + { + /* Check current item */ + PVR_ASSERT(psCurContext->ui32Signature == RESMAN_SIGNATURE); + if (psCurContext->ppsThis != ppsThisContext) + { + PVR_DPF((PVR_DBG_WARNING, + "psCC=%08X psCC->ppsThis=%08X psCC->psNext=%08X ppsTC=%08X", + (IMG_UINTPTR_T)psCurContext, + (IMG_UINTPTR_T)psCurContext->ppsThis, + (IMG_UINTPTR_T)psCurContext->psNext, + (IMG_UINTPTR_T)ppsThisContext)); + PVR_ASSERT(psCurContext->ppsThis == ppsThisContext); + } + + /* Walk the list for this context */ + psCurItem = psCurContext->psResItemList; + ppsThisItem = &psCurContext->psResItemList; + while(psCurItem != IMG_NULL) + { + /* Check current item */ + PVR_ASSERT(psCurItem->ui32Signature == RESMAN_SIGNATURE); + if (psCurItem->ppsThis != ppsThisItem) + { + PVR_DPF((PVR_DBG_WARNING, + "psCurItem=%08X psCurItem->ppsThis=%08X psCurItem->psNext=%08X ppsThisItem=%08X", + (IMG_UINTPTR_T)psCurItem, + (IMG_UINTPTR_T)psCurItem->ppsThis, + (IMG_UINTPTR_T)psCurItem->psNext, + (IMG_UINTPTR_T)ppsThisItem)); + PVR_ASSERT(psCurItem->ppsThis == ppsThisItem); + } + + /* Move to next item */ + ppsThisItem = &psCurItem->psNext; + psCurItem = psCurItem->psNext; + } + + /* Move to next context */ + ppsThisContext = 
&psCurContext->psNext; + psCurContext = psCurContext->psNext; + } +} +#endif /* DEBUG */ + + +/****************************************************************************** + End of file (resman.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/common/ttrace.c b/pvr-source/services4/srvkm/common/ttrace.c new file mode 100644 index 0000000..574bf25 --- /dev/null +++ b/pvr-source/services4/srvkm/common/ttrace.c @@ -0,0 +1,597 @@ +/*************************************************************************/ /*! +@Title Timed Trace functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#if defined (TTRACE) + +#include "services_headers.h" +#include "ttrace.h" + +#if defined(PVRSRV_NEED_PVR_DPF) +#define CHECKSIZE(n,m) \ + if ((n & m) != n) \ + PVR_DPF((PVR_DBG_ERROR,"Size check failed for " #m)) +#else +#define CHECKSIZE(n,m) +#endif + +#define TIME_TRACE_HASH_TABLE_SIZE 32 + +HASH_TABLE *g_psBufferTable; +IMG_UINT32 g_ui32HostUID; +IMG_HANDLE g_psTimer; + +/* Trace buffer struct */ +typedef struct +{ + IMG_UINT32 ui32Woff; /* Offset to where next item will be written */ + IMG_UINT32 ui32Roff; /* Offset to where to start reading from */ + IMG_UINT32 ui32ByteCount; /* Number of bytes in buffer */ + IMG_UINT8 ui8Data[0]; +} sTimeTraceBuffer; + +/*! 
+****************************************************************************** + + @Function PVRSRVTimeTraceItemSize + + @Description + + Calculate the size of a trace item + + @Input psTraceItem : Trace item + + @Return size of trace item + +******************************************************************************/ +static IMG_UINT32 +PVRSRVTimeTraceItemSize(IMG_UINT32 *psTraceItem) +{ + IMG_UINT32 ui32Size = PVRSRV_TRACE_ITEM_SIZE; + + ui32Size += READ_HEADER(SIZE, psTraceItem[PVRSRV_TRACE_DATA_HEADER]); + + return ui32Size; +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceAllocItem + + @Description + + Allocate a trace item from the buffer of the current process + + @Output ppsTraceItem : Pointer to allocated trace item + + @Input ui32Size : Size of data packet to be allocated + + @Return none + +******************************************************************************/ +static IMG_VOID +PVRSRVTimeTraceAllocItem(IMG_UINT32 **pui32Item, IMG_UINT32 ui32Size) +{ + IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); + IMG_UINT32 ui32AllocOffset; + sTimeTraceBuffer *psBuffer = (sTimeTraceBuffer *) HASH_Retrieve(g_psBufferTable, (IMG_UINTPTR_T) ui32PID); + + /* The caller only asks for extra data space */ + ui32Size += PVRSRV_TRACE_ITEM_SIZE; + + /* Always round to 32-bit */ + ui32Size = ((ui32Size - 1) & (~0x3)) + 0x04; + + if (!psBuffer) + { + PVRSRV_ERROR eError; + + PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVTimeTraceAllocItem: Creating buffer for PID %u", (IMG_UINT32) ui32PID)); + eError = PVRSRVTimeTraceBufferCreate(ui32PID); + if (eError != PVRSRV_OK) + { + *pui32Item = IMG_NULL; + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceAllocItem: Failed to create buffer")); + return; + } + + psBuffer = (sTimeTraceBuffer *) HASH_Retrieve(g_psBufferTable, (IMG_UINTPTR_T) ui32PID); + if (psBuffer == IMG_NULL) + { + *pui32Item = NULL; + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceAllocItem: Failed to retrieve 
buffer")); + return; + } + } + + /* Can't allocate more then buffer size */ + if (ui32Size >= TIME_TRACE_BUFFER_SIZE) + { + *pui32Item = NULL; + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceAllocItem: Error trace item too large (%d)", ui32Size)); + return; + } + + /* FIXME: Enter critical section? */ + + /* Always ensure we have enough space to write a padding message */ + if ((psBuffer->ui32Woff + ui32Size + PVRSRV_TRACE_ITEM_SIZE) > TIME_TRACE_BUFFER_SIZE) + { + IMG_UINT32 *ui32WriteEOB = (IMG_UINT32 *) &psBuffer->ui8Data[psBuffer->ui32Woff]; + IMG_UINT32 ui32Remain = TIME_TRACE_BUFFER_SIZE - psBuffer->ui32Woff; + + /* Not enough space at the end of the buffer, back to the start */ + *ui32WriteEOB++ = WRITE_HEADER(GROUP, PVRSRV_TRACE_GROUP_PADDING); + *ui32WriteEOB++ = 0; /* Don't need timestamp */ + *ui32WriteEOB++ = 0; /* Don't need UID */ + *ui32WriteEOB = WRITE_HEADER(SIZE, (ui32Remain - PVRSRV_TRACE_ITEM_SIZE)); + psBuffer->ui32ByteCount += ui32Remain; + psBuffer->ui32Woff = ui32AllocOffset = 0; + } + else + ui32AllocOffset = psBuffer->ui32Woff; + + psBuffer->ui32Woff = psBuffer->ui32Woff + ui32Size; + psBuffer->ui32ByteCount += ui32Size; + + /* This allocation will start overwritting past our read pointer, move the read pointer along */ + while (psBuffer->ui32ByteCount > TIME_TRACE_BUFFER_SIZE) + { + IMG_UINT32 *psReadItem = (IMG_UINT32 *) &psBuffer->ui8Data[psBuffer->ui32Roff]; + IMG_UINT32 ui32ReadSize; + + ui32ReadSize = PVRSRVTimeTraceItemSize(psReadItem); + psBuffer->ui32Roff = (psBuffer->ui32Roff + ui32ReadSize) & (TIME_TRACE_BUFFER_SIZE - 1); + psBuffer->ui32ByteCount -= ui32ReadSize; + } + + *pui32Item = (IMG_UINT32 *) &psBuffer->ui8Data[ui32AllocOffset]; + /* FIXME: Exit critical section? */ +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceBufferCreate + + @Description + + Create a trace buffer. + + Note: We assume that this will only be called once per process. 
+ + @Input ui32PID : PID of the process that is creating the buffer + + @Return none + +******************************************************************************/ +PVRSRV_ERROR PVRSRVTimeTraceBufferCreate(IMG_UINT32 ui32PID) +{ + sTimeTraceBuffer *psBuffer; + PVRSRV_ERROR eError = PVRSRV_OK; + + eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT, + sizeof(sTimeTraceBuffer) + TIME_TRACE_BUFFER_SIZE, + (IMG_VOID **)&psBuffer, IMG_NULL, + "Time Trace Buffer"); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceBufferCreate: Error allocating trace buffer")); + return eError; + } + + OSMemSet(psBuffer, 0, TIME_TRACE_BUFFER_SIZE); + + if (!HASH_Insert(g_psBufferTable, (IMG_UINTPTR_T) ui32PID, (IMG_UINTPTR_T) psBuffer)) + { + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(sTimeTraceBuffer) + TIME_TRACE_BUFFER_SIZE, + psBuffer, NULL); + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceBufferCreate: Error adding trace buffer to hash table")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + return eError; +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceBufferDestroy + + @Description + + Destroy a trace buffer. + + Note: We assume that this will only be called once per process. 
+ + @Input ui32PID : PID of the process that is creating the buffer + + @Return none + +******************************************************************************/ +PVRSRV_ERROR PVRSRVTimeTraceBufferDestroy(IMG_UINT32 ui32PID) +{ + sTimeTraceBuffer *psBuffer; + +#if defined(DUMP_TTRACE_BUFFERS_ON_EXIT) + PVRSRVDumpTimeTraceBuffers(); +#endif + psBuffer = (sTimeTraceBuffer *) HASH_Retrieve(g_psBufferTable, (IMG_UINTPTR_T) ui32PID); + if (psBuffer) + { + OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(sTimeTraceBuffer) + TIME_TRACE_BUFFER_SIZE, + psBuffer, NULL); + HASH_Remove(g_psBufferTable, (IMG_UINTPTR_T) ui32PID); + return PVRSRV_OK; + } + + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceBufferDestroy: Can't find trace buffer in hash table")); + return PVRSRV_ERROR_INVALID_PARAMS; +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceInit + + @Description + + Initialise the timed trace subsystem. + + @Return Error + +******************************************************************************/ +PVRSRV_ERROR PVRSRVTimeTraceInit(IMG_VOID) +{ + g_psBufferTable = HASH_Create(TIME_TRACE_HASH_TABLE_SIZE); + + /* Create hash table to store the per process buffers in */ + if (!g_psBufferTable) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceInit: Error creating hash table")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Create the kernel buffer */ + PVRSRVTimeTraceBufferCreate(KERNEL_ID); + + g_psTimer = OSFuncHighResTimerCreate(); + + if (!g_psTimer) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceInit: Error creating timer")); + return PVRSRV_ERROR_INIT_FAILURE; + } + return PVRSRV_OK; +} + +static PVRSRV_ERROR _PVRSRVTimeTraceBufferDestroy(IMG_UINTPTR_T hKey, IMG_UINTPTR_T hData) +{ + PVR_UNREFERENCED_PARAMETER(hData); + PVR_DPF((PVR_DBG_MESSAGE, "_PVRSRVTimeTraceBufferDestroy: Destroying buffer for PID %u", (IMG_UINT32) hKey)); + + PVRSRVTimeTraceBufferDestroy(hKey); + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function PVRSRVTimeTraceDeinit + + @Description + + De-initialise the timed trace subsystem. + + @Return Error + +******************************************************************************/ +IMG_VOID PVRSRVTimeTraceDeinit(IMG_VOID) +{ + PVRSRVTimeTraceBufferDestroy(KERNEL_ID); + /* Free any buffers the where created at alloc item time */ + HASH_Iterate(g_psBufferTable, _PVRSRVTimeTraceBufferDestroy); + HASH_Delete(g_psBufferTable); + OSFuncHighResTimerDestroy(g_psTimer); +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceWriteHeader + + @Description + + Write the header for a trace item. + + @Input pui32TraceItem : Pointer to trace item + + @Input ui32Group : Trace item's group ID + + @Input ui32Class : Trace item's class ID + + @Input ui32Token : Trace item's ui32Token ID + + @Input ui32Size : Trace item's data payload size + + @Input ui32Type : Trace item's data type + + @Input ui32Count : Trace item's data count + + @Return Pointer to data payload space, or NULL if no data payload + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceWriteHeader) +#endif +static INLINE IMG_VOID *PVRSRVTimeTraceWriteHeader(IMG_UINT32 *pui32TraceItem, IMG_UINT32 ui32Group, + IMG_UINT32 ui32Class, IMG_UINT32 ui32Token, + IMG_UINT32 ui32Size, IMG_UINT32 ui32Type, + IMG_UINT32 ui32Count) +{ + /* Sanity check arg's */ + CHECKSIZE(ui32Group, PVRSRV_TRACE_GROUP_MASK); + CHECKSIZE(ui32Class, PVRSRV_TRACE_CLASS_MASK); + CHECKSIZE(ui32Token, PVRSRV_TRACE_TOKEN_MASK); + + CHECKSIZE(ui32Size, PVRSRV_TRACE_SIZE_MASK); + CHECKSIZE(ui32Type, PVRSRV_TRACE_TYPE_MASK); + CHECKSIZE(ui32Count, PVRSRV_TRACE_COUNT_MASK); + + /* Trace header */ + pui32TraceItem[PVRSRV_TRACE_HEADER] = WRITE_HEADER(GROUP, ui32Group); + pui32TraceItem[PVRSRV_TRACE_HEADER] |= 
WRITE_HEADER(CLASS, ui32Class); + pui32TraceItem[PVRSRV_TRACE_HEADER] |= WRITE_HEADER(TOKEN, ui32Token); + + /* Data header */ + pui32TraceItem[PVRSRV_TRACE_DATA_HEADER] = WRITE_HEADER(SIZE, ui32Size); + pui32TraceItem[PVRSRV_TRACE_DATA_HEADER] |= WRITE_HEADER(TYPE, ui32Type); + pui32TraceItem[PVRSRV_TRACE_DATA_HEADER] |= WRITE_HEADER(COUNT, ui32Count); + + pui32TraceItem[PVRSRV_TRACE_TIMESTAMP] = OSFuncHighResTimerGetus(g_psTimer); + pui32TraceItem[PVRSRV_TRACE_HOSTUID] = g_ui32HostUID++; + + return ui32Size?((IMG_VOID *) &pui32TraceItem[PVRSRV_TRACE_DATA_PAYLOAD]):NULL; +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceArray + + @Description + + Write trace item with an array of data + + @Input ui32Group : Trace item's group ID + + @Input ui32Class : Trace item's class ID + + @Input ui32Token : Trace item's ui32Token ID + + @Input ui32Size : Trace item's data payload size + + @Input ui32Type : Trace item's data type + + @Input ui32Count : Trace item's data count + + @Input pui8Data : Pointer to data array + + @Return Pointer to data payload space, or NULL if no data payload + +******************************************************************************/ +IMG_VOID PVRSRVTimeTraceArray(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, IMG_UINT32 ui32Token, + IMG_UINT32 ui32Type, IMG_UINT32 ui32Count, IMG_UINT8 *pui8Data) +{ + IMG_UINT32 *pui32TraceItem; + IMG_UINT32 ui32Size, ui32TypeSize; + IMG_UINT8 *ui8Ptr; + + /* Only the 1st 4 sizes are for ui types, others are "special" */ + switch (ui32Type) + { + case PVRSRV_TRACE_TYPE_UI8: ui32TypeSize = 1; + break; + case PVRSRV_TRACE_TYPE_UI16: ui32TypeSize = 2; + break; + case PVRSRV_TRACE_TYPE_UI32: ui32TypeSize = 4; + break; + case PVRSRV_TRACE_TYPE_UI64: ui32TypeSize = 8; + break; + default: + PVR_DPF((PVR_DBG_ERROR, "Unsupported size\n")); + return; + } + + ui32Size = ui32TypeSize * ui32Count; + + /* Allocate space from the buffer */ + 
PVRSRVTimeTraceAllocItem(&pui32TraceItem, ui32Size); + + if (!pui32TraceItem) + { + PVR_DPF((PVR_DBG_ERROR, "Can't find buffer\n")); + return; + } + + ui8Ptr = PVRSRVTimeTraceWriteHeader(pui32TraceItem, ui32Group, ui32Class, ui32Token, + ui32Size, ui32Type, ui32Count); + + if (ui8Ptr) + { + OSMemCopy(ui8Ptr, pui8Data, ui32Size); + } +} + +/*! +****************************************************************************** + + @Function PVRSRVTimeTraceSyncObject + + @Description + + Write trace item with a sync object + + @Input ui32Group : Trace item's group ID + + @Input ui32Token : Trace item's ui32Token ID + + @Input psSync : Sync object + + @Input ui8SyncOpp : Sync object operation + + @Return None + +******************************************************************************/ +IMG_VOID PVRSRVTimeTraceSyncObject(IMG_UINT32 ui32Group, IMG_UINT32 ui32Token, + PVRSRV_KERNEL_SYNC_INFO *psSync, IMG_UINT8 ui8SyncOp) +{ + IMG_UINT32 *pui32TraceItem; + IMG_UINT32 *ui32Ptr; + IMG_UINT32 ui32Size = PVRSRV_TRACE_TYPE_SYNC_SIZE; + + + PVRSRVTimeTraceAllocItem(&pui32TraceItem, ui32Size); + + if (!pui32TraceItem) + { + PVR_DPF((PVR_DBG_ERROR, "Can't find buffer\n")); + return; + } + + ui32Ptr = PVRSRVTimeTraceWriteHeader(pui32TraceItem, ui32Group, PVRSRV_TRACE_CLASS_SYNC, + ui32Token, ui32Size, PVRSRV_TRACE_TYPE_SYNC, 1); + + ui32Ptr[PVRSRV_TRACE_SYNC_UID] = psSync->ui32UID; + ui32Ptr[PVRSRV_TRACE_SYNC_WOP] = psSync->psSyncData->ui32WriteOpsPending; + ui32Ptr[PVRSRV_TRACE_SYNC_WOC] = psSync->psSyncData->ui32WriteOpsComplete; + ui32Ptr[PVRSRV_TRACE_SYNC_ROP] = psSync->psSyncData->ui32ReadOpsPending; + ui32Ptr[PVRSRV_TRACE_SYNC_ROC] = psSync->psSyncData->ui32ReadOpsComplete; + ui32Ptr[PVRSRV_TRACE_SYNC_RO2P] = psSync->psSyncData->ui32ReadOps2Pending; + ui32Ptr[PVRSRV_TRACE_SYNC_RO2C] = psSync->psSyncData->ui32ReadOps2Complete; + ui32Ptr[PVRSRV_TRACE_SYNC_WO_DEV_VADDR] = psSync->sWriteOpsCompleteDevVAddr.uiAddr; + ui32Ptr[PVRSRV_TRACE_SYNC_RO_DEV_VADDR] = 
psSync->sReadOpsCompleteDevVAddr.uiAddr; + ui32Ptr[PVRSRV_TRACE_SYNC_RO2_DEV_VADDR] = psSync->sReadOps2CompleteDevVAddr.uiAddr; + ui32Ptr[PVRSRV_TRACE_SYNC_OP] = ui8SyncOp; +} + +/*! +****************************************************************************** + + @Function PVRSRVDumpTimeTraceBuffer + + @Description + + Dump the contents of the trace buffer. + + @Input hKey : Trace item's group ID + + @Input hData : Trace item's ui32Token ID + + @Return Error + +******************************************************************************/ +static PVRSRV_ERROR PVRSRVDumpTimeTraceBuffer(IMG_UINTPTR_T hKey, IMG_UINTPTR_T hData) +{ + sTimeTraceBuffer *psBuffer = (sTimeTraceBuffer *) hData; + IMG_UINT32 ui32ByteCount = psBuffer->ui32ByteCount; + IMG_UINT32 ui32Walker = psBuffer->ui32Roff; + IMG_UINT32 ui32Read, ui32LineLen, ui32EOL, ui32MinLine; + + PVR_DPF((PVR_DBG_ERROR, "TTB for PID %u:\n", (IMG_UINT32) hKey)); + + while (ui32ByteCount) + { + IMG_UINT32 *pui32Buffer = (IMG_UINT32 *) &psBuffer->ui8Data[ui32Walker]; + + ui32LineLen = (ui32ByteCount/sizeof(IMG_UINT32)); + ui32EOL = (TIME_TRACE_BUFFER_SIZE - ui32Walker)/sizeof(IMG_UINT32); + ui32MinLine = (ui32LineLen < ui32EOL)?ui32LineLen:ui32EOL; + + if (ui32MinLine >= 4) + { + PVR_DPF((PVR_DBG_ERROR, "\t(TTB-%X) %08X %08X %08X %08X", ui32ByteCount, + pui32Buffer[0], pui32Buffer[1], pui32Buffer[2], pui32Buffer[3])); + ui32Read = 4 * sizeof(IMG_UINT32); + } + else if (ui32MinLine >= 3) + { + PVR_DPF((PVR_DBG_ERROR, "\t(TTB-%X) %08X %08X %08X", ui32ByteCount, + pui32Buffer[0], pui32Buffer[1], pui32Buffer[2])); + ui32Read = 3 * sizeof(IMG_UINT32); + } + else if (ui32MinLine >= 2) + { + PVR_DPF((PVR_DBG_ERROR, "\t(TTB-%X) %08X %08X", ui32ByteCount, + pui32Buffer[0], pui32Buffer[1])); + ui32Read = 2 * sizeof(IMG_UINT32); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "\t(TTB-%X) %08X", ui32ByteCount, + pui32Buffer[0])); + ui32Read = sizeof(IMG_UINT32); + } + + ui32Walker = (ui32Walker + ui32Read) & (TIME_TRACE_BUFFER_SIZE - 
1); + ui32ByteCount -= ui32Read; + } + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function PVRSRVDumpTimeTraceBuffers + + @Description + + Dump the contents of all the trace buffers. + + @Return None + +******************************************************************************/ +IMG_VOID PVRSRVDumpTimeTraceBuffers(IMG_VOID) +{ + HASH_Iterate(g_psBufferTable, PVRSRVDumpTimeTraceBuffer); +} + +#endif /* TTRACE */ diff --git a/pvr-source/services4/srvkm/devices/sgx/mmu.c b/pvr-source/services4/srvkm/devices/sgx/mmu.c new file mode 100644 index 0000000..44dc824 --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/mmu.c @@ -0,0 +1,4600 @@ +/*************************************************************************/ /*! +@Title MMU Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements basic low level control of MMU. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "sgxdefs.h" +#include "sgxmmu.h" +#include "services_headers.h" +#include "buffer_manager.h" +#include "hash.h" +#include "ra.h" +#include "pdump_km.h" +#include "sgxapi_km.h" +#include "sgxinfo.h" +#include "sgxinfokm.h" +#include "mmu.h" +#include "sgxconfig.h" +#include "sgx_bridge_km.h" +#include "pdump_osfunc.h" + +#define UINT32_MAX_VALUE 0xFFFFFFFFUL + +/* + MMU performs device virtual to physical translation. 
+ terminology: + page directory (PD) + pagetable (PT) + data page (DP) + + Incoming 32bit Device Virtual Addresses are deconstructed into 3 fields: + --------------------------------------------------------- + | PD Index/tag: | PT Index: | DP offset: | + | bits 31:22 | bits 21:n | bits (n-1):0 | + --------------------------------------------------------- + where typically n=12 for a standard 4k DP + but n=16 for a 64k DP + + MMU page directory (PD), pagetable (PT) and data page (DP) config: + PD: + - always one page per address space + - up to 4k in size to span 4Gb (32bit) + - contains up to 1024 32bit entries + - entries are indexed by the top 12 bits of an incoming 32bit device virtual address + - the PD entry selected contains the physical address of the PT to + perform the next stage of the V to P translation + + PT: + - size depends on the DP size, e.g. 4k DPs have 4k PTs but 16k DPs have 1k PTs + - each PT always spans 4Mb of device virtual address space irrespective of DP size + - number of entries in a PT depend on DP size and ranges from 1024 to 4 entries + - entries are indexed by the PT Index field of the device virtual address (21:n) + - the PT entry selected contains the physical address of the DP to access + + DP: + - size varies from 4k to 4M in multiple of 4 steppings + - DP offset field of the device virtual address ((n-1):0) is used as a byte offset + to address into the DP itself +*/ + +#define SGX_MAX_PD_ENTRIES (1<<(SGX_FEATURE_ADDRESS_SPACE_SIZE - SGX_MMU_PT_SHIFT - SGX_MMU_PAGE_SHIFT)) + +#if defined(FIX_HW_BRN_31620) +/* Sim doesn't use the address mask */ +#define SGX_MMU_PDE_DUMMY_PAGE (0)//(0x00000020U) +#define SGX_MMU_PTE_DUMMY_PAGE (0)//(0x00000020U) + +/* 4MB adress range per page table */ +#define BRN31620_PT_ADDRESS_RANGE_SHIFT 22 +#define BRN31620_PT_ADDRESS_RANGE_SIZE (1 << BRN31620_PT_ADDRESS_RANGE_SHIFT) + +/* 64MB address range per PDE cache line */ +#define BRN31620_PDE_CACHE_FILL_SHIFT 26 +#define 
BRN31620_PDE_CACHE_FILL_SIZE (1 << BRN31620_PDE_CACHE_FILL_SHIFT) +#define BRN31620_PDE_CACHE_FILL_MASK (BRN31620_PDE_CACHE_FILL_SIZE - 1) + +/* Page Directory Enteries per cache line */ +#define BRN31620_PDES_PER_CACHE_LINE_SHIFT (BRN31620_PDE_CACHE_FILL_SHIFT - BRN31620_PT_ADDRESS_RANGE_SHIFT) +#define BRN31620_PDES_PER_CACHE_LINE_SIZE (1 << BRN31620_PDES_PER_CACHE_LINE_SHIFT) +#define BRN31620_PDES_PER_CACHE_LINE_MASK (BRN31620_PDES_PER_CACHE_LINE_SIZE - 1) + +/* Macros for working out offset for dummy pages */ +#define BRN31620_DUMMY_PAGE_OFFSET (1 * SGX_MMU_PAGE_SIZE) +#define BRN31620_DUMMY_PDE_INDEX (BRN31620_DUMMY_PAGE_OFFSET / BRN31620_PT_ADDRESS_RANGE_SIZE) +#define BRN31620_DUMMY_PTE_INDEX ((BRN31620_DUMMY_PAGE_OFFSET - (BRN31620_DUMMY_PDE_INDEX * BRN31620_PT_ADDRESS_RANGE_SIZE))/SGX_MMU_PAGE_SIZE) + +/* Cache number of cache lines */ +#define BRN31620_CACHE_FLUSH_SHIFT (32 - BRN31620_PDE_CACHE_FILL_SHIFT) +#define BRN31620_CACHE_FLUSH_SIZE (1 << BRN31620_CACHE_FLUSH_SHIFT) + +/* Cache line bits in a UINT32 */ +#define BRN31620_CACHE_FLUSH_BITS_SHIFT 5 +#define BRN31620_CACHE_FLUSH_BITS_SIZE (1 << BRN31620_CACHE_FLUSH_BITS_SHIFT) +#define BRN31620_CACHE_FLUSH_BITS_MASK (BRN31620_CACHE_FLUSH_BITS_SIZE - 1) + +/* Cache line index in array */ +#define BRN31620_CACHE_FLUSH_INDEX_BITS (BRN31620_CACHE_FLUSH_SHIFT - BRN31620_CACHE_FLUSH_BITS_SHIFT) +#define BRN31620_CACHE_FLUSH_INDEX_SIZE (1 << BRN31620_CACHE_FLUSH_INDEX_BITS) + +#define BRN31620_DUMMY_PAGE_SIGNATURE 0xFEEBEE01 +#endif + +typedef struct _MMU_PT_INFO_ +{ + /* note: may need a union here to accommodate a PT page address for local memory */ + IMG_VOID *hPTPageOSMemHandle; + IMG_CPU_VIRTADDR PTPageCpuVAddr; + /* Map of reserved PTEs. + * Reserved PTEs are like "valid" PTEs in that they (and the DevVAddrs they represent) + * cannot be assigned to another allocation but their "reserved" status persists through + * any amount of mapping and unmapping, until the allocation is finally destroyed. 
+ * + * Reserved and Valid are independent. + * When a PTE is first reserved, it will have Reserved=1 and Valid=0. + * When the PTE is actually mapped, it will have Reserved=1 and Valid=1. + * When the PTE is unmapped, it will have Reserved=1 and Valid=0. + * At this point, the PT will can not be destroyed because although there is + * not an active mapping on the PT, it is known a PTE is reserved for use. + * + * The above sequence of mapping and unmapping may repeat any number of times + * until the allocation is unmapped and destroyed which causes the PTE to have + * Valid=0 and Reserved=0. + */ + /* Number of PTEs set up. + * i.e. have a valid SGX Phys Addr and the "VALID" PTE bit == 1 + */ + IMG_UINT32 ui32ValidPTECount; +} MMU_PT_INFO; + +#define MMU_CONTEXT_NAME_SIZE 50 +struct _MMU_CONTEXT_ +{ + /* the device node */ + PVRSRV_DEVICE_NODE *psDeviceNode; + + /* Page Directory CPUVirt and DevPhys Addresses */ + IMG_CPU_VIRTADDR pvPDCpuVAddr; + IMG_DEV_PHYADDR sPDDevPAddr; + + IMG_VOID *hPDOSMemHandle; + + /* information about dynamically allocated pagetables */ + MMU_PT_INFO *apsPTInfoList[SGX_MAX_PD_ENTRIES]; + + PVRSRV_SGXDEV_INFO *psDevInfo; + +#if defined(PDUMP) + IMG_UINT32 ui32PDumpMMUContextID; +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + IMG_BOOL bPDumpActive; +#endif +#endif + + IMG_UINT32 ui32PID; + IMG_CHAR szName[MMU_CONTEXT_NAME_SIZE]; + +#if defined (FIX_HW_BRN_31620) + IMG_UINT32 ui32PDChangeMask[BRN31620_CACHE_FLUSH_INDEX_SIZE]; + IMG_UINT32 ui32PDCacheRangeRefCount[BRN31620_CACHE_FLUSH_SIZE]; + MMU_PT_INFO *apsPTInfoListSave[SGX_MAX_PD_ENTRIES]; +#endif + struct _MMU_CONTEXT_ *psNext; +}; + +struct _MMU_HEAP_ +{ + /* MMU context */ + MMU_CONTEXT *psMMUContext; + + /* + heap specific details: + */ + /* the Base PD index for the heap */ + IMG_UINT32 ui32PDBaseIndex; + /* number of pagetables in this heap */ + IMG_UINT32 ui32PageTableCount; + /* total number of pagetable entries in this heap which may be mapped to data pages */ + IMG_UINT32 
ui32PTETotalUsable; + /* PD entry DP size control field */ + IMG_UINT32 ui32PDEPageSizeCtrl; + + /* + Data Page (DP) Details: + */ + /* size in bytes of a data page */ + IMG_UINT32 ui32DataPageSize; + /* bit width of the data page offset addressing field */ + IMG_UINT32 ui32DataPageBitWidth; + /* bit mask of the data page offset addressing field */ + IMG_UINT32 ui32DataPageMask; + + /* + PageTable (PT) Details: + */ + /* bit shift to base of PT addressing field */ + IMG_UINT32 ui32PTShift; + /* bit width of the PT addressing field */ + IMG_UINT32 ui32PTBitWidth; + /* bit mask of the PT addressing field */ + IMG_UINT32 ui32PTMask; + /* size in bytes of a pagetable */ + IMG_UINT32 ui32PTSize; + /* Allocated PT Entries per PT */ + IMG_UINT32 ui32PTNumEntriesAllocated; + /* Usable PT Entries per PT (may be different to num allocated for 4MB data page) */ + IMG_UINT32 ui32PTNumEntriesUsable; + + /* + PageDirectory Details: + */ + /* bit shift to base of PD addressing field */ + IMG_UINT32 ui32PDShift; + /* bit width of the PD addressing field */ + IMG_UINT32 ui32PDBitWidth; + /* bit mask of the PT addressing field */ + IMG_UINT32 ui32PDMask; + + /* + Arena Info: + */ + RA_ARENA *psVMArena; + DEV_ARENA_DESCRIPTOR *psDevArena; + + /* If we have sparse mappings then we can't do PT level sanity checks */ + IMG_BOOL bHasSparseMappings; +#if defined(PDUMP) + PDUMP_MMU_ATTRIB sMMUAttrib; +#endif +}; + + + +#if defined (SUPPORT_SGX_MMU_DUMMY_PAGE) +#define DUMMY_DATA_PAGE_SIGNATURE 0xDEADBEEF +#endif + +/* local prototypes: */ +static IMG_VOID +_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT); + +#if defined(PDUMP) +static IMG_VOID +MMU_PDumpPageTables (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SIZE_T uSize, + IMG_BOOL bForUnmap, + IMG_HANDLE hUniqueTag); +#endif /* #if defined(PDUMP) */ + +/* This option tests page table memory, for use during device bring-up. 
*/ +#define PAGE_TEST 0 +#if PAGE_TEST +static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr); +#endif + +/* This option dumps out the PT if an assert fails */ +#define PT_DUMP 1 + +/* This option sanity checks page table PTE valid count matches active PTEs */ +#define PT_DEBUG 0 +#if (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF) +static IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList) +{ + IMG_UINT32 *p = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr; + IMG_UINT32 i; + + /* 1024 entries in a 4K page table */ + for(i = 0; i < 1024; i += 8) + { + PVR_DPF((PVR_DBG_ERROR, + "%08X %08X %08X %08X %08X %08X %08X %08X\n", + p[i + 0], p[i + 1], p[i + 2], p[i + 3], + p[i + 4], p[i + 5], p[i + 6], p[i + 7])); + } +} +#else /* (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF) */ +static INLINE IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList) +{ + PVR_UNREFERENCED_PARAMETER(psPTInfoList); +} +#endif /* (PT_DEBUG || PT_DUMP) && defined(PVRSRV_NEED_PVR_DPF) */ + +#if PT_DEBUG +static IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList) +{ + IMG_UINT32 *p = (IMG_UINT32*) psPTInfoList->PTPageCpuVAddr; + IMG_UINT32 i, ui32Count = 0; + + /* 1024 entries in a 4K page table */ + for(i = 0; i < 1024; i++) + if(p[i] & SGX_MMU_PTE_VALID) + ui32Count++; + + if(psPTInfoList->ui32ValidPTECount != ui32Count) + { + PVR_DPF((PVR_DBG_ERROR, "ui32ValidPTECount: %u ui32Count: %u\n", + psPTInfoList->ui32ValidPTECount, ui32Count)); + DumpPT(psPTInfoList); + BUG(); + } +} +#else /* PT_DEBUG */ +static INLINE IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList) +{ + PVR_UNREFERENCED_PARAMETER(psPTInfoList); +} +#endif /* PT_DEBUG */ + +/* + Debug functionality that allows us to make the CPU + mapping of pagetable memory readonly and only make + it read/write when we alter it. 
This allows us + to check that our memory isn't being overwritten +*/ +#if defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND) + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#else +#include <generated/autoconf.h> +#endif + +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/highmem.h> +#include <asm/pgtable.h> +#include <asm/tlbflush.h> + +static IMG_VOID MakeKernelPageReadWrite(IMG_PVOID ulCPUVAddr) +{ + pgd_t *psPGD; + pud_t *psPUD; + pmd_t *psPMD; + pte_t *psPTE; + pte_t ptent; + IMG_UINT32 ui32CPUVAddr = (IMG_UINT32) ulCPUVAddr; + + psPGD = pgd_offset_k(ui32CPUVAddr); + if (pgd_none(*psPGD) || pgd_bad(*psPGD)) + { + PVR_ASSERT(0); + } + + psPUD = pud_offset(psPGD, ui32CPUVAddr); + if (pud_none(*psPUD) || pud_bad(*psPUD)) + { + PVR_ASSERT(0); + } + + psPMD = pmd_offset(psPUD, ui32CPUVAddr); + if (pmd_none(*psPMD) || pmd_bad(*psPMD)) + { + PVR_ASSERT(0); + } + psPTE = (pte_t *)pte_offset_kernel(psPMD, ui32CPUVAddr); + + ptent = ptep_modify_prot_start(&init_mm, ui32CPUVAddr, psPTE); + ptent = pte_mkwrite(ptent); + ptep_modify_prot_commit(&init_mm, ui32CPUVAddr, psPTE, ptent); + + flush_tlb_all(); +} + +static IMG_VOID MakeKernelPageReadOnly(IMG_PVOID ulCPUVAddr) +{ + pgd_t *psPGD; + pud_t *psPUD; + pmd_t *psPMD; + pte_t *psPTE; + pte_t ptent; + IMG_UINT32 ui32CPUVAddr = (IMG_UINT32) ulCPUVAddr; + + OSWriteMemoryBarrier(); + + psPGD = pgd_offset_k(ui32CPUVAddr); + if (pgd_none(*psPGD) || pgd_bad(*psPGD)) + { + PVR_ASSERT(0); + } + + psPUD = pud_offset(psPGD, ui32CPUVAddr); + if (pud_none(*psPUD) || pud_bad(*psPUD)) + { + PVR_ASSERT(0); + } + + psPMD = pmd_offset(psPUD, ui32CPUVAddr); + if (pmd_none(*psPMD) || pmd_bad(*psPMD)) + { + PVR_ASSERT(0); + } + + psPTE = (pte_t *)pte_offset_kernel(psPMD, ui32CPUVAddr); + + ptent = ptep_modify_prot_start(&init_mm, ui32CPUVAddr, psPTE); + ptent = pte_wrprotect(ptent); + ptep_modify_prot_commit(&init_mm, ui32CPUVAddr, 
psPTE, ptent); + + flush_tlb_all(); + +} + +#else /* defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND) */ + +static INLINE IMG_VOID MakeKernelPageReadWrite(IMG_PVOID ulCPUVAddr) +{ + PVR_UNREFERENCED_PARAMETER(ulCPUVAddr); +} + +static INLINE IMG_VOID MakeKernelPageReadOnly(IMG_PVOID ulCPUVAddr) +{ + PVR_UNREFERENCED_PARAMETER(ulCPUVAddr); +} + +#endif /* defined(PVRSRV_MMU_MAKE_READWRITE_ON_DEMAND) */ + +/*___________________________________________________________________________ + + Information for SUPPORT_PDUMP_MULTI_PROCESS feature. + + The client marked for pdumping will set the bPDumpActive flag in + the MMU Context (see MMU_Initialise). + + Shared heap allocations should be persistent so all apps which + are pdumped will see the allocation. Persistent flag over-rides + the bPDumpActive flag (see pdump_common.c/DbgWrite function). + + The idea is to dump PT,DP for shared heap allocations, but only + dump the PDE if the allocation is mapped into the kernel or active + client context. This ensures if a background app allocates on a + shared heap then all clients can access it in the pdump toolchain. + + + + PD PT DP + +-+ + | |---> +-+ + +-+ | |---> +-+ + +-+ + + + +-+ + + PD allocation/free: pdump flags are 0 (only need PD for active apps) + PT allocation/free: pdump flags are 0 + unless PT is for a shared heap, in which case persistent is set + PD entries (MMU init/insert shared heap): + only pdump if PDE is on the active MMU context, flags are 0 + PD entries (PT alloc): + pdump flags are 0 if kernel heap + pdump flags are 0 if shared heap and PDE is on active MMU context + otherwise ignore. + PT entries pdump flags are 0 + unless PTE is for a shared heap, in which case persistent is set + + NOTE: PDump common code:- + PDumpMallocPages and PDumpMemKM also set the persistent flag for + shared heap allocations. + + ___________________________________________________________________________ +*/ + + +/*! 
******************************************************************************
 FUNCTION:	MMU_IsHeapShared

 PURPOSE:	Is this heap shared?
 PARAMETERS: In: pMMU_Heap
 RETURNS:	true if heap is shared
******************************************************************************/
IMG_BOOL MMU_IsHeapShared(MMU_HEAP* pMMUHeap)
{
	/* Classify purely on the heap's declared memory-heap type */
	switch(pMMUHeap->psDevArena->DevMemHeapType)
	{
		case DEVICE_MEMORY_HEAP_SHARED :
		case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
			return IMG_TRUE;
		case DEVICE_MEMORY_HEAP_PERCONTEXT :
		case DEVICE_MEMORY_HEAP_KERNEL :
			return IMG_FALSE;
		default:
		{
			PVR_DPF((PVR_DBG_ERROR, "MMU_IsHeapShared: ERROR invalid heap type"));
			return IMG_FALSE;
		}
	}
}

#ifdef SUPPORT_SGX_MMU_BYPASS
/*!
******************************************************************************
 FUNCTION:	EnableHostAccess

 PURPOSE:	Enables Host accesses to device memory, bypassing the device
			MMU address translation

 PARAMETERS: In: psMMUContext
 RETURNS:	None
******************************************************************************/
IMG_VOID
EnableHostAccess (MMU_CONTEXT *psMMUContext)
{
	IMG_UINT32 ui32RegVal;
	IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;

	/*
		bypass the MMU for the host port requestor,
		conserving bypass state of other requestors
	*/
	ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);

	OSWriteHWReg(pvRegsBaseKM,
				EUR_CR_BIF_CTRL,
				ui32RegVal | EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
	/* assume we're not wiping-out any other bits */
	PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
}

/*!
+****************************************************************************** + FUNCTION: DisableHostAccess + + PURPOSE: Disables Host accesses to device memory, by passing the device + MMU address translation + + PARAMETERS: In: psMMUContext + RETURNS: None +******************************************************************************/ +IMG_VOID +DisableHostAccess (MMU_CONTEXT *psMMUContext) +{ + IMG_UINT32 ui32RegVal; + IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM; + + /* + disable MMU-bypass for the host port requestor, + conserving bypass state of other requestors + and flushing all caches/tlbs + */ + OSWriteHWReg(pvRegsBaseKM, + EUR_CR_BIF_CTRL, + ui32RegVal & ~EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK); + /* assume we're not wiping-out any other bits */ + PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, 0); +} +#endif + + +#if defined(SGX_FEATURE_SYSTEM_CACHE) +/*! +****************************************************************************** + FUNCTION: MMU_InvalidateSystemLevelCache + + PURPOSE: Invalidates the System Level Cache to purge stale PDEs and PTEs + + PARAMETERS: In: psDevInfo + RETURNS: None + +******************************************************************************/ +static IMG_VOID MMU_InvalidateSystemLevelCache(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + #if defined(SGX_FEATURE_MP) + psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_SL; + #else + /* The MMU always bypasses the SLC */ + PVR_UNREFERENCED_PARAMETER(psDevInfo); + #endif /* SGX_FEATURE_MP */ +} +#endif /* SGX_FEATURE_SYSTEM_CACHE */ + +/*! 
******************************************************************************
 FUNCTION:	MMU_InvalidateDirectoryCache

 PURPOSE:	Invalidates the page directory cache + page table cache + requestor TLBs

 PARAMETERS: In: psDevInfo
 RETURNS:	None

******************************************************************************/
IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo)
{
	/* Flag the invalidation; the microkernel acts on ui32CacheControl */
	psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PD;
	#if defined(SGX_FEATURE_SYSTEM_CACHE)
	MMU_InvalidateSystemLevelCache(psDevInfo);
	#endif /* SGX_FEATURE_SYSTEM_CACHE */
}


/*!
******************************************************************************
 FUNCTION:	MMU_InvalidatePageTableCache

 PURPOSE:	Invalidates the page table cache + requestor TLBs

 PARAMETERS: In: psDevInfo
 RETURNS:	None

******************************************************************************/
static IMG_VOID MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO *psDevInfo)
{
	/* Flag the invalidation; the microkernel acts on ui32CacheControl */
	psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PT;
	#if defined(SGX_FEATURE_SYSTEM_CACHE)
	MMU_InvalidateSystemLevelCache(psDevInfo);
	#endif /* SGX_FEATURE_SYSTEM_CACHE */
}

#if defined(FIX_HW_BRN_31620)
/*!
******************************************************************************
 FUNCTION:	BRN31620InvalidatePageTableEntry

 PURPOSE:	Frees page tables in PDE cache line chunks re-wiring the
			dummy page when required

 PARAMETERS: In: psMMUContext, ui32PDIndex, ui32PTIndex
 RETURNS:	None

******************************************************************************/
static IMG_VOID BRN31620InvalidatePageTableEntry(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32PDIndex, IMG_UINT32 ui32PTIndex, IMG_UINT32 *pui32PTE)
{
	PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;

	/*
	 * Note: We can't tell at this stage if this PT will be freed before
	 * the end of the function so we always wire up the dummy page
	 * to the PT.
*/
	/* BRN31620: this PDE cache line's designated dummy slot must always map
	   the dummy page; all other invalidated PTEs are simply zeroed. */
	if (((ui32PDIndex % (BRN31620_PDE_CACHE_FILL_SIZE/BRN31620_PT_ADDRESS_RANGE_SIZE)) == BRN31620_DUMMY_PDE_INDEX)
		&& (ui32PTIndex == BRN31620_DUMMY_PTE_INDEX))
	{
		*pui32PTE = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
								| SGX_MMU_PTE_DUMMY_PAGE
								| SGX_MMU_PTE_READONLY
								| SGX_MMU_PTE_VALID;
	}
	else
	{
		*pui32PTE = 0;
	}
}

/*!
******************************************************************************
	FUNCTION:   BRN31620FreePageTable

	PURPOSE:    Frees page tables in PDE cache line chunks re-wiring the
	            dummy page when required.  PTs are only truly released once
	            every PD entry in the same PDE cache line is unreferenced;
	            until then the PT info is parked in apsPTInfoListSave.

	PARAMETERS: In: psMMUHeap, ui32PDIndex
	RETURNS:    IMG_TRUE if we freed any PT's (caller must then flush the
	            PD cache rather than just the PT cache)

******************************************************************************/
static IMG_BOOL BRN31620FreePageTable(MMU_HEAP *psMMUHeap, IMG_UINT32 ui32PDIndex)
{
	MMU_CONTEXT *psMMUContext = psMMUHeap->psMMUContext;
	PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;
	IMG_UINT32 ui32PDCacheLine = ui32PDIndex >> BRN31620_PDES_PER_CACHE_LINE_SHIFT;
	/* NOTE(review): declared IMG_UINT32 but used as a boolean and returned
	   as IMG_BOOL - harmless, but IMG_BOOL would be the consistent type. */
	IMG_UINT32 bFreePTs = IMG_FALSE;
	IMG_UINT32 *pui32Tmp;

	PVR_ASSERT(psMMUHeap != IMG_NULL);

	/*
	 * Clear the PT info for this PD index so even if we don't
	 * free the memory here apsPTInfoList[PDIndex] will trigger
	 * an "allocation" in _DeferredAllocPagetables which
	 * bumps up the refcount.
	 */
	PVR_ASSERT(psMMUContext->apsPTInfoListSave[ui32PDIndex] == IMG_NULL);

	psMMUContext->apsPTInfoListSave[ui32PDIndex] = psMMUContext->apsPTInfoList[ui32PDIndex];
	psMMUContext->apsPTInfoList[ui32PDIndex] = IMG_NULL;

	/* Check if this was the last PT in the cache line */
	if (--psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine] == 0)
	{
		IMG_UINT32 i;
		IMG_UINT32 ui32PDIndexStart = ui32PDCacheLine * BRN31620_PDES_PER_CACHE_LINE_SIZE;
		IMG_UINT32 ui32PDIndexEnd = ui32PDIndexStart + BRN31620_PDES_PER_CACHE_LINE_SIZE;
		IMG_UINT32 ui32PDBitMaskIndex, ui32PDBitMaskShift;

		/* Free all PT's in cache line */
		for (i=ui32PDIndexStart;i<ui32PDIndexEnd;i++)
		{
			/* This PT is _really_ being freed now */
			psMMUContext->apsPTInfoList[i] = psMMUContext->apsPTInfoListSave[i];
			psMMUContext->apsPTInfoListSave[i] = IMG_NULL;
			_DeferredFreePageTable(psMMUHeap, i - psMMUHeap->ui32PDBaseIndex, IMG_TRUE);
		}

		ui32PDBitMaskIndex = ui32PDCacheLine >> BRN31620_CACHE_FLUSH_BITS_SHIFT;
		ui32PDBitMaskShift = ui32PDCacheLine & BRN31620_CACHE_FLUSH_BITS_MASK;

		/* Check if this is a shared heap */
		if (MMU_IsHeapShared(psMMUHeap))
		{
			/* Mark the remove of the Page Table from all memory contexts */
			MMU_CONTEXT *psMMUContextWalker = (MMU_CONTEXT*) psMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;

			while(psMMUContextWalker)
			{
				psMMUContextWalker->ui32PDChangeMask[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift;

				/*
				 * We've just cleared a cache line's worth of PDE's so we need
				 * to wire up the dummy PT
				 */
				MakeKernelPageReadWrite(psMMUContextWalker->pvPDCpuVAddr);
				pui32Tmp = (IMG_UINT32 *) psMMUContextWalker->pvPDCpuVAddr;
				pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
											| SGX_MMU_PDE_PAGE_SIZE_4K
											| SGX_MMU_PDE_DUMMY_PAGE
											| SGX_MMU_PDE_VALID;
				MakeKernelPageReadOnly(psMMUContextWalker->pvPDCpuVAddr);

				PDUMPCOMMENT("BRN31620 Re-wire dummy PT due to releasing PT allocation block");
				PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContextWalker->hPDOSMemHandle, (IMG_VOID*)&pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
				psMMUContextWalker = psMMUContextWalker->psNext;
			}
		}
		else
		{
			psMMUContext->ui32PDChangeMask[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift;

			/*
			 * We've just cleared a cache line's worth of PDE's so we need
			 * to wire up the dummy PT
			 */
			MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
			pui32Tmp = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
			pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
										| SGX_MMU_PDE_PAGE_SIZE_4K
										| SGX_MMU_PDE_DUMMY_PAGE
										| SGX_MMU_PDE_VALID;
			MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);

			PDUMPCOMMENT("BRN31620 Re-wire dummy PT due to releasing PT allocation block");
			PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32Tmp[ui32PDIndexStart + BRN31620_DUMMY_PDE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
		}
		/* We've freed a cacheline's worth of PDE's so trigger a PD cache flush */
		bFreePTs = IMG_TRUE;
	}

	return bFreePTs;
}
#endif

/*!
******************************************************************************
	FUNCTION:   _AllocPageTableMemory

	PURPOSE:    Allocate physical memory for a page table

	PARAMETERS: In:  pMMUHeap - the mmu
	            In:  psPTInfoList - PT info
	            Out: psDevPAddr - device physical address for new PT
	RETURNS:    IMG_TRUE - Success
	            IMG_FALSE - Failed
******************************************************************************/
static IMG_BOOL
_AllocPageTableMemory (MMU_HEAP *pMMUHeap,
						MMU_PT_INFO *psPTInfoList,
						IMG_DEV_PHYADDR *psDevPAddr)
{
	IMG_DEV_PHYADDR sDevPAddr;
	IMG_CPU_PHYADDR sCpuPAddr;

	/*
		depending on the specific system, pagetables are allocated from system memory
		or device local memory.  For now, just look for at least a valid local heap/arena
	*/
	if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
	{
		//FIXME: replace with an RA, this allocator only handles 4k allocs
		if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
						 pMMUHeap->ui32PTSize,
						 SGX_MMU_PAGE_SIZE,//FIXME: assume 4K page size for now (wastes memory for smaller pagetables
						 IMG_NULL,
						 0,
						 IMG_NULL,
						 (IMG_VOID **)&psPTInfoList->PTPageCpuVAddr,
						 &psPTInfoList->hPTPageOSMemHandle) != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to OSAllocPages failed"));
			return IMG_FALSE;
		}

		/*
			Force the page to read only, we will make it read/write as
			and when we need to
		*/
		MakeKernelPageReadOnly(psPTInfoList->PTPageCpuVAddr);

		/* translate address to device physical */
		if(psPTInfoList->PTPageCpuVAddr)
		{
			sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->hPTPageOSMemHandle,
										  psPTInfoList->PTPageCpuVAddr);
		}
		else
		{
			/* This isn't used in all cases since not all ports currently support
			 * OSMemHandleToCpuPAddr() */
			sCpuPAddr = OSMemHandleToCpuPAddr(psPTInfoList->hPTPageOSMemHandle, 0);
		}

		sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
	}
	else
	{
		IMG_SYS_PHYADDR sSysPAddr;

		/*
			just allocate from the first local memory arena
			(unlikely to be more than one local mem area(?))
		*/
		//FIXME: just allocate a 4K page for each PT for now
		if(RA_Alloc(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena,
					SGX_MMU_PAGE_SIZE,//pMMUHeap->ui32PTSize,
					IMG_NULL,
					IMG_NULL,
					0,
					SGX_MMU_PAGE_SIZE,//pMMUHeap->ui32PTSize,
					0,
					IMG_NULL,
					0,
					&(sSysPAddr.uiAddr))!= IMG_TRUE)
		{
			PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to RA_Alloc failed"));
			return IMG_FALSE;
		}

		/* derive the CPU virtual address */
		sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
		/* note: actual amount is pMMUHeap->ui32PTSize but must be a multiple of 4k pages */
		psPTInfoList->PTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
													SGX_MMU_PAGE_SIZE,
													PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
													&psPTInfoList->hPTPageOSMemHandle);
		if(!psPTInfoList->PTPageCpuVAddr)
		{
			PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR failed to map page tables"));
			return IMG_FALSE;
		}

		/* translate address to device physical */
		sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);

		#if PAGE_TEST
		PageTest(psPTInfoList->PTPageCpuVAddr, sDevPAddr);
		#endif
	}

	MakeKernelPageReadWrite(psPTInfoList->PTPageCpuVAddr);
#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
	{
		IMG_UINT32 *pui32Tmp;
		IMG_UINT32 i;

		pui32Tmp = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
		/* point the new PT entries to the dummy data page */
		for(i=0; i<pMMUHeap->ui32PTNumEntriesUsable; i++)
		{
			pui32Tmp[i] = (pMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
						| SGX_MMU_PTE_VALID;
		}
		/* zero the remaining allocated entries, if any */
		for(; i<pMMUHeap->ui32PTNumEntriesAllocated; i++)
		{
			pui32Tmp[i] = 0;
		}
	}
#else
	/* Zero the page table. */
	OSMemSet(psPTInfoList->PTPageCpuVAddr, 0, pMMUHeap->ui32PTSize);
#endif
	MakeKernelPageReadOnly(psPTInfoList->PTPageCpuVAddr);

#if defined(PDUMP)
	{
		IMG_UINT32 ui32Flags = 0;
#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
		/* make sure shared heap PT allocs are always pdumped */
		ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0;
#endif
		/* pdump the PT malloc */
		PDUMPMALLOCPAGETABLE(&pMMUHeap->psMMUContext->psDeviceNode->sDevId, psPTInfoList->hPTPageOSMemHandle, 0, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, PDUMP_PT_UNIQUETAG);
		/* pdump the PT Pages */
		PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfoList->hPTPageOSMemHandle, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
	}
#endif

	/* return the DevPAddr */
	*psDevPAddr = sDevPAddr;

	return IMG_TRUE;
}


/*!
******************************************************************************
	FUNCTION:   _FreePageTableMemory

	PURPOSE:    Free physical memory for a page table

	PARAMETERS: In: pMMUHeap - the mmu
	            In: psPTInfoList - PT info to free
	RETURNS:    NONE
******************************************************************************/
static IMG_VOID
_FreePageTableMemory (MMU_HEAP *pMMUHeap, MMU_PT_INFO *psPTInfoList)
{
	/*
		free the PT page:
		depending on the specific system, pagetables are allocated from system memory
		or device local memory.  For now, just look for at least a valid local heap/arena
	*/
	if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
	{
		/* Force the page to read write before we free it*/
		MakeKernelPageReadWrite(psPTInfoList->PTPageCpuVAddr);

		//FIXME: replace with an RA, this allocator only handles 4k allocs
		OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
					  pMMUHeap->ui32PTSize,
					  psPTInfoList->PTPageCpuVAddr,
					  psPTInfoList->hPTPageOSMemHandle);
	}
	else
	{
		IMG_SYS_PHYADDR sSysPAddr;
		IMG_CPU_PHYADDR sCpuPAddr;

		/* derive the system physical address */
		sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->hPTPageOSMemHandle,
									  psPTInfoList->PTPageCpuVAddr);
		sSysPAddr = SysCpuPAddrToSysPAddr (sCpuPAddr);

		/* unmap the CPU mapping */
		/* note: actual amount is pMMUHeap->ui32PTSize but must be a multiple of 4k pages */
		OSUnMapPhysToLin(psPTInfoList->PTPageCpuVAddr,
                         SGX_MMU_PAGE_SIZE,
                         PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
                         psPTInfoList->hPTPageOSMemHandle);

		/*
			just free from the first local memory arena
			(unlikely to be more than one local mem area(?))
		*/
		RA_Free (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
	}
}



/*!
******************************************************************************
	FUNCTION:   _DeferredFreePageTable

	PURPOSE:    Free one page table associated with an MMU.

	PARAMETERS: In: pMMUHeap - the mmu heap
	            In: ui32PTIndex - index of the page table to free relative
	                to the base of heap.
	            In: bOSFreePT - actually release the PT memory and info
	                (IMG_FALSE only clears entries/counters)
	RETURNS:    None
******************************************************************************/
static IMG_VOID
_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT)
{
	IMG_UINT32 *pui32PDEntry;
	IMG_UINT32 i;
	IMG_UINT32 ui32PDIndex;
	SYS_DATA *psSysData;
	MMU_PT_INFO **ppsPTInfoList;

	SysAcquireData(&psSysData);

	/* find the index/offset in PD entries */
	ui32PDIndex = pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;

	/* set the base PT info */
	ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

	{
#if PT_DEBUG
		if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount > 0)
		{
			DumpPT(ppsPTInfoList[ui32PTIndex]);
			/* Fall-through, will fail assert */
		}
#endif

		/* Assert that all mappings have gone */
		PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == IMG_NULL || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0);
	}

#if defined(PDUMP)
	{
		IMG_UINT32 ui32Flags = 0;
#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
		ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? PDUMP_FLAGS_PERSISTENT : 0;
#endif
		/* pdump the PT free */
		PDUMPCOMMENT("Free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
		if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
		{
			PDUMPFREEPAGETABLE(&pMMUHeap->psMMUContext->psDeviceNode->sDevId, ppsPTInfoList[ui32PTIndex]->hPTPageOSMemHandle, ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr, pMMUHeap->ui32PTSize, ui32Flags, PDUMP_PT_UNIQUETAG);
		}
	}
#endif

	switch(pMMUHeap->psDevArena->DevMemHeapType)
	{
		case DEVICE_MEMORY_HEAP_SHARED :
		case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
		{
			/* Remove Page Table from all memory contexts */
			MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;

			while(psMMUContext)
			{
				/* get the PD CPUVAddr base and advance to the first entry */
				MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
				pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
				pui32PDEntry += ui32PDIndex;

#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
				/* point the PD entry to the dummy PT */
				pui32PDEntry[ui32PTIndex] = (psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
											>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
											| SGX_MMU_PDE_PAGE_SIZE_4K
											| SGX_MMU_PDE_VALID;
#else
				/* free the entry */
				if(bOSFreePT)
				{
					pui32PDEntry[ui32PTIndex] = 0;
				}
#endif
				MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
			#if defined(PDUMP)
				/* pdump the PD Page modifications */
			#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
				if(psMMUContext->bPDumpActive)
			#endif
				{
					PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
				}
			#endif
				/* advance to next context */
				psMMUContext = psMMUContext->psNext;
			}
			break;
		}
		case DEVICE_MEMORY_HEAP_PERCONTEXT :
		case DEVICE_MEMORY_HEAP_KERNEL :
		{
			MakeKernelPageReadWrite(pMMUHeap->psMMUContext->pvPDCpuVAddr);
			/* Remove Page Table from this memory context only */
			pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
			pui32PDEntry += ui32PDIndex;

#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
			/* point the PD entry to the dummy PT */
			pui32PDEntry[ui32PTIndex] = (pMMUHeap->psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
										>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
										| SGX_MMU_PDE_PAGE_SIZE_4K
										| SGX_MMU_PDE_VALID;
#else
			/* free the entry */
			if(bOSFreePT)
			{
				pui32PDEntry[ui32PTIndex] = 0;
			}
#endif
			MakeKernelPageReadOnly(pMMUHeap->psMMUContext->pvPDCpuVAddr);

			/* pdump the PD Page modifications */
			PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, pMMUHeap->psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
			break;
		}
		default:
		{
			PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePagetable: ERROR invalid heap type"));
			return;
		}
	}

	/* clear the PT entries in each PT page */
	if(ppsPTInfoList[ui32PTIndex] != IMG_NULL)
	{
		if(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != IMG_NULL)
		{
			IMG_PUINT32 pui32Tmp;

			MakeKernelPageReadWrite(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);
			pui32Tmp = (IMG_UINT32*)ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr;

			/* clear the entries */
			for(i=0;
				(i<pMMUHeap->ui32PTETotalUsable) && (i<pMMUHeap->ui32PTNumEntriesUsable);
				 i++)
			{
				/* over-allocated PT entries for 4MB data page case should never be non-zero */
				pui32Tmp[i] = 0;
			}
			MakeKernelPageReadOnly(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);

			/*
				free the pagetable memory
			*/
			if(bOSFreePT)
			{
				_FreePageTableMemory(pMMUHeap, ppsPTInfoList[ui32PTIndex]);
			}

			/*
				decrement the PT Entry Count by the number
				of entries we've cleared in this pass
			*/
			pMMUHeap->ui32PTETotalUsable -= i;
		}
		else
		{
			/* decrement the PT Entry Count by a page's worth of entries */
			pMMUHeap->ui32PTETotalUsable -= pMMUHeap->ui32PTNumEntriesUsable;
		}

		if(bOSFreePT)
		{
			/* free the pt info */
			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
						sizeof(MMU_PT_INFO),
						ppsPTInfoList[ui32PTIndex],
						IMG_NULL);
			ppsPTInfoList[ui32PTIndex] = IMG_NULL;
		}
	}
	else
	{
		/* decrement the PT Entry Count by a page's worth of usable entries */
		pMMUHeap->ui32PTETotalUsable -= pMMUHeap->ui32PTNumEntriesUsable;
	}

	PDUMPCOMMENT("Finished free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
}

/*!
******************************************************************************
	FUNCTION:   _DeferredFreePageTables

	PURPOSE:    Free the page tables associated with an MMU.

	PARAMETERS: In: pMMUHeap - the mmu
	RETURNS:    None
******************************************************************************/
static IMG_VOID
_DeferredFreePageTables (MMU_HEAP *pMMUHeap)
{
	IMG_UINT32 i;
#if defined(FIX_HW_BRN_31620)
	MMU_CONTEXT *psMMUContext = pMMUHeap->psMMUContext;
	IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
	IMG_UINT32 ui32PDIndex;
	IMG_UINT32 *pui32Tmp;
	IMG_UINT32 j;
#endif
#if defined(PDUMP)
	PDUMPCOMMENT("Free PTs (MMU Context ID == %u, PDBaseIndex == %u, PT count == 0x%x)",
			pMMUHeap->psMMUContext->ui32PDumpMMUContextID,
			pMMUHeap->ui32PDBaseIndex,
			pMMUHeap->ui32PageTableCount);
#endif
#if defined(FIX_HW_BRN_31620)
	for(i=0; i<pMMUHeap->ui32PageTableCount; i++)
	{
		ui32PDIndex = (pMMUHeap->ui32PDBaseIndex + i);

		if (psMMUContext->apsPTInfoList[ui32PDIndex])
		{
			if (psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr)
			{
				/*
				 * We have to do this to setup the dummy page as
				 * not all heaps are PD cache size or aligned
				 */
				for (j=0;j<SGX_MMU_PT_SIZE;j++)
				{
					pui32Tmp = (IMG_UINT32 *) psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr;
					BRN31620InvalidatePageTableEntry(psMMUContext, ui32PDIndex, j, &pui32Tmp[j]);
				}
			}
			/* Free the PT and NULL's out the PTInfo */
			if (BRN31620FreePageTable(pMMUHeap, ui32PDIndex) == IMG_TRUE)
			{
				bInvalidateDirectoryCache = IMG_TRUE;
			}
		}
	}

	/*
	 * Due to freeing PT's in chunks we might need to flush the PT cache
	 * rather than the directory cache
	 */
	if (bInvalidateDirectoryCache)
	{
		MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
	}
	else
	{
		MMU_InvalidatePageTableCache(pMMUHeap->psMMUContext->psDevInfo);
	}
#else
	for(i=0; i<pMMUHeap->ui32PageTableCount; i++)
	{
		_DeferredFreePageTable(pMMUHeap, i, IMG_TRUE);
	}
	MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
#endif
}


/*!
******************************************************************************
	FUNCTION:   _DeferredAllocPagetables

	PURPOSE:    allocates page tables at time of allocation

	PARAMETERS: In: pMMUHeap - the mmu heap
					DevVAddr - devVAddr of allocation
					ui32Size - size of allocation
	RETURNS:    IMG_TRUE - Success
	            IMG_FALSE - Failed
******************************************************************************/
static IMG_BOOL
_DeferredAllocPagetables(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
{
	IMG_UINT32 ui32PageTableCount;
	IMG_UINT32 ui32PDIndex;
	IMG_UINT32 i;
	IMG_UINT32 *pui32PDEntry;
	MMU_PT_INFO **ppsPTInfoList;
	SYS_DATA *psSysData;
	IMG_DEV_VIRTADDR sHighDevVAddr;
#if defined(FIX_HW_BRN_31620)
	IMG_BOOL bFlushSystemCache = IMG_FALSE;
	IMG_BOOL bSharedPT = IMG_FALSE;
	IMG_DEV_VIRTADDR sDevVAddrRequestStart;
	IMG_DEV_VIRTADDR sDevVAddrRequestEnd;
	IMG_UINT32 ui32PDRequestStart;
	IMG_UINT32 ui32PDRequestEnd;
	IMG_UINT32 ui32ModifiedCachelines[BRN31620_CACHE_FLUSH_INDEX_SIZE];
#endif

	/* Check device linear address */
#if SGX_FEATURE_ADDRESS_SPACE_SIZE < 32
	PVR_ASSERT(DevVAddr.uiAddr < (1<<SGX_FEATURE_ADDRESS_SPACE_SIZE));
#endif

	/* get the sysdata */
	SysAcquireData(&psSysData);

	/* find the index/offset in PD entries */
	ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;

	/* how many PDs does the allocation occupy? */
	/* first check for overflows */
	if((UINT32_MAX_VALUE - DevVAddr.uiAddr)
		< (ui32Size + pMMUHeap->ui32DataPageMask + pMMUHeap->ui32PTMask))
	{
		/* detected overflow, clamp to highest address */
		sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
	}
	else
	{
		sHighDevVAddr.uiAddr = DevVAddr.uiAddr
								+ ui32Size
								+ pMMUHeap->ui32DataPageMask
								+ pMMUHeap->ui32PTMask;
	}

	ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;

	/* Fix allocation of last 4MB */
	if (ui32PageTableCount == 0)
		ui32PageTableCount = 1024;

#if defined(FIX_HW_BRN_31620)
	for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
	{
		ui32ModifiedCachelines[i] = 0;
	}

	/*****************************************************************/
	/* Save off requested data and round allocation to PD cache line */
	/*****************************************************************/
	sDevVAddrRequestStart = DevVAddr;
	ui32PDRequestStart = ui32PDIndex;
	sDevVAddrRequestEnd = sHighDevVAddr;
	ui32PDRequestEnd = ui32PageTableCount - 1;

	/* Round allocations down to the PD cacheline */
	DevVAddr.uiAddr = DevVAddr.uiAddr & (~BRN31620_PDE_CACHE_FILL_MASK);

	/* Round the end address of the PD allocation to cacheline */
	sHighDevVAddr.uiAddr = ((sHighDevVAddr.uiAddr + (BRN31620_PDE_CACHE_FILL_SIZE - 1)) & (~BRN31620_PDE_CACHE_FILL_MASK));

	ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
	ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;

	/* Fix allocation of last 4MB */
	if (ui32PageTableCount == 0)
		ui32PageTableCount = 1024;
#endif

	ui32PageTableCount -= ui32PDIndex;

	/* get the PD CPUVAddr base and advance to the first entry */
	pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
	pui32PDEntry += ui32PDIndex;

	/* and advance to the first PT info list */
	ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

#if defined(PDUMP)
	{
		IMG_UINT32 ui32Flags = 0;

		/* pdump the PD Page modifications */
		if( MMU_IsHeapShared(pMMUHeap) )
		{
			ui32Flags |= PDUMP_FLAGS_CONTINUOUS;
		}
		PDUMPCOMMENTWITHFLAGS(ui32Flags, "Alloc PTs (MMU Context ID == %u, PDBaseIndex == %u, Size == 0x%x)",
				pMMUHeap->psMMUContext->ui32PDumpMMUContextID,
				pMMUHeap->ui32PDBaseIndex,
				ui32Size);
		PDUMPCOMMENTWITHFLAGS(ui32Flags, "Alloc page table (page count == %08X)", ui32PageTableCount);
		PDUMPCOMMENTWITHFLAGS(ui32Flags, "Page directory mods (page count == %08X)", ui32PageTableCount);
	}
#endif
	/* walk the psPTInfoList to see what needs allocating: */
	for(i=0; i<ui32PageTableCount; i++)
	{
		if(ppsPTInfoList[i] == IMG_NULL)
		{
#if defined(FIX_HW_BRN_31620)
			/* Check if we have a saved PT (i.e. this PDE cache line is still live) */
			if (pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i])
			{
				/* Only make this PTInfo "live" if it's requested */
				if (((ui32PDIndex + i) >= ui32PDRequestStart) && ((ui32PDIndex + i) <= ui32PDRequestEnd))
				{
					IMG_UINT32 ui32PDCacheLine = (ui32PDIndex + i) >> BRN31620_PDES_PER_CACHE_LINE_SHIFT;

					ppsPTInfoList[i] = pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i];
					pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i] = IMG_NULL;

					pMMUHeap->psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine]++;
				}
			}
			else
			{
#endif
			OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
						 sizeof (MMU_PT_INFO),
						 (IMG_VOID **)&ppsPTInfoList[i], IMG_NULL,
						 "MMU Page Table Info");
			if (ppsPTInfoList[i] == IMG_NULL)
			{
				PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocMem failed"));
				return IMG_FALSE;
			}
			OSMemSet (ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO));
#if defined(FIX_HW_BRN_31620)
			}
#endif
		}
#if defined(FIX_HW_BRN_31620)
		/* Only try to allocate if ppsPTInfoList[i] is valid */
		if (ppsPTInfoList[i])
		{
#endif
		if(ppsPTInfoList[i]->hPTPageOSMemHandle == IMG_NULL
		&& ppsPTInfoList[i]->PTPageCpuVAddr == IMG_NULL)
		{
			IMG_DEV_PHYADDR	sDevPAddr;
#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
			IMG_UINT32 *pui32Tmp;
			IMG_UINT32 j;
#else
#if !defined(FIX_HW_BRN_31620)
			/* no page table has been allocated so allocate one */
			PVR_ASSERT(pui32PDEntry[i] == 0);
#endif
#endif
			if(_AllocPageTableMemory (pMMUHeap, ppsPTInfoList[i], &sDevPAddr) != IMG_TRUE)
			{
				PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to _AllocPageTableMemory failed"));
				return IMG_FALSE;
			}
#if defined(FIX_HW_BRN_31620)
			bFlushSystemCache = IMG_TRUE;
			/* Bump up the page table count if required */
			{
				IMG_UINT32 ui32PD;
				IMG_UINT32 ui32PDCacheLine;
				IMG_UINT32 ui32PDBitMaskIndex;
				IMG_UINT32 ui32PDBitMaskShift;

				ui32PD = ui32PDIndex + i;
				ui32PDCacheLine = ui32PD >> BRN31620_PDES_PER_CACHE_LINE_SHIFT;
				ui32PDBitMaskIndex = ui32PDCacheLine >> BRN31620_CACHE_FLUSH_BITS_SHIFT;
				ui32PDBitMaskShift = ui32PDCacheLine & BRN31620_CACHE_FLUSH_BITS_MASK;
				ui32ModifiedCachelines[ui32PDBitMaskIndex] |= 1 << ui32PDBitMaskShift;

				/* Add 1 to ui32PD as we want the count, not a range */
				if ((pMMUHeap->ui32PDBaseIndex + pMMUHeap->ui32PageTableCount) < (ui32PD + 1))
				{
					pMMUHeap->ui32PageTableCount = (ui32PD + 1) - pMMUHeap->ui32PDBaseIndex;
				}

				if (((ui32PDIndex + i) >= ui32PDRequestStart) && ((ui32PDIndex + i) <= ui32PDRequestEnd))
				{
					pMMUHeap->psMMUContext->ui32PDCacheRangeRefCount[ui32PDCacheLine]++;
				}
			}
#endif
			switch(pMMUHeap->psDevArena->DevMemHeapType)
			{
				case DEVICE_MEMORY_HEAP_SHARED :
				case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
				{
					/* insert Page Table into all memory contexts */
					MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;

					while(psMMUContext)
					{
						MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
						/* get the PD CPUVAddr base and advance to the first entry */
						pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
						pui32PDEntry += ui32PDIndex;

						/* insert the page, specify the data page size and make the pde valid */
						pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
										| pMMUHeap->ui32PDEPageSizeCtrl
										| SGX_MMU_PDE_VALID;
						MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
					#if defined(PDUMP)
						/* pdump the PD Page modifications */
					#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
						if(psMMUContext->bPDumpActive)
					#endif
						{
							//PDUMPCOMMENT("_DeferredAllocPTs: Dumping shared PDEs on context %d (%s)", psMMUContext->ui32PDumpMMUContextID, (psMMUContext->bPDumpActive) ? "active" : "");
							PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
						}
					#endif /* PDUMP */
						/* advance to next context */
						psMMUContext = psMMUContext->psNext;
					}
#if defined(FIX_HW_BRN_31620)
					bSharedPT = IMG_TRUE;
#endif
					break;
				}
				case DEVICE_MEMORY_HEAP_PERCONTEXT :
				case DEVICE_MEMORY_HEAP_KERNEL :
				{
					MakeKernelPageReadWrite(pMMUHeap->psMMUContext->pvPDCpuVAddr);
					/* insert Page Table into only this memory context */
					pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
									| pMMUHeap->ui32PDEPageSizeCtrl
									| SGX_MMU_PDE_VALID;
					MakeKernelPageReadOnly(pMMUHeap->psMMUContext->pvPDCpuVAddr);
					/* pdump the PD Page modifications */
					//PDUMPCOMMENT("_DeferredAllocPTs: Dumping kernel PDEs on context %d (%s)", pMMUHeap->psMMUContext->ui32PDumpMMUContextID, (pMMUHeap->psMMUContext->bPDumpActive) ? "active" : "");
					PDUMPPDENTRIES(&pMMUHeap->sMMUAttrib, pMMUHeap->psMMUContext->hPDOSMemHandle, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
					break;
				}
				default:
				{
					PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR invalid heap type"));
					return IMG_FALSE;
				}
			}

#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
			/* This is actually not to do with multiple mem contexts, but to do with the directory cache.
			   In the 1 context implementation of the MMU, the directory "cache" is actually a copy of the
			   page directory memory, and requires updating whenever the page directory changes, even if there
			   was no previous value in a particular entry
			 */
			MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
#endif
#if defined(FIX_HW_BRN_31620)
			/* If this PT is not in the requested range then save it and null out the main PTInfo */
			if (((ui32PDIndex + i) < ui32PDRequestStart) || ((ui32PDIndex + i) > ui32PDRequestEnd))
			{
				pMMUHeap->psMMUContext->apsPTInfoListSave[ui32PDIndex + i] = ppsPTInfoList[i];
				ppsPTInfoList[i] = IMG_NULL;
			}
#endif
		}
		else
		{
#if !defined(FIX_HW_BRN_31620)
			/* already have an allocated PT */
			PVR_ASSERT(pui32PDEntry[i] != 0);
#endif
		}
#if defined(FIX_HW_BRN_31620)
		}
#endif
	}

	/* NOTE(review): the #if nesting below is asymmetric - the opening brace of
	   "if (bFlushSystemCache)" is guarded by SGX_FEATURE_SYSTEM_CACHE while its
	   closing brace is guarded only by FIX_HW_BRN_31620; this only balances
	   when both features are defined together (as on this platform) - confirm
	   before reusing in other configs. */
	#if defined(SGX_FEATURE_SYSTEM_CACHE)
	#if defined(FIX_HW_BRN_31620)
	/* This function might not allocate any new PT's so check before flushing */
	if (bFlushSystemCache)
	{
	#endif

		MMU_InvalidateSystemLevelCache(pMMUHeap->psMMUContext->psDevInfo);
	#endif /* SGX_FEATURE_SYSTEM_CACHE */
	#if defined(FIX_HW_BRN_31620)
	}

	/* Handle the last 4MB roll over */
	sHighDevVAddr.uiAddr = sHighDevVAddr.uiAddr - 1;

	/* Update our PD flush mask if required */
	if (bFlushSystemCache)
	{
		MMU_CONTEXT *psMMUContext;

		if (bSharedPT)
		{
			/* NOTE(review): this declaration shadows the psMMUContext in the
			   enclosing scope - intentional here (walks the context list) but
			   easy to misread. */
			MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;

			while(psMMUContext)
			{
				for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
				{
					psMMUContext->ui32PDChangeMask[i] |= ui32ModifiedCachelines[i];
				}

				/* advance to next context */
				psMMUContext = psMMUContext->psNext;
			}
		}
		else
		{
			for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
			{
				pMMUHeap->psMMUContext->ui32PDChangeMask[i] |= ui32ModifiedCachelines[i];
			}
		}

		/*
		 * Always hook up the dummy page when we allocate a new range of PTs.
		 * It might be this is overwritten before the SGX access the dummy page
		 * but we don't care, it's a lot simpler to add this logic here.
		 */
		psMMUContext = pMMUHeap->psMMUContext;
		for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++)
		{
			IMG_UINT32 j;

			for(j=0;j<BRN31620_CACHE_FLUSH_BITS_SIZE;j++)
			{
				if (ui32ModifiedCachelines[i] & (1 << j))
				{
					PVRSRV_SGXDEV_INFO *psDevInfo = psMMUContext->psDevInfo;
					MMU_PT_INFO *psTempPTInfo = IMG_NULL;
					IMG_UINT32 *pui32Tmp;

					ui32PDIndex = (((i * BRN31620_CACHE_FLUSH_BITS_SIZE) + j) * BRN31620_PDES_PER_CACHE_LINE_SIZE) + BRN31620_DUMMY_PDE_INDEX;

					/* The PT for the dummy page might not be "live". If not get it from the saved pointer */
					if (psMMUContext->apsPTInfoList[ui32PDIndex])
					{
						psTempPTInfo = psMMUContext->apsPTInfoList[ui32PDIndex];
					}
					else
					{
						psTempPTInfo = psMMUContext->apsPTInfoListSave[ui32PDIndex];
					}

					PVR_ASSERT(psTempPTInfo != IMG_NULL);

					MakeKernelPageReadWrite(psTempPTInfo->PTPageCpuVAddr);
					pui32Tmp = (IMG_UINT32 *) psTempPTInfo->PTPageCpuVAddr;
					PVR_ASSERT(pui32Tmp != IMG_NULL);
					pui32Tmp[BRN31620_DUMMY_PTE_INDEX] = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
												| SGX_MMU_PTE_DUMMY_PAGE
												| SGX_MMU_PTE_READONLY
												| SGX_MMU_PTE_VALID;
					MakeKernelPageReadOnly(psTempPTInfo->PTPageCpuVAddr);
					PDUMPCOMMENT("BRN31620 Dump PTE for dummy page after wireing up new PT");
					PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psTempPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32Tmp[BRN31620_DUMMY_PTE_INDEX], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
				}
			}
		}
	}
	#endif

	return IMG_TRUE;
}


#if defined(PDUMP)
/*!
+ * FUNCTION: MMU_GetPDumpContextID + * + * RETURNS: pdump MMU context ID + */ +IMG_UINT32 MMU_GetPDumpContextID(IMG_HANDLE hDevMemContext) +{ + BM_CONTEXT *pBMContext = hDevMemContext; + PVR_ASSERT(pBMContext); + /* PRQA S 0505 1 */ /* PVR_ASSERT should catch NULL ptr */ + return pBMContext->psMMUContext->ui32PDumpMMUContextID; +} + +/*! + * FUNCTION: MMU_SetPDumpAttribs + * + * PURPOSE: Called from MMU_Initialise and MMU_Create. + * Sets up device-specific attributes for pdumping. + * FIXME: breaks variable size PTs. Really need separate per context + * and per heap attribs. + * + * INPUT: psDeviceNode - used to access deviceID + * INPUT: ui32DataPageMask - data page mask + * INPUT: ui32PTSize - PT size + * + * OUTPUT: psMMUAttrib - pdump MMU attributes + * + * RETURNS: none + */ +#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE) +# error "FIXME: breaks variable size pagetables" +#endif +static IMG_VOID MMU_SetPDumpAttribs(PDUMP_MMU_ATTRIB *psMMUAttrib, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32DataPageMask, + IMG_UINT32 ui32PTSize) +{ + /* Sets up device ID, contains pdump memspace name */ + psMMUAttrib->sDevId = psDeviceNode->sDevId; + + psMMUAttrib->pszPDRegRegion = IMG_NULL; + psMMUAttrib->ui32DataPageMask = ui32DataPageMask; + + psMMUAttrib->ui32PTEValid = SGX_MMU_PTE_VALID; + psMMUAttrib->ui32PTSize = ui32PTSize; + psMMUAttrib->ui32PTEAlignShift = SGX_MMU_PTE_ADDR_ALIGNSHIFT; + + psMMUAttrib->ui32PDEMask = SGX_MMU_PDE_ADDR_MASK; + psMMUAttrib->ui32PDEAlignShift = SGX_MMU_PDE_ADDR_ALIGNSHIFT; +} +#endif /* PDUMP */ + +/*! +****************************************************************************** + FUNCTION: MMU_Initialise + + PURPOSE: Called from BM_CreateContext. + Allocates the top level Page Directory 4k Page for the new context. 
+ + PARAMETERS: None + RETURNS: PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR +MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr) +{ + IMG_UINT32 *pui32Tmp; + IMG_UINT32 i; + IMG_CPU_VIRTADDR pvPDCpuVAddr; + IMG_DEV_PHYADDR sPDDevPAddr; + IMG_CPU_PHYADDR sCpuPAddr; + MMU_CONTEXT *psMMUContext; + IMG_HANDLE hPDOSMemHandle = IMG_NULL; + SYS_DATA *psSysData; + PVRSRV_SGXDEV_INFO *psDevInfo; +#if defined(PDUMP) + PDUMP_MMU_ATTRIB sMMUAttrib; +#endif + PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Initialise")); + + SysAcquireData(&psSysData); +#if defined(PDUMP) + /* Note: these attribs are on the stack, used only to pdump the MMU context + * creation. */ + MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode, + SGX_MMU_PAGE_MASK, + SGX_MMU_PT_SIZE * sizeof(IMG_UINT32)); +#endif + + OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (MMU_CONTEXT), + (IMG_VOID **)&psMMUContext, IMG_NULL, + "MMU Context"); + if (psMMUContext == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocMem failed")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + OSMemSet (psMMUContext, 0, sizeof(MMU_CONTEXT)); + + /* stick the devinfo in the context for subsequent use */ + psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + psMMUContext->psDevInfo = psDevInfo; + + /* record device node for subsequent use */ + psMMUContext->psDeviceNode = psDeviceNode; + + /* allocate 4k page directory page for the new context */ + if(psDeviceNode->psLocalDevMemArena == IMG_NULL) + { + if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + 0, + IMG_NULL, + &pvPDCpuVAddr, + &hPDOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed")); + return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + } + + if(pvPDCpuVAddr) + { + sCpuPAddr = OSMapLinToCPUPhys(hPDOSMemHandle, + pvPDCpuVAddr); 
+ } + else + { + /* This is not used in all cases, since not all ports currently + * support OSMemHandleToCpuPAddr */ + sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0); + } + sPDDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); + + #if PAGE_TEST + PageTest(pvPDCpuVAddr, sPDDevPAddr); + #endif + +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* Allocate dummy PT and Data pages for the first context to be created */ + if(!psDevInfo->pvMMUContextList) + { + /* Dummy PT page */ + if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + 0, + IMG_NULL, + &psDevInfo->pvDummyPTPageCpuVAddr, + &psDevInfo->hDummyPTPageOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed")); + return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + } + + if(psDevInfo->pvDummyPTPageCpuVAddr) + { + sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle, + psDevInfo->pvDummyPTPageCpuVAddr); + } + else + { + /* This is not used in all cases, since not all ports currently + * support OSMemHandleToCpuPAddr */ + sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyPTPageOSMemHandle, 0); + } + psDevInfo->sDummyPTDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); + + /* Dummy Data page */ + if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + 0, + IMG_NULL, + &psDevInfo->pvDummyDataPageCpuVAddr, + &psDevInfo->hDummyDataPageOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed")); + return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + } + + if(psDevInfo->pvDummyDataPageCpuVAddr) + { + sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle, + psDevInfo->pvDummyDataPageCpuVAddr); + } + else + { + sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyDataPageOSMemHandle, 0); + } + psDevInfo->sDummyDataDevPAddr = 
SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); + } +#endif /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */ +#if defined(FIX_HW_BRN_31620) + /* Allocate dummy Data pages for the first context to be created */ + if(!psDevInfo->pvMMUContextList) + { + IMG_UINT32 j; + /* Allocate dummy page */ + if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + 0, + IMG_NULL, + &psDevInfo->pvBRN31620DummyPageCpuVAddr, + &psDevInfo->hBRN31620DummyPageOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed")); + return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + } + + /* Get a physical address */ + if(psDevInfo->pvBRN31620DummyPageCpuVAddr) + { + sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPageOSMemHandle, + psDevInfo->pvBRN31620DummyPageCpuVAddr); + } + else + { + sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hBRN31620DummyPageOSMemHandle, 0); + } + + pui32Tmp = (IMG_UINT32 *)psDevInfo->pvBRN31620DummyPageCpuVAddr; + for(j=0; j<(SGX_MMU_PAGE_SIZE/4); j++) + { + pui32Tmp[j] = BRN31620_DUMMY_PAGE_SIGNATURE; + } + + psDevInfo->sBRN31620DummyPageDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); + PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, 0, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG); + + /* Allocate dummy PT */ + if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + SGX_MMU_PAGE_SIZE, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + 0, + IMG_NULL, + &psDevInfo->pvBRN31620DummyPTCpuVAddr, + &psDevInfo->hBRN31620DummyPTOSMemHandle) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed")); + return PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; + } + + /* Get a physical address */ + if(psDevInfo->pvBRN31620DummyPTCpuVAddr) + { + sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPTOSMemHandle, + 
psDevInfo->pvBRN31620DummyPTCpuVAddr); + } + else + { + sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hBRN31620DummyPTOSMemHandle, 0); + } + + OSMemSet(psDevInfo->pvBRN31620DummyPTCpuVAddr,0,SGX_MMU_PAGE_SIZE); + psDevInfo->sBRN31620DummyPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr); + PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, 0, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG); + } +#endif + } + else + { + IMG_SYS_PHYADDR sSysPAddr; + + /* allocate from the device's local memory arena */ + if(RA_Alloc(psDeviceNode->psLocalDevMemArena, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE, + 0, + IMG_NULL, + 0, + &(sSysPAddr.uiAddr))!= IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed")); + return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY; + } + + /* derive the CPU virtual address */ + sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + sPDDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr); + pvPDCpuVAddr = OSMapPhysToLin(sCpuPAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &hPDOSMemHandle); + if(!pvPDCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables")); + return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE; + } + + #if PAGE_TEST + PageTest(pvPDCpuVAddr, sPDDevPAddr); + #endif + +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* Allocate dummy PT and Data pages for the first context to be created */ + if(!psDevInfo->pvMMUContextList) + { + /* Dummy PT page */ + if(RA_Alloc(psDeviceNode->psLocalDevMemArena, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE, + 0, + IMG_NULL, + 0, + &(sSysPAddr.uiAddr))!= IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed")); + return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY; + } + + /* derive the CPU virtual address */ + sCpuPAddr = 
SysSysPAddrToCpuPAddr(sSysPAddr); + psDevInfo->sDummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr); + psDevInfo->pvDummyPTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &psDevInfo->hDummyPTPageOSMemHandle); + if(!psDevInfo->pvDummyPTPageCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables")); + return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE; + } + + /* Dummy Data page */ + if(RA_Alloc(psDeviceNode->psLocalDevMemArena, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE, + 0, + IMG_NULL, + 0, + &(sSysPAddr.uiAddr))!= IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed")); + return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY; + } + + /* derive the CPU virtual address */ + sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + psDevInfo->sDummyDataDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr); + psDevInfo->pvDummyDataPageCpuVAddr = OSMapPhysToLin(sCpuPAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &psDevInfo->hDummyDataPageOSMemHandle); + if(!psDevInfo->pvDummyDataPageCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables")); + return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE; + } + } +#endif /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */ +#if defined(FIX_HW_BRN_31620) + /* Allocate dummy PT and Data pages for the first context to be created */ + if(!psDevInfo->pvMMUContextList) + { + IMG_UINT32 j; + /* Allocate dummy page */ + if(RA_Alloc(psDeviceNode->psLocalDevMemArena, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE, + 0, + IMG_NULL, + 0, + &(sSysPAddr.uiAddr))!= IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed")); + return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY; + } + + /* derive the CPU virtual address */ + sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + 
psDevInfo->sBRN31620DummyPageDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr); + psDevInfo->pvBRN31620DummyPageCpuVAddr = OSMapPhysToLin(sCpuPAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &psDevInfo->hBRN31620DummyPageOSMemHandle); + if(!psDevInfo->pvBRN31620DummyPageCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables")); + return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE; + } + + MakeKernelPageReadWrite(psDevInfo->pvBRN31620DummyPageCpuVAddr); + pui32Tmp = (IMG_UINT32 *)psDevInfo->pvBRN31620DummyPageCpuVAddr; + for(j=0; j<(SGX_MMU_PAGE_SIZE/4); j++) + { + pui32Tmp[j] = BRN31620_DUMMY_PAGE_SIGNATURE; + } + MakeKernelPageReadOnly(psDevInfo->pvBRN31620DummyPageCpuVAddr); + PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, 0, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG); + + /* Allocate dummy PT */ + if(RA_Alloc(psDeviceNode->psLocalDevMemArena, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE, + 0, + IMG_NULL, + 0, + &(sSysPAddr.uiAddr))!= IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed")); + return PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY; + } + + /* derive the CPU virtual address */ + sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr); + psDevInfo->sBRN31620DummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr); + psDevInfo->pvBRN31620DummyPTCpuVAddr = OSMapPhysToLin(sCpuPAddr, + SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &psDevInfo->hBRN31620DummyPTOSMemHandle); + + if(!psDevInfo->pvBRN31620DummyPTCpuVAddr) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables")); + return PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE; + } + + OSMemSet(psDevInfo->pvBRN31620DummyPTCpuVAddr,0,SGX_MMU_PAGE_SIZE); + PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, 0, 
psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG); + } +#endif /* #if defined(FIX_HW_BRN_31620) */ + } + +#if defined(FIX_HW_BRN_31620) + if (!psDevInfo->pvMMUContextList) + { + /* Save the kernel MMU context which is always the 1st to be created */ + psDevInfo->hKernelMMUContext = psMMUContext; + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: saving kernel mmu context: %p", psMMUContext)); + } +#endif + +#if defined(PDUMP) +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* Find out if this context is for the active pdump client. + * If it is, need to ensure PD entries are pdumped whenever another + * process allocates from a shared heap. */ + { + PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData(); + if(psPerProc == IMG_NULL) + { + /* changes to the kernel context PD/PTs should be pdumped */ + psMMUContext->bPDumpActive = IMG_TRUE; + } + else + { + psMMUContext->bPDumpActive = psPerProc->bPDumpActive; + } + } +#endif /* SUPPORT_PDUMP_MULTI_PROCESS */ + /* pdump the PD malloc */ +#if IMG_ADDRSPACE_PHYSADDR_BITS == 32 + PDUMPCOMMENT("Alloc page directory for new MMU context (PDDevPAddr == 0x%08x)", + sPDDevPAddr.uiAddr); +#else + PDUMPCOMMENT("Alloc page directory for new MMU context, 64-bit arch detected (PDDevPAddr == 0x%08x%08x)", + sPDDevPAddr.uiHighAddr, sPDDevPAddr.uiAddr); +#endif + PDUMPMALLOCPAGETABLE(&psDeviceNode->sDevId, hPDOSMemHandle, 0, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PD_UNIQUETAG); +#endif /* PDUMP */ + +#ifdef SUPPORT_SGX_MMU_BYPASS + EnableHostAccess(psMMUContext); +#endif + + if (pvPDCpuVAddr) + { + pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: pvPDCpuVAddr invalid")); + return PVRSRV_ERROR_INVALID_CPU_ADDR; + } + + +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + MakeKernelPageReadWrite(pvPDCpuVAddr); + /* wire-up the new PD to the dummy PT */ + for(i=0; i<SGX_MMU_PD_SIZE; i++) + { + pui32Tmp[i] = 
(psDevInfo->sDummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT) + | SGX_MMU_PDE_PAGE_SIZE_4K + | SGX_MMU_PDE_VALID; + } + MakeKernelPageReadOnly(pvPDCpuVAddr); + + if(!psDevInfo->pvMMUContextList) + { + /* + if we've just allocated the dummy pages + wire up the dummy PT to the dummy data page + */ + MakeKernelPageReadWrite(psDevInfo->pvDummyPTPageCpuVAddr); + pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyPTPageCpuVAddr; + for(i=0; i<SGX_MMU_PT_SIZE; i++) + { + pui32Tmp[i] = (psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_VALID; + } + MakeKernelPageReadOnly(psDevInfo->pvDummyPTPageCpuVAddr); + /* pdump the Dummy PT Page */ + PDUMPCOMMENT("Dummy Page table contents"); + PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hDummyPTOSMemHandle, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + + /* + write a signature to the dummy data page + */ + MakeKernelPageReadWrite(psDevInfo->pvDummyDataPageCpuVAddr); + pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyDataPageCpuVAddr; + for(i=0; i<(SGX_MMU_PAGE_SIZE/4); i++) + { + pui32Tmp[i] = DUMMY_DATA_PAGE_SIGNATURE; + } + MakeKernelPageReadOnly(psDevInfo->pvDummyDataPageCpuVAddr); + /* pdump the Dummy Data Page */ + PDUMPCOMMENT("Dummy Data Page contents"); + PDUMPMEMPTENTRIES(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->hDummyDataPageOSMemHandle, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } +#else /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */ + /* initialise the PD to invalid address state */ + MakeKernelPageReadWrite(pvPDCpuVAddr); + for(i=0; i<SGX_MMU_PD_SIZE; i++) + { + /* invalid, no read, no write, no cache consistency */ + pui32Tmp[i] = 0; + } + MakeKernelPageReadOnly(pvPDCpuVAddr); +#endif /* #if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) */ + +#if defined(PDUMP) +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + if(psMMUContext->bPDumpActive) +#endif /* SUPPORT_PDUMP_MULTI_PROCESS */ + { + /* 
pdump the PD Page */ + PDUMPCOMMENT("Page directory contents"); + PDUMPPDENTRIES(&sMMUAttrib, hPDOSMemHandle, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } +#endif +#if defined(FIX_HW_BRN_31620) + { + IMG_UINT32 i; + IMG_UINT32 ui32PDCount = 0; + IMG_UINT32 *pui32PT; + pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr; + + PDUMPCOMMENT("BRN31620 Set up dummy PT"); + + MakeKernelPageReadWrite(psDevInfo->pvBRN31620DummyPTCpuVAddr); + pui32PT = (IMG_UINT32 *) psDevInfo->pvBRN31620DummyPTCpuVAddr; + pui32PT[BRN31620_DUMMY_PTE_INDEX] = (psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_DUMMY_PAGE + | SGX_MMU_PTE_READONLY + | SGX_MMU_PTE_VALID; + MakeKernelPageReadOnly(psDevInfo->pvBRN31620DummyPTCpuVAddr); + +#if defined(PDUMP) + /* Dump initial contents */ + PDUMPCOMMENT("BRN31620 Dump dummy PT contents"); + PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + PDUMPCOMMENT("BRN31620 Dump dummy page contents"); + PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + + /* Dump the wiring */ + for(i=0;i<SGX_MMU_PT_SIZE;i++) + { + PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPTOSMemHandle, &pui32PT[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } +#endif + PDUMPCOMMENT("BRN31620 Dump PDE wire up"); + /* Walk the PD wireing up the PT's */ + for(i=0;i<SGX_MMU_PD_SIZE;i++) + { + pui32Tmp[i] = 0; + + if (ui32PDCount == BRN31620_DUMMY_PDE_INDEX) + { + MakeKernelPageReadWrite(pvPDCpuVAddr); + pui32Tmp[i] = (psDevInfo->sBRN31620DummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT) + | SGX_MMU_PDE_PAGE_SIZE_4K + | SGX_MMU_PDE_DUMMY_PAGE + | SGX_MMU_PDE_VALID; + MakeKernelPageReadOnly(pvPDCpuVAddr); + } + 
PDUMPMEMPTENTRIES(&sMMUAttrib, hPDOSMemHandle, (IMG_VOID *) &pui32Tmp[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG); + ui32PDCount++; + if (ui32PDCount == BRN31620_PDES_PER_CACHE_LINE_SIZE) + { + /* Reset PT count */ + ui32PDCount = 0; + } + } + + + /* pdump the Dummy PT Page */ + PDUMPCOMMENT("BRN31620 dummy Page table contents"); + PDUMPMEMPTENTRIES(&sMMUAttrib, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + } +#endif +#if defined(PDUMP) + /* pdump set MMU context */ + { + PVRSRV_ERROR eError; + /* default MMU type is 1, 4k page */ + IMG_UINT32 ui32MMUType = 1; + + #if defined(SGX_FEATURE_36BIT_MMU) + ui32MMUType = 3; + #else + #if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE) + ui32MMUType = 2; + #endif + #endif + + eError = PDumpSetMMUContext(PVRSRV_DEVICE_TYPE_SGX, + psDeviceNode->sDevId.pszPDumpDevName, + &psMMUContext->ui32PDumpMMUContextID, + ui32MMUType, + PDUMP_PT_UNIQUETAG, + hPDOSMemHandle, + pvPDCpuVAddr); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to PDumpSetMMUContext failed")); + return eError; + } + } + + /* PDump the context ID */ + PDUMPCOMMENT("Set MMU context complete (MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID); +#endif + +#if defined(FIX_HW_BRN_31620) + for(i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++) + { + psMMUContext->ui32PDChangeMask[i] = 0; + } + + for(i=0;i<BRN31620_CACHE_FLUSH_SIZE;i++) + { + psMMUContext->ui32PDCacheRangeRefCount[i] = 0; + } + + for(i=0;i<SGX_MAX_PD_ENTRIES;i++) + { + psMMUContext->apsPTInfoListSave[i] = IMG_NULL; + } +#endif + /* store PD info in the MMU context */ + psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr; + psMMUContext->sPDDevPAddr = sPDDevPAddr; + psMMUContext->hPDOSMemHandle = hPDOSMemHandle; + + /* Get some process information to aid debug */ + psMMUContext->ui32PID = OSGetCurrentProcessIDKM(); + 
psMMUContext->szName[0] = '\0'; + OSGetCurrentProcessNameKM(psMMUContext->szName, MMU_CONTEXT_NAME_SIZE); + + /* return context */ + *ppsMMUContext = psMMUContext; + + /* return the PD DevVAddr */ + *psPDDevPAddr = sPDDevPAddr; + + + /* add the new MMU context onto the list of MMU contexts */ + psMMUContext->psNext = (MMU_CONTEXT*)psDevInfo->pvMMUContextList; + psDevInfo->pvMMUContextList = (IMG_VOID*)psMMUContext; + +#ifdef SUPPORT_SGX_MMU_BYPASS + DisableHostAccess(psMMUContext); +#endif + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + FUNCTION: MMU_Finalise + + PURPOSE: Finalise the mmu module, deallocate all resources. + + PARAMETERS: In: psMMUContext - MMU context to deallocate + RETURNS: None. +******************************************************************************/ +IMG_VOID +MMU_Finalise (MMU_CONTEXT *psMMUContext) +{ + IMG_UINT32 *pui32Tmp, i; + SYS_DATA *psSysData; + MMU_CONTEXT **ppsMMUContext; +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) || defined(FIX_HW_BRN_31620) + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo; + MMU_CONTEXT *psMMUContextList = (MMU_CONTEXT*)psDevInfo->pvMMUContextList; +#endif + + SysAcquireData(&psSysData); + +#if defined(PDUMP) + /* pdump the MMU context clear */ + PDUMPCOMMENT("Clear MMU context (MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID); + PDUMPCLEARMMUCONTEXT(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->psDeviceNode->sDevId.pszPDumpDevName, psMMUContext->ui32PDumpMMUContextID, 2); + + /* pdump the PD free */ +#if IMG_ADDRSPACE_PHYSADDR_BITS == 32 + PDUMPCOMMENT("Free page directory (PDDevPAddr == 0x%08x)", + psMMUContext->sPDDevPAddr.uiAddr); +#else + PDUMPCOMMENT("Free page directory, 64-bit arch detected (PDDevPAddr == 0x%08x%08x)", + psMMUContext->sPDDevPAddr.uiHighAddr, psMMUContext->sPDDevPAddr.uiAddr); +#endif +#endif /* PDUMP */ + + PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, 
psMMUContext->hPDOSMemHandle, psMMUContext->pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
    PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hDummyPTPageOSMemHandle, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
    PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hDummyDataPageOSMemHandle, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
#endif

    pui32Tmp = (IMG_UINT32 *)psMMUContext->pvPDCpuVAddr;

    /* Zero the whole PD before releasing it so no stale PDEs can be walked
     * if the page is reused before the MMU is fully torn down. */
    MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
    /* initialise the PD to invalid address state */
    for(i=0; i<SGX_MMU_PD_SIZE; i++)
    {
        /* invalid, no read, no write, no cache consistency */
        pui32Tmp[i] = 0;
    }
    MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);

    /*
        free the PD:
        depending on the specific system, the PD is allocated from system memory
        or device local memory. For now, just look for at least a valid local heap/arena
    */
    if(psMMUContext->psDeviceNode->psLocalDevMemArena == IMG_NULL)
    {
#if defined(FIX_HW_BRN_31620)
        /* local psDevInfo shadows the function-scope declaration above
         * (which also exists when FIX_HW_BRN_31620 is defined) */
        PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
#endif
        MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
        OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                    SGX_MMU_PAGE_SIZE,
                    psMMUContext->pvPDCpuVAddr,
                    psMMUContext->hPDOSMemHandle);

#if defined(FIX_HW_BRN_31620)
        /* If this is the _last_ MMU context it must be the uKernel */
        if (!psMMUContextList->psNext)
        {
            PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
            OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                        SGX_MMU_PAGE_SIZE,
                        psDevInfo->pvBRN31620DummyPageCpuVAddr,
                        psDevInfo->hBRN31620DummyPageOSMemHandle);

            PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle,
                               psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);
            OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                        SGX_MMU_PAGE_SIZE,
                        psDevInfo->pvBRN31620DummyPTCpuVAddr,
                        psDevInfo->hBRN31620DummyPTOSMemHandle);

        }
#endif
#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
        /* if this is the last context free the dummy pages too */
        if(!psMMUContextList->psNext)
        {
            OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                        SGX_MMU_PAGE_SIZE,
                        psDevInfo->pvDummyPTPageCpuVAddr,
                        psDevInfo->hDummyPTPageOSMemHandle);
            OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                        SGX_MMU_PAGE_SIZE,
                        psDevInfo->pvDummyDataPageCpuVAddr,
                        psDevInfo->hDummyDataPageOSMemHandle);
        }
#endif
    }
    else
    {
        IMG_SYS_PHYADDR sSysPAddr;
        IMG_CPU_PHYADDR sCpuPAddr;

        /* derive the system physical address */
        sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->hPDOSMemHandle,
                                      psMMUContext->pvPDCpuVAddr);
        sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

        /* unmap the CPU mapping */
        OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr,
                         SGX_MMU_PAGE_SIZE,
                         PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
                         psMMUContext->hPDOSMemHandle);
        /* and free the memory */
        RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);

#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
        /* if this is the last context free the dummy pages too */
        if(!psMMUContextList->psNext)
        {
            /* free the Dummy PT Page */
            sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyPTPageOSMemHandle,
                                          psDevInfo->pvDummyPTPageCpuVAddr);
            sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

            /* unmap the CPU mapping */
            OSUnMapPhysToLin(psDevInfo->pvDummyPTPageCpuVAddr,
                             SGX_MMU_PAGE_SIZE,
                             PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
                             psDevInfo->hDummyPTPageOSMemHandle);
            /* and free the memory */
            RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);

            /* free the Dummy Data Page */
            sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hDummyDataPageOSMemHandle,
                                          psDevInfo->pvDummyDataPageCpuVAddr);
            sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

            /* unmap the CPU mapping */
            OSUnMapPhysToLin(psDevInfo->pvDummyDataPageCpuVAddr,
                             SGX_MMU_PAGE_SIZE,
                             PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
                             psDevInfo->hDummyDataPageOSMemHandle);
            /* and free the memory */
            RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
        }
#endif
#if defined(FIX_HW_BRN_31620)
        /* if this is the last context free the dummy pages too */
        if(!psMMUContextList->psNext)
        {
            /* free the Page */
            PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPageOSMemHandle, psDevInfo->pvBRN31620DummyPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);

            sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPageOSMemHandle,
                                          psDevInfo->pvBRN31620DummyPageCpuVAddr);
            sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

            /* unmap the CPU mapping */
            OSUnMapPhysToLin(psDevInfo->pvBRN31620DummyPageCpuVAddr,
                             SGX_MMU_PAGE_SIZE,
                             PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
                             psDevInfo->hBRN31620DummyPageOSMemHandle);
            /* and free the memory */
            RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);

            /* free the Dummy PT */
            PDUMPFREEPAGETABLE(&psMMUContext->psDeviceNode->sDevId, psDevInfo->hBRN31620DummyPTOSMemHandle, psDevInfo->pvBRN31620DummyPTCpuVAddr, SGX_MMU_PAGE_SIZE, 0, PDUMP_PT_UNIQUETAG);

            sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->hBRN31620DummyPTOSMemHandle,
                                          psDevInfo->pvBRN31620DummyPTCpuVAddr);
            sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

            /* unmap the CPU mapping */
            OSUnMapPhysToLin(psDevInfo->pvBRN31620DummyPTCpuVAddr,
                             SGX_MMU_PAGE_SIZE,
                             PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
                             psDevInfo->hBRN31620DummyPTOSMemHandle);
            /* and free the memory */
            RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
        }
#endif

    }

    PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Finalise"));

    /* remove the MMU context from the list of MMU contexts.
     * NOTE(review): the singly-linked list walk is not visibly locked here -
     * presumably context create/destroy are serialised by the caller; confirm. */
    ppsMMUContext = (MMU_CONTEXT**)&psMMUContext->psDevInfo->pvMMUContextList;
    while(*ppsMMUContext)
    {
        if(*ppsMMUContext == psMMUContext)
        {
            /* remove item from the list */
            *ppsMMUContext = psMMUContext->psNext;
            break;
        }

        /* advance to next next */
        ppsMMUContext = &((*ppsMMUContext)->psNext);
    }

    /* free the context itself. */
    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext, IMG_NULL);
    /*not nulling pointer, copy on stack*/
}


/*!
******************************************************************************
    FUNCTION:   MMU_InsertHeap

    PURPOSE:    Copies PDEs from shared/exported heap into current MMU context.

    PARAMETERS: In: psMMUContext - the mmu
                In: psMMUHeap - a shared/exported heap

    RETURNS:    None
******************************************************************************/
IMG_VOID
MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap)
{
    IMG_UINT32 *pui32PDCpuVAddr = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
    IMG_UINT32 *pui32KernelPDCpuVAddr = (IMG_UINT32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr;
    IMG_UINT32 ui32PDEntry;
#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
    IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
#endif

    /* advance to the first entry */
    pui32PDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
    pui32KernelPDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;

    /*
        update the PD range relating to the heap's
        device virtual address range
    */
#if defined(PDUMP)
    PDUMPCOMMENT("Page directory shared heap range copy");
    PDUMPCOMMENT(" (Source heap MMU Context ID == %u, PT count == 0x%x)",
            psMMUHeap->psMMUContext->ui32PDumpMMUContextID,
            psMMUHeap->ui32PageTableCount);
    PDUMPCOMMENT(" (Destination MMU Context ID == %u)", psMMUContext->ui32PDumpMMUContextID);
#endif /* PDUMP
*/
#ifdef SUPPORT_SGX_MMU_BYPASS
    EnableHostAccess(psMMUContext);
#endif

    /* Copy one PDE per page table covered by the shared heap from the source
     * (kernel) context's PD into this context's PD. */
    for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PageTableCount; ui32PDEntry++)
    {
#if (!defined(SUPPORT_SGX_MMU_DUMMY_PAGE)) && (!defined(FIX_HW_BRN_31620))
        /* check we have invalidated target PDEs */
        PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
#endif
        MakeKernelPageReadWrite(psMMUContext->pvPDCpuVAddr);
        /* copy over the PDEs */
        pui32PDCpuVAddr[ui32PDEntry] = pui32KernelPDCpuVAddr[ui32PDEntry];
        MakeKernelPageReadOnly(psMMUContext->pvPDCpuVAddr);
        if (pui32PDCpuVAddr[ui32PDEntry])
        {
            /* Ensure the shared heap allocation is mapped into the context/PD
             * for the active pdump process/app. The PTs and backing physical
             * should also be pdumped (elsewhere).
             *  MALLOC (PT)
             *  LDB (init PT)
             *  MALLOC (data page)
             *  WRW (PTE->data page)
             *  LDB (init data page) -- could be useful to ensure page is initialised
             */
            #if defined(PDUMP)
            //PDUMPCOMMENT("MMU_InsertHeap: Mapping shared heap to new context %d (%s)", psMMUContext->ui32PDumpMMUContextID, (psMMUContext->bPDumpActive) ? "active" : "");
            #if defined(SUPPORT_PDUMP_MULTI_PROCESS)
            if(psMMUContext->bPDumpActive)
            #endif /* SUPPORT_PDUMP_MULTI_PROCESS */
            {
                PDUMPPDENTRIES(&psMMUHeap->sMMUAttrib, psMMUContext->hPDOSMemHandle, (IMG_VOID *) &pui32PDCpuVAddr[ui32PDEntry], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
            }
            #endif
#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
            /* at least one non-zero PDE was written - the directory cache
             * copy must be refreshed below */
            bInvalidateDirectoryCache = IMG_TRUE;
#endif
        }
    }

#ifdef SUPPORT_SGX_MMU_BYPASS
    DisableHostAccess(psMMUContext);
#endif

#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
    if (bInvalidateDirectoryCache)
    {
        /* This is actually not to do with multiple mem contexts, but to do with the directory cache.
           In the 1 context implementation of the MMU, the directory "cache" is actually a copy of the
           page directory memory, and requires updating whenever the page directory changes, even if there
           was no previous value in a particular entry
        */
        MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
    }
#endif
}


/*!
******************************************************************************
    FUNCTION:   MMU_UnmapPagesAndFreePTs

    PURPOSE:    unmap pages, invalidate virtual address and try to free the PTs

    PARAMETERS: In: psMMUHeap - the mmu.
                In: sDevVAddr - the device virtual address.
                In: ui32PageCount - page count
                In: hUniqueTag - A unique ID for use as a tag identifier

    RETURNS:    None
******************************************************************************/
static IMG_VOID
MMU_UnmapPagesAndFreePTs (MMU_HEAP *psMMUHeap,
                          IMG_DEV_VIRTADDR sDevVAddr,
                          IMG_UINT32 ui32PageCount,
                          IMG_HANDLE hUniqueTag)
{
    IMG_DEV_VIRTADDR sTmpDevVAddr;
    IMG_UINT32 i;
    IMG_UINT32 ui32PDIndex;
    IMG_UINT32 ui32PTIndex;
    IMG_UINT32 *pui32Tmp;
    IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;

#if !defined (PDUMP)
    PVR_UNREFERENCED_PARAMETER(hUniqueTag);
#endif
    /* setup tmp devvaddr to base of allocation */
    sTmpDevVAddr = sDevVAddr;

    /* walk the allocation one data page at a time */
    for(i=0; i<ui32PageCount; i++)
    {
        MMU_PT_INFO **ppsPTInfoList;

        /* find the index/offset in PD entries */
        ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;

        /* and advance to the first PT info list */
        ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

        {
            /* find the index/offset of the first PT in the first PT page */
            ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;

            /* Is the PT page valid?
            */
            if (!ppsPTInfoList[0])
            {
                /*
                    With sparse mappings we expect that the PT could be freed
                    before we reach the end of it as the unmapped pages don't
                    bump ui32ValidPTECount so it can reach zero before we reach
                    the end of the PT.
                */
                if (!psMMUHeap->bHasSparseMappings)
                {
                    PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08X (VaddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
                }

                /* advance the sTmpDevVAddr by one page */
                sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;

                /* Try to unmap the remaining allocation pages */
                continue;
            }

            /* setup pointer to the first entry in the PT page */
            pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;

            /* Is PTPageCpuVAddr valid ? */
            if (!pui32Tmp)
            {
                continue;
            }

            CheckPT(ppsPTInfoList[0]);

            /* Decrement the valid page count only if the current page is valid*/
            if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
            {
                ppsPTInfoList[0]->ui32ValidPTECount--;
            }
            else
            {
                if (!psMMUHeap->bHasSparseMappings)
                {
                    PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
                }
            }

            /* The page table count should not go below zero */
            PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
            MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr);
#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
            /* point the PT entry to the dummy data page */
            pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
                                    | SGX_MMU_PTE_VALID;
#else
            /* invalidate entry */
#if defined(FIX_HW_BRN_31620)
            BRN31620InvalidatePageTableEntry(psMMUHeap->psMMUContext, ui32PDIndex, ui32PTIndex, &pui32Tmp[ui32PTIndex]);
#else
            pui32Tmp[ui32PTIndex] = 0;
#endif
#endif

            MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr);
            CheckPT(ppsPTInfoList[0]);
        }

        /*
            Free a page table if we can.
        */
        if (ppsPTInfoList[0] && (ppsPTInfoList[0]->ui32ValidPTECount == 0)
            )
        {
#if defined(FIX_HW_BRN_31620)
            if (BRN31620FreePageTable(psMMUHeap, ui32PDIndex) == IMG_TRUE)
            {
                bInvalidateDirectoryCache = IMG_TRUE;
            }
#else
            _DeferredFreePageTable(psMMUHeap, ui32PDIndex - psMMUHeap->ui32PDBaseIndex, IMG_TRUE);
            bInvalidateDirectoryCache = IMG_TRUE;
#endif
        }

        /* advance the sTmpDevVAddr by one page */
        sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
    }

    /* a freed PT means the PD changed, so the (heavier) directory-cache
     * invalidate is needed; otherwise a PT-cache invalidate suffices */
    if(bInvalidateDirectoryCache)
    {
        MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->psDevInfo);
    }
    else
    {
        MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
    }

#if defined(PDUMP)
    MMU_PDumpPageTables(psMMUHeap,
                        sDevVAddr,
                        psMMUHeap->ui32DataPageSize * ui32PageCount,
                        IMG_TRUE,
                        hUniqueTag);
#endif /* #if defined(PDUMP) */
}


/*!
******************************************************************************
    FUNCTION:   MMU_FreePageTables

    PURPOSE:    Call back from RA_Free to zero page table entries used by freed
                spans.

    PARAMETERS: In: pvMMUHeap
                In: ui32Start
                In: ui32End
                In: hUniqueTag - A unique ID for use as a tag identifier
    RETURNS:
******************************************************************************/
static IMG_VOID MMU_FreePageTables(IMG_PVOID pvMMUHeap,
                                   IMG_SIZE_T ui32Start,
                                   IMG_SIZE_T ui32End,
                                   IMG_HANDLE hUniqueTag)
{
    MMU_HEAP *pMMUHeap = (MMU_HEAP*)pvMMUHeap;
    IMG_DEV_VIRTADDR Start;

    Start.uiAddr = (IMG_UINT32)ui32Start;

    /* convert the freed byte span into a data-page count via ui32PTShift */
    MMU_UnmapPagesAndFreePTs(pMMUHeap, Start, (IMG_UINT32)((ui32End - ui32Start) >> pMMUHeap->ui32PTShift), hUniqueTag);
}

/*!
******************************************************************************
    FUNCTION:   MMU_Create

    PURPOSE:    Create an mmu device virtual heap.
+ + PARAMETERS: In: psMMUContext - MMU context + In: psDevArena - device memory resource arena + Out: ppsVMArena - virtual mapping arena + RETURNS: MMU_HEAP + RETURNS: +******************************************************************************/ +MMU_HEAP * +MMU_Create (MMU_CONTEXT *psMMUContext, + DEV_ARENA_DESCRIPTOR *psDevArena, + RA_ARENA **ppsVMArena, + PDUMP_MMU_ATTRIB **ppsMMUAttrib) +{ + MMU_HEAP *pMMUHeap; + IMG_UINT32 ui32ScaleSize; + + PVR_UNREFERENCED_PARAMETER(ppsMMUAttrib); + + PVR_ASSERT (psDevArena != IMG_NULL); + + if (psDevArena == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid parameter")); + return IMG_NULL; + } + + OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof (MMU_HEAP), + (IMG_VOID **)&pMMUHeap, IMG_NULL, + "MMU Heap"); + if (pMMUHeap == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to OSAllocMem failed")); + return IMG_NULL; + } + + pMMUHeap->psMMUContext = psMMUContext; + pMMUHeap->psDevArena = psDevArena; + + /* + generate page table and data page mask and shift values + based on the data page size + */ + switch(pMMUHeap->psDevArena->ui32DataPageSize) + { + case 0x1000: + ui32ScaleSize = 0; + pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4K; + break; +#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE) + case 0x4000: + ui32ScaleSize = 2; + pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_16K; + break; + case 0x10000: + ui32ScaleSize = 4; + pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_64K; + break; + case 0x40000: + ui32ScaleSize = 6; + pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_256K; + break; + case 0x100000: + ui32ScaleSize = 8; + pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_1M; + break; + case 0x400000: + ui32ScaleSize = 10; + pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4M; + break; +#endif /* #if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE) */ + default: + PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid data page size")); + goto ErrorFreeHeap; + } + + /* 
number of bits of address offset into the data page */ + pMMUHeap->ui32DataPageSize = psDevArena->ui32DataPageSize; + pMMUHeap->ui32DataPageBitWidth = SGX_MMU_PAGE_SHIFT + ui32ScaleSize; + pMMUHeap->ui32DataPageMask = pMMUHeap->ui32DataPageSize - 1; + /* number of bits of address indexing into a pagetable */ + pMMUHeap->ui32PTShift = pMMUHeap->ui32DataPageBitWidth; + pMMUHeap->ui32PTBitWidth = SGX_MMU_PT_SHIFT - ui32ScaleSize; + pMMUHeap->ui32PTMask = SGX_MMU_PT_MASK & (SGX_MMU_PT_MASK<<ui32ScaleSize); + pMMUHeap->ui32PTSize = (IMG_UINT32)(1UL<<pMMUHeap->ui32PTBitWidth) * sizeof(IMG_UINT32); + + /* note: PT size must be at least 4 entries, even for 4Mb data page size */ + if(pMMUHeap->ui32PTSize < 4 * sizeof(IMG_UINT32)) + { + pMMUHeap->ui32PTSize = 4 * sizeof(IMG_UINT32); + } + pMMUHeap->ui32PTNumEntriesAllocated = pMMUHeap->ui32PTSize >> 2; + + /* find the number of actual PT entries per PD entry range. For 4MB data + * pages we only use the first entry although the PT has 16 byte allocation/alignment + * (due to 4 LSbits of the PDE are reserved for control) */ + pMMUHeap->ui32PTNumEntriesUsable = (IMG_UINT32)(1UL << pMMUHeap->ui32PTBitWidth); + + /* number of bits of address indexing into a page directory */ + pMMUHeap->ui32PDShift = pMMUHeap->ui32PTBitWidth + pMMUHeap->ui32PTShift; + pMMUHeap->ui32PDBitWidth = SGX_FEATURE_ADDRESS_SPACE_SIZE - pMMUHeap->ui32PTBitWidth - pMMUHeap->ui32DataPageBitWidth; + pMMUHeap->ui32PDMask = SGX_MMU_PD_MASK & (SGX_MMU_PD_MASK>>(32-SGX_FEATURE_ADDRESS_SPACE_SIZE)); + + /* External system cache violates this rule */ +#if !defined (SUPPORT_EXTERNAL_SYSTEM_CACHE) + /* + The heap must start on a PT boundary to avoid PT sharing across heaps + The only exception is the first heap which can start at any address + from 0 to the end of the first PT boundary + */ + if(psDevArena->BaseDevVAddr.uiAddr > (pMMUHeap->ui32DataPageMask | pMMUHeap->ui32PTMask)) + { + /* + if for some reason the first heap starts after the end of the first PT 
boundary + but is not aligned to a PT boundary then the assert will trigger unncessarily + */ + PVR_ASSERT ((psDevArena->BaseDevVAddr.uiAddr + & (pMMUHeap->ui32DataPageMask + | pMMUHeap->ui32PTMask)) == 0); + } +#endif + /* how many PT entries do we need? */ + pMMUHeap->ui32PTETotalUsable = pMMUHeap->psDevArena->ui32Size >> pMMUHeap->ui32PTShift; + + /* calculate the PD Base index for the Heap (required for page mapping) */ + pMMUHeap->ui32PDBaseIndex = (pMMUHeap->psDevArena->BaseDevVAddr.uiAddr & pMMUHeap->ui32PDMask) >> pMMUHeap->ui32PDShift; + + /* + how many page tables? + round up to nearest entries to the nearest page table sized block + */ + pMMUHeap->ui32PageTableCount = (pMMUHeap->ui32PTETotalUsable + pMMUHeap->ui32PTNumEntriesUsable - 1) + >> pMMUHeap->ui32PTBitWidth; + PVR_ASSERT(pMMUHeap->ui32PageTableCount > 0); + + /* Create the arena */ + pMMUHeap->psVMArena = RA_Create(psDevArena->pszName, + psDevArena->BaseDevVAddr.uiAddr, + psDevArena->ui32Size, + IMG_NULL, + MAX(HOST_PAGESIZE(), pMMUHeap->ui32DataPageSize), + IMG_NULL, + IMG_NULL, + &MMU_FreePageTables, + pMMUHeap); + + if (pMMUHeap->psVMArena == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to RA_Create failed")); + goto ErrorFreePagetables; + } + +#if defined(PDUMP) + /* setup per-heap PDUMP MMU attributes */ + MMU_SetPDumpAttribs(&pMMUHeap->sMMUAttrib, + psMMUContext->psDeviceNode, + pMMUHeap->ui32DataPageMask, + pMMUHeap->ui32PTSize); + *ppsMMUAttrib = &pMMUHeap->sMMUAttrib; + + PDUMPCOMMENT("Create MMU device from arena %s (Size == 0x%x, DataPageSize == 0x%x, BaseDevVAddr == 0x%x)", + psDevArena->pszName, + psDevArena->ui32Size, + pMMUHeap->ui32DataPageSize, + psDevArena->BaseDevVAddr.uiAddr); +#endif /* PDUMP */ + + /* + And return the RA for VM arena management + */ + *ppsVMArena = pMMUHeap->psVMArena; + + return pMMUHeap; + + /* drop into here if errors */ +ErrorFreePagetables: + _DeferredFreePageTables (pMMUHeap); + +ErrorFreeHeap: + OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, 
sizeof(MMU_HEAP), pMMUHeap, IMG_NULL); + /*not nulling pointer, out of scope*/ + + return IMG_NULL; +} + +/*! +****************************************************************************** + FUNCTION: MMU_Delete + + PURPOSE: Delete an MMU device virtual heap. + + PARAMETERS: In: pMMUHeap - The MMU heap to delete. + RETURNS: +******************************************************************************/ +IMG_VOID +MMU_Delete (MMU_HEAP *pMMUHeap) +{ + if (pMMUHeap != IMG_NULL) + { + PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Delete")); + + if(pMMUHeap->psVMArena) + { + RA_Delete (pMMUHeap->psVMArena); + } + +#if defined(PDUMP) + PDUMPCOMMENT("Delete MMU device from arena %s (BaseDevVAddr == 0x%x, PT count for deferred free == 0x%x)", + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->BaseDevVAddr.uiAddr, + pMMUHeap->ui32PageTableCount); +#endif /* PDUMP */ + +#ifdef SUPPORT_SGX_MMU_BYPASS + EnableHostAccess(pMMUHeap->psMMUContext); +#endif + _DeferredFreePageTables (pMMUHeap); +#ifdef SUPPORT_SGX_MMU_BYPASS + DisableHostAccess(pMMUHeap->psMMUContext); +#endif + + OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL); + /*not nulling pointer, copy on stack*/ + } +} + +/*! +****************************************************************************** + FUNCTION: MMU_Alloc + PURPOSE: Allocate space in an mmu's virtual address space. + PARAMETERS: In: pMMUHeap - MMU to allocate on. + In: uSize - Size in bytes to allocate. + Out: pActualSize - If non null receives actual size allocated. + In: uFlags - Allocation flags. + In: uDevVAddrAlignment - Required alignment. + Out: DevVAddr - Receives base address of allocation. 
+ RETURNS: IMG_TRUE - Success + IMG_FALSE - Failure +******************************************************************************/ +IMG_BOOL +MMU_Alloc (MMU_HEAP *pMMUHeap, + IMG_SIZE_T uSize, + IMG_SIZE_T *pActualSize, + IMG_UINT32 uFlags, + IMG_UINT32 uDevVAddrAlignment, + IMG_DEV_VIRTADDR *psDevVAddr) +{ + IMG_BOOL bStatus; + + PVR_DPF ((PVR_DBG_MESSAGE, + "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x", + uSize, uFlags, uDevVAddrAlignment)); + + /* + Only allocate a VM address if the caller did not supply one + */ + if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) + { + IMG_UINTPTR_T uiAddr; + + bStatus = RA_Alloc (pMMUHeap->psVMArena, + uSize, + pActualSize, + IMG_NULL, + 0, + uDevVAddrAlignment, + 0, + IMG_NULL, + 0, + &uiAddr); + if(!bStatus) + { + PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: RA_Alloc of VMArena failed")); + PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Alloc of DevVAddr failed from heap %s ID%d", + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->ui32HeapID)); + return bStatus; + } + + psDevVAddr->uiAddr = IMG_CAST_TO_DEVVADDR_UINT(uiAddr); + } + + #ifdef SUPPORT_SGX_MMU_BYPASS + EnableHostAccess(pMMUHeap->psMMUContext); + #endif + + /* allocate page tables to cover allocation as required */ + bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, (IMG_UINT32)uSize); + + #ifdef SUPPORT_SGX_MMU_BYPASS + DisableHostAccess(pMMUHeap->psMMUContext); + #endif + + if (!bStatus) + { + PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed")); + PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Failed to alloc pagetable(s) for DevVAddr 0x%8.8x from heap %s ID%d", + psDevVAddr->uiAddr, + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->ui32HeapID)); + if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0) + { + /* free the VM address */ + RA_Free (pMMUHeap->psVMArena, psDevVAddr->uiAddr, IMG_FALSE); + } + } + + return bStatus; +} + +/*! 
+****************************************************************************** + FUNCTION: MMU_Free + PURPOSE: Free space in an mmu's virtual address space. + PARAMETERS: In: pMMUHeap - MMU to deallocate on. + In: DevVAddr - Base address to deallocate. + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_Free (MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size) +{ + PVR_ASSERT (pMMUHeap != IMG_NULL); + + if (pMMUHeap == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter")); + return; + } + + PVR_DPF((PVR_DBG_MESSAGE, "MMU_Free: Freeing DevVAddr 0x%08X from heap %s ID%d", + DevVAddr.uiAddr, + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->ui32HeapID)); + + if((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) && + (DevVAddr.uiAddr + ui32Size <= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr + pMMUHeap->psDevArena->ui32Size)) + { + RA_Free (pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE); + return; + } + + PVR_DPF((PVR_DBG_ERROR,"MMU_Free: Couldn't free DevVAddr %08X from heap %s ID%d (not in range of heap))", + DevVAddr.uiAddr, + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->ui32HeapID)); +} + +/*! +****************************************************************************** + FUNCTION: MMU_Enable + + PURPOSE: Enable an mmu. Establishes pages tables and takes the mmu out + of bypass and waits for the mmu to acknowledge enabled. + + PARAMETERS: In: pMMUHeap - the mmu + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_Enable (MMU_HEAP *pMMUHeap) +{ + PVR_UNREFERENCED_PARAMETER(pMMUHeap); + /* SGX mmu is always enabled (stub function) */ +} + +/*! +****************************************************************************** + FUNCTION: MMU_Disable + + PURPOSE: Disable an mmu, takes the mmu into bypass. 
+ + PARAMETERS: In: pMMUHeap - the mmu + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_Disable (MMU_HEAP *pMMUHeap) +{ + PVR_UNREFERENCED_PARAMETER(pMMUHeap); + /* SGX mmu is always enabled (stub function) */ +} + +#if defined(FIX_HW_BRN_31620) +/*! +****************************************************************************** + FUNCTION: MMU_GetCacheFlushRange + + PURPOSE: Gets device physical address of the mmu context. + + PARAMETERS: In: pMMUContext - the mmu context + Out: pui32RangeMask - Bit mask showing which PD cache + lines have changed + RETURNS: None +******************************************************************************/ + +IMG_VOID MMU_GetCacheFlushRange(MMU_CONTEXT *pMMUContext, IMG_UINT32 *pui32RangeMask) +{ + IMG_UINT32 i; + + for (i=0;i<BRN31620_CACHE_FLUSH_INDEX_SIZE;i++) + { + pui32RangeMask[i] = pMMUContext->ui32PDChangeMask[i]; + + /* Clear bit mask for the next set of allocations */ + pMMUContext->ui32PDChangeMask[i] = 0; + } +} + +/*! +****************************************************************************** + FUNCTION: MMU_GetPDPhysAddr + + PURPOSE: Gets device physical address of the mmu contexts PD. + + PARAMETERS: In: pMMUContext - the mmu context + Out: psDevPAddr - Address of PD + RETURNS: None +******************************************************************************/ + +IMG_VOID MMU_GetPDPhysAddr(MMU_CONTEXT *pMMUContext, IMG_DEV_PHYADDR *psDevPAddr) +{ + *psDevPAddr = pMMUContext->sPDDevPAddr; +} + +#endif +#if defined(PDUMP) +/*! +****************************************************************************** + FUNCTION: MMU_PDumpPageTables + + PURPOSE: PDump the linear mapping for a range of pages at a specified + virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. 
+ In: uSize - size of memory range in bytes + In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: None +******************************************************************************/ +static IMG_VOID +MMU_PDumpPageTables (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SIZE_T uSize, + IMG_BOOL bForUnmap, + IMG_HANDLE hUniqueTag) +{ + IMG_UINT32 ui32NumPTEntries; + IMG_UINT32 ui32PTIndex; + IMG_UINT32 *pui32PTEntry; + + MMU_PT_INFO **ppsPTInfoList; + IMG_UINT32 ui32PDIndex; + IMG_UINT32 ui32PTDumpCount; + +#if defined(FIX_HW_BRN_31620) + PVRSRV_SGXDEV_INFO *psDevInfo = pMMUHeap->psMMUContext->psDevInfo; +#endif + /* find number of PT entries to dump */ + ui32NumPTEntries = (IMG_UINT32)((uSize + pMMUHeap->ui32DataPageMask) >> pMMUHeap->ui32PTShift); + + /* find the index/offset in PD entries */ + ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift; + + /* set the base PT info */ + ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex]; + + /* find the index/offset of the first PT entry in the first PT page */ + ui32PTIndex = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift; + + /* pdump the PT Page modification */ + PDUMPCOMMENT("Page table mods (num entries == %08X) %s", ui32NumPTEntries, bForUnmap ? "(for unmap)" : ""); + + /* walk the PT pages, dumping as we go */ + while(ui32NumPTEntries > 0) + { + MMU_PT_INFO* psPTInfo = *ppsPTInfoList++; + + if(ui32NumPTEntries <= pMMUHeap->ui32PTNumEntriesUsable - ui32PTIndex) + { + ui32PTDumpCount = ui32NumPTEntries; + } + else + { + ui32PTDumpCount = pMMUHeap->ui32PTNumEntriesUsable - ui32PTIndex; + } + + if (psPTInfo) + { +#if defined(FIX_HW_BRN_31620) + IMG_UINT32 i; +#endif + IMG_UINT32 ui32Flags = 0; +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + ui32Flags |= ( MMU_IsHeapShared(pMMUHeap) ) ? 
PDUMP_FLAGS_PERSISTENT : 0; +#endif + pui32PTEntry = (IMG_UINT32*)psPTInfo->PTPageCpuVAddr; +#if defined(FIX_HW_BRN_31620) + if ((ui32PDIndex % (BRN31620_PDE_CACHE_FILL_SIZE/BRN31620_PT_ADDRESS_RANGE_SIZE)) == BRN31620_DUMMY_PDE_INDEX) + { + for (i=ui32PTIndex;i<(ui32PTIndex + ui32PTDumpCount);i++) + { + if (pui32PTEntry[i] == ((psDevInfo->sBRN31620DummyPageDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_DUMMY_PAGE + | SGX_MMU_PTE_READONLY + | SGX_MMU_PTE_VALID)) + { + PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[i], sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG); + } + else + { + PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[i], sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag); + } + } + } + else +#endif + { + PDUMPMEMPTENTRIES(&pMMUHeap->sMMUAttrib, psPTInfo->hPTPageOSMemHandle, (IMG_VOID *) &pui32PTEntry[ui32PTIndex], ui32PTDumpCount * sizeof(IMG_UINT32), ui32Flags, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag); + } + } + + /* decrement PT entries left */ + ui32NumPTEntries -= ui32PTDumpCount; + + /* reset offset in page */ + ui32PTIndex = 0; + +#if defined(FIX_HW_BRN_31620) + /* For 31620 we need to know which PD index we're working on */ + ui32PDIndex++; +#endif + } + + PDUMPCOMMENT("Finished page table mods %s", bForUnmap ? "(for unmap)" : ""); +} +#endif /* #if defined(PDUMP) */ + + +/*! +****************************************************************************** + FUNCTION: MMU_MapPage + + PURPOSE: Create a mapping for one page at a specified virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: DevPAddr - the device physical address of the page to map. 
+ In: ui32MemFlags - BM r/w/cache flags + RETURNS: None +******************************************************************************/ +static IMG_VOID +MMU_MapPage (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_DEV_PHYADDR DevPAddr, + IMG_UINT32 ui32MemFlags) +{ + IMG_UINT32 ui32Index; + IMG_UINT32 *pui32Tmp; + IMG_UINT32 ui32MMUFlags = 0; + MMU_PT_INFO **ppsPTInfoList; + + /* check the physical alignment of the memory to map */ + PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0); + + /* + unravel the read/write/cache flags + */ + if(((PVRSRV_MEM_READ|PVRSRV_MEM_WRITE) & ui32MemFlags) == (PVRSRV_MEM_READ|PVRSRV_MEM_WRITE)) + { + /* read/write */ + ui32MMUFlags = 0; + } + else if(PVRSRV_MEM_READ & ui32MemFlags) + { + /* read only */ + ui32MMUFlags |= SGX_MMU_PTE_READONLY; + } + else if(PVRSRV_MEM_WRITE & ui32MemFlags) + { + /* write only */ + ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY; + } + + /* cache coherency */ + if(PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags) + { + ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT; + } + +#if !defined(FIX_HW_BRN_25503) + /* EDM protection */ + if(PVRSRV_MEM_EDM_PROTECT & ui32MemFlags) + { + ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT; + } +#endif + + /* + we receive a device physical address for the page that is to be mapped + and a device virtual address representing where it should be mapped to + */ + + /* find the index/offset in PD entries */ + ui32Index = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift; + + /* and advance to the first PT info list */ + ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index]; + + CheckPT(ppsPTInfoList[0]); + + /* find the index/offset of the first PT in the first PT page */ + ui32Index = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift; + + /* setup pointer to the first entry in the PT page */ + pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr; + +#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + { + IMG_UINT32 uTmp = pui32Tmp[ui32Index]; + + /* Is the current 
page already valid? (should not be unless it was allocated and not deallocated) */ +#if defined(FIX_HW_BRN_31620) + if ((uTmp & SGX_MMU_PTE_VALID) && ((DevVAddr.uiAddr & BRN31620_PDE_CACHE_FILL_MASK) != BRN31620_DUMMY_PAGE_OFFSET)) +#else + if ((uTmp & SGX_MMU_PTE_VALID) != 0) +#endif + + { + PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08X PDIdx:%u PTIdx:%u", + DevVAddr.uiAddr, + DevVAddr.uiAddr >> pMMUHeap->ui32PDShift, + ui32Index )); + PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page table entry value: 0x%08X", uTmp)); + PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Physical page to map: 0x%08X", DevPAddr.uiAddr)); +#if PT_DUMP + DumpPT(ppsPTInfoList[0]); +#endif + } +#if !defined(FIX_HW_BRN_31620) + PVR_ASSERT((uTmp & SGX_MMU_PTE_VALID) == 0); +#endif + } +#endif + + /* One more valid entry in the page table. */ + ppsPTInfoList[0]->ui32ValidPTECount++; + + MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr); + /* map in the physical page */ + pui32Tmp[ui32Index] = ((DevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + & ((~pMMUHeap->ui32DataPageMask)>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)) + | SGX_MMU_PTE_VALID + | ui32MMUFlags; + MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr); + CheckPT(ppsPTInfoList[0]); +} + + +/*! +****************************************************************************** + FUNCTION: MMU_MapScatter + + PURPOSE: Create a linear mapping for a range of pages at a specified + virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: psSysAddr - the device physical address of the page to + map. + In: uSize - size of memory range in bytes + In: ui32MemFlags - page table flags. 
+ In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapScatter (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR *psSysAddr, + IMG_SIZE_T uSize, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag) +{ +#if defined(PDUMP) + IMG_DEV_VIRTADDR MapBaseDevVAddr; +#endif /*PDUMP*/ + IMG_UINT32 uCount, i; + IMG_DEV_PHYADDR DevPAddr; + + PVR_ASSERT (pMMUHeap != IMG_NULL); + +#if defined(PDUMP) + MapBaseDevVAddr = DevVAddr; +#else + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif /*PDUMP*/ + + for (i=0, uCount=0; uCount<uSize; i++, uCount+=pMMUHeap->ui32DataPageSize) + { + IMG_SYS_PHYADDR sSysAddr; + + sSysAddr = psSysAddr[i]; + + + /* check the physical alignment of the memory to map */ + PVR_ASSERT((sSysAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0); + + DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr); + + MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags); + DevVAddr.uiAddr += pMMUHeap->ui32DataPageSize; + + PVR_DPF ((PVR_DBG_MESSAGE, + "MMU_MapScatter: devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x", + DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize)); + } + +#if defined(PDUMP) + MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + +/*! +****************************************************************************** + FUNCTION: MMU_MapPages + + PURPOSE: Create a linear mapping for a ranege of pages at a specified + virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: SysPAddr - the system physical address of the page to + map. + In: uSize - size of memory range in bytes + In: ui32MemFlags - page table flags. 
+ In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapPages (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR SysPAddr, + IMG_SIZE_T uSize, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag) +{ + IMG_DEV_PHYADDR DevPAddr; +#if defined(PDUMP) + IMG_DEV_VIRTADDR MapBaseDevVAddr; +#endif /*PDUMP*/ + IMG_UINT32 uCount; + IMG_UINT32 ui32VAdvance; + IMG_UINT32 ui32PAdvance; + + PVR_ASSERT (pMMUHeap != IMG_NULL); + + PVR_DPF ((PVR_DBG_MESSAGE, "MMU_MapPages: heap:%s, heap_id:%d devVAddr=%08X, SysPAddr=%08X, size=0x%x", + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->ui32HeapID, + DevVAddr.uiAddr, + SysPAddr.uiAddr, + uSize)); + + /* set the virtual and physical advance */ + ui32VAdvance = pMMUHeap->ui32DataPageSize; + ui32PAdvance = pMMUHeap->ui32DataPageSize; + +#if defined(PDUMP) + MapBaseDevVAddr = DevVAddr; +#else + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif /*PDUMP*/ + + DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr); + + /* check the physical alignment of the memory to map */ + PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0); + + /* + for dummy allocations there is only one physical + page backing the virtual range + */ + if(ui32MemFlags & PVRSRV_MEM_DUMMY) + { + ui32PAdvance = 0; + } + + for (uCount=0; uCount<uSize; uCount+=ui32VAdvance) + { + MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags); + DevVAddr.uiAddr += ui32VAdvance; + DevPAddr.uiAddr += ui32PAdvance; + } + +#if defined(PDUMP) + MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + + +/*! +****************************************************************************** + FUNCTION: MMU_MapPagesSparse + + PURPOSE: Create a linear mapping for a ranege of pages at a specified + virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. 
+ In: DevVAddr - the device virtual address. + In: SysPAddr - the system physical address of the page to + map. + In: ui32ChunkSize - Size of the chunk (must be page multiple) + In: ui32NumVirtChunks - Number of virtual chunks + In: ui32NumPhysChunks - Number of physical chunks + In: pabMapChunk - Mapping array + In: ui32MemFlags - page table flags. + In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapPagesSparse (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR SysPAddr, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag) +{ + IMG_DEV_PHYADDR DevPAddr; +#if defined(PDUMP) + IMG_DEV_VIRTADDR MapBaseDevVAddr; +#endif /*PDUMP*/ + IMG_UINT32 uCount; + IMG_UINT32 ui32VAdvance; + IMG_UINT32 ui32PAdvance; + IMG_SIZE_T uSizeVM = ui32ChunkSize * ui32NumVirtChunks; +#if !defined(PVRSRV_NEED_PVR_DPF) + PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); +#endif + + PVR_ASSERT (pMMUHeap != IMG_NULL); + + PVR_DPF ((PVR_DBG_MESSAGE, "MMU_MapPagesSparse: heap:%s, heap_id:%d devVAddr=%08X, SysPAddr=%08X, VM space=0x%x, PHYS space=0x%x", + pMMUHeap->psDevArena->pszName, + pMMUHeap->psDevArena->ui32HeapID, + DevVAddr.uiAddr, + SysPAddr.uiAddr, + uSizeVM, + ui32ChunkSize * ui32NumPhysChunks)); + + /* set the virtual and physical advance */ + ui32VAdvance = pMMUHeap->ui32DataPageSize; + ui32PAdvance = pMMUHeap->ui32DataPageSize; + +#if defined(PDUMP) + MapBaseDevVAddr = DevVAddr; +#else + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif /*PDUMP*/ + + DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr); + + /* check the physical alignment of the memory to map */ + PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0); + + /* + for dummy allocations there is only one physical + page backing the virtual range + */ + 
if(ui32MemFlags & PVRSRV_MEM_DUMMY) + { + ui32PAdvance = 0; + } + + for (uCount=0; uCount<uSizeVM; uCount+=ui32VAdvance) + { + if (pabMapChunk[uCount/ui32ChunkSize]) + { + MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags); + DevPAddr.uiAddr += ui32PAdvance; + } + DevVAddr.uiAddr += ui32VAdvance; + } + pMMUHeap->bHasSparseMappings = IMG_TRUE; + +#if defined(PDUMP) + MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSizeVM, IMG_FALSE, hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + +/*! +****************************************************************************** + FUNCTION: MMU_MapShadow + + PURPOSE: Create a mapping for a range of pages from either a CPU + virtual adddress, (or if NULL a hOSMemHandle) to a specified + device virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: MapBaseDevVAddr - A page aligned device virtual address + to start mapping from. + In: uByteSize - A page aligned mapping length in bytes. + In: CpuVAddr - A page aligned CPU virtual address. + In: hOSMemHandle - An alternative OS specific memory handle + for mapping RAM without a CPU virtual + address + Out: pDevVAddr - deprecated - It used to return a byte aligned + device virtual address corresponding to the + cpu virtual address (When CpuVAddr wasn't + constrained to be page aligned.) Now it just + returns MapBaseDevVAddr. Unaligned semantics + can easily be handled above this API if required. + In: hUniqueTag - A unique ID for use as a tag identifier + In: ui32MemFlags - page table flags. 
+ RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapShadow (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR MapBaseDevVAddr, + IMG_SIZE_T uByteSize, + IMG_CPU_VIRTADDR CpuVAddr, + IMG_HANDLE hOSMemHandle, + IMG_DEV_VIRTADDR *pDevVAddr, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag) +{ + IMG_UINT32 i; + IMG_UINT32 uOffset = 0; + IMG_DEV_VIRTADDR MapDevVAddr; + IMG_UINT32 ui32VAdvance; + IMG_UINT32 ui32PAdvance; + +#if !defined (PDUMP) + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif + + PVR_DPF ((PVR_DBG_MESSAGE, + "MMU_MapShadow: DevVAddr:%08X, Bytes:0x%x, CPUVAddr:%08X", + MapBaseDevVAddr.uiAddr, + uByteSize, + (IMG_UINTPTR_T)CpuVAddr)); + + /* set the virtual and physical advance */ + ui32VAdvance = pMMUHeap->ui32DataPageSize; + ui32PAdvance = pMMUHeap->ui32DataPageSize; + + /* note: can't do useful check on the CPU Addr other than it being at least 4k alignment */ + PVR_ASSERT(((IMG_UINTPTR_T)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0); + PVR_ASSERT(((IMG_UINT32)uByteSize & pMMUHeap->ui32DataPageMask) == 0); + pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr; + + /* + for dummy allocations there is only one physical + page backing the virtual range + */ + if(ui32MemFlags & PVRSRV_MEM_DUMMY) + { + ui32PAdvance = 0; + } + + /* Loop through cpu memory and map page by page */ + MapDevVAddr = MapBaseDevVAddr; + for (i=0; i<uByteSize; i+=ui32VAdvance) + { + IMG_CPU_PHYADDR CpuPAddr; + IMG_DEV_PHYADDR DevPAddr; + + if(CpuVAddr) + { + CpuPAddr = OSMapLinToCPUPhys (hOSMemHandle, + (IMG_VOID *)((IMG_UINTPTR_T)CpuVAddr + uOffset)); + } + else + { + CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset); + } + DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr); + + /* check the physical alignment of the memory to map */ + PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0); + + PVR_DPF ((PVR_DBG_MESSAGE, + "Offset=0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X", + uOffset, + 
(IMG_UINTPTR_T)CpuVAddr + uOffset, + CpuPAddr.uiAddr, + MapDevVAddr.uiAddr, + DevPAddr.uiAddr)); + + MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags); + + /* loop update */ + MapDevVAddr.uiAddr += ui32VAdvance; + uOffset += ui32PAdvance; + } + +#if defined(PDUMP) + MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE, hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + +/*! +****************************************************************************** + FUNCTION: MMU_MapShadowSparse + + PURPOSE: Create a mapping for a range of pages from either a CPU + virtual adddress, (or if NULL a hOSMemHandle) to a specified + device virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: MapBaseDevVAddr - A page aligned device virtual address + to start mapping from. + In: ui32ChunkSize - Size of the chunk (must be page multiple) + In: ui32NumVirtChunks - Number of virtual chunks + In: ui32NumPhysChunks - Number of physical chunks + In: pabMapChunk - Mapping array + In: CpuVAddr - A page aligned CPU virtual address. + In: hOSMemHandle - An alternative OS specific memory handle + for mapping RAM without a CPU virtual + address + Out: pDevVAddr - deprecated - It used to return a byte aligned + device virtual address corresponding to the + cpu virtual address (When CpuVAddr wasn't + constrained to be page aligned.) Now it just + returns MapBaseDevVAddr. Unaligned semantics + can easily be handled above this API if required. + In: hUniqueTag - A unique ID for use as a tag identifier + In: ui32MemFlags - page table flags. 
+ RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapShadowSparse (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR MapBaseDevVAddr, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + IMG_CPU_VIRTADDR CpuVAddr, + IMG_HANDLE hOSMemHandle, + IMG_DEV_VIRTADDR *pDevVAddr, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag) +{ + IMG_UINT32 i; + IMG_UINT32 uOffset = 0; + IMG_DEV_VIRTADDR MapDevVAddr; + IMG_UINT32 ui32VAdvance; + IMG_UINT32 ui32PAdvance; + IMG_SIZE_T uiSizeVM = ui32ChunkSize * ui32NumVirtChunks; + IMG_UINT32 ui32ChunkIndex = 0; + IMG_UINT32 ui32ChunkOffset = 0; +#if !defined(PVRSRV_NEED_PVR_DPF) + PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); +#endif +#if !defined (PDUMP) + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif + + PVR_DPF ((PVR_DBG_MESSAGE, + "MMU_MapShadowSparse: DevVAddr:%08X, VM space:0x%x, CPUVAddr:%08X PHYS space:0x%x", + MapBaseDevVAddr.uiAddr, + uiSizeVM, + (IMG_UINTPTR_T)CpuVAddr, + ui32ChunkSize * ui32NumPhysChunks)); + + /* set the virtual and physical advance */ + ui32VAdvance = pMMUHeap->ui32DataPageSize; + ui32PAdvance = pMMUHeap->ui32DataPageSize; + + /* note: can't do useful check on the CPU Addr other than it being at least 4k alignment */ + PVR_ASSERT(((IMG_UINTPTR_T)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0); + PVR_ASSERT(((IMG_UINT32)uiSizeVM & pMMUHeap->ui32DataPageMask) == 0); + pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr; + + /* Shouldn't come through the sparse interface */ + PVR_ASSERT((ui32MemFlags & PVRSRV_MEM_DUMMY) == 0); + + /* Loop through cpu memory and map page by page */ + MapDevVAddr = MapBaseDevVAddr; + for (i=0; i<uiSizeVM; i+=ui32VAdvance) + { + IMG_CPU_PHYADDR CpuPAddr; + IMG_DEV_PHYADDR DevPAddr; + + if (pabMapChunk[i/ui32ChunkSize]) + /*if (pabMapChunk[ui32ChunkIndex])*/ + { + if(CpuVAddr) + { + CpuPAddr = OSMapLinToCPUPhys (hOSMemHandle, + (IMG_VOID *)((IMG_UINTPTR_T)CpuVAddr + 
uOffset)); + } + else + { + CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset); + } + DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr); + + /* check the physical alignment of the memory to map */ + PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0); + + PVR_DPF ((PVR_DBG_MESSAGE, + "Offset=0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X", + uOffset, + (IMG_UINTPTR_T)CpuVAddr + uOffset, + CpuPAddr.uiAddr, + MapDevVAddr.uiAddr, + DevPAddr.uiAddr)); + + MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags); + uOffset += ui32PAdvance; + } + + /* loop update */ + MapDevVAddr.uiAddr += ui32VAdvance; + + if (ui32ChunkOffset == ui32ChunkSize) + { + ui32ChunkIndex++; + ui32ChunkOffset = 0; + } + } + + pMMUHeap->bHasSparseMappings = IMG_TRUE; +#if defined(PDUMP) + MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uiSizeVM, IMG_FALSE, hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + +/*! +****************************************************************************** + FUNCTION: MMU_UnmapPages + + PURPOSE: unmap pages and invalidate virtual address + + PARAMETERS: In: psMMUHeap - the mmu. + In: sDevVAddr - the device virtual address. 
+ In: ui32PageCount - page count + In: hUniqueTag - A unique ID for use as a tag identifier + + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_UnmapPages (MMU_HEAP *psMMUHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32PageCount, + IMG_HANDLE hUniqueTag) +{ + IMG_UINT32 uPageSize = psMMUHeap->ui32DataPageSize; + IMG_DEV_VIRTADDR sTmpDevVAddr; + IMG_UINT32 i; + IMG_UINT32 ui32PDIndex; + IMG_UINT32 ui32PTIndex; + IMG_UINT32 *pui32Tmp; + +#if !defined (PDUMP) + PVR_UNREFERENCED_PARAMETER(hUniqueTag); +#endif + + /* setup tmp devvaddr to base of allocation */ + sTmpDevVAddr = sDevVAddr; + + for(i=0; i<ui32PageCount; i++) + { + MMU_PT_INFO **ppsPTInfoList; + + /* find the index/offset in PD entries */ + ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift; + + /* and advance to the first PT info list */ + ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex]; + + /* find the index/offset of the first PT in the first PT page */ + ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift; + + /* Is the PT page valid? 
*/ + if ((!ppsPTInfoList[0]) && (!psMMUHeap->bHasSparseMappings)) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08X (VaddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u", + sTmpDevVAddr.uiAddr, + sDevVAddr.uiAddr, + i, + ui32PDIndex, + ui32PTIndex)); + + /* advance the sTmpDevVAddr by one page */ + sTmpDevVAddr.uiAddr += uPageSize; + + /* Try to unmap the remaining allocation pages */ + continue; + } + + CheckPT(ppsPTInfoList[0]); + + /* setup pointer to the first entry in the PT page */ + pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr; + + /* Decrement the valid page count only if the current page is valid*/ + if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) + { + ppsPTInfoList[0]->ui32ValidPTECount--; + } + else + { + PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08X (VAddrIni:0x%08X AllocPage:%u) PDIdx:%u PTIdx:%u", + sTmpDevVAddr.uiAddr, + sDevVAddr.uiAddr, + i, + ui32PDIndex, + ui32PTIndex)); + PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page table entry value: 0x%08X", pui32Tmp[ui32PTIndex])); + } + + /* The page table count should not go below zero */ + PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0); + + MakeKernelPageReadWrite(ppsPTInfoList[0]->PTPageCpuVAddr); +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* point the PT entry to the dummy data page */ + pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_VALID; +#else + /* invalidate entry */ +#if defined(FIX_HW_BRN_31620) + BRN31620InvalidatePageTableEntry(psMMUHeap->psMMUContext, ui32PDIndex, ui32PTIndex, &pui32Tmp[ui32PTIndex]); +#else + pui32Tmp[ui32PTIndex] = 0; +#endif +#endif + MakeKernelPageReadOnly(ppsPTInfoList[0]->PTPageCpuVAddr); + + CheckPT(ppsPTInfoList[0]); + + /* advance the sTmpDevVAddr by one page */ + sTmpDevVAddr.uiAddr += uPageSize; + } + + MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo); + 
+#if defined(PDUMP) + MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag); +#endif /* #if defined(PDUMP) */ +} + + +/*! +****************************************************************************** + FUNCTION: MMU_GetPhysPageAddr + + PURPOSE: extracts physical address from MMU page tables + + PARAMETERS: In: pMMUHeap - the mmu + PARAMETERS: In: sDevVPageAddr - the virtual address to extract physical + page mapping from + RETURNS: None +******************************************************************************/ +IMG_DEV_PHYADDR +MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr) +{ + IMG_UINT32 *pui32PageTable; + IMG_UINT32 ui32Index; + IMG_DEV_PHYADDR sDevPAddr; + MMU_PT_INFO **ppsPTInfoList; + + /* find the index/offset in PD entries */ + ui32Index = sDevVPageAddr.uiAddr >> pMMUHeap->ui32PDShift; + + /* and advance to the first PT info list */ + ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index]; + if (!ppsPTInfoList[0]) + { + /* Heaps with sparse mappings are allowed invalid pages */ + if (!pMMUHeap->bHasSparseMappings) + { + PVR_DPF((PVR_DBG_ERROR,"MMU_GetPhysPageAddr: Not mapped in at 0x%08x", sDevVPageAddr.uiAddr)); + } + sDevPAddr.uiAddr = 0; + return sDevPAddr; + } + + /* find the index/offset of the first PT in the first PT page */ + ui32Index = (sDevVPageAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift; + + /* setup pointer to the first entry in the PT page */ + pui32PageTable = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr; + + /* read back physical page */ + sDevPAddr.uiAddr = pui32PageTable[ui32Index]; + + /* Mask off non-address bits */ + sDevPAddr.uiAddr &= ~(pMMUHeap->ui32DataPageMask>>SGX_MMU_PTE_ADDR_ALIGNSHIFT); + + /* and align the address */ + sDevPAddr.uiAddr <<= SGX_MMU_PTE_ADDR_ALIGNSHIFT; + + return sDevPAddr; +} + + +IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext) +{ + return (pMMUContext->sPDDevPAddr); +} + + +/*! 
+****************************************************************************** + FUNCTION: SGXGetPhysPageAddr + + PURPOSE: Gets DEV and CPU physical address of sDevVAddr + + PARAMETERS: In: hDevMemHeap - device mem heap handle + PARAMETERS: In: sDevVAddr - the base virtual address to unmap from + PARAMETERS: Out: pDevPAddr - DEV physical address + PARAMETERS: Out: pCpuPAddr - CPU physical address + RETURNS: None +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR SGXGetPhysPageAddrKM (IMG_HANDLE hDevMemHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEV_PHYADDR *pDevPAddr, + IMG_CPU_PHYADDR *pCpuPAddr) +{ + MMU_HEAP *pMMUHeap; + IMG_DEV_PHYADDR DevPAddr; + + /* + Get MMU Heap From hDevMemHeap + */ + pMMUHeap = (MMU_HEAP*)BM_GetMMUHeap(hDevMemHeap); + + DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr); + pCpuPAddr->uiAddr = DevPAddr.uiAddr; /* SysDevPAddrToCPUPAddr(DevPAddr) */ + pDevPAddr->uiAddr = DevPAddr.uiAddr; + + return (pDevPAddr->uiAddr != 0) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS; +} + + +/*! +****************************************************************************** + FUNCTION: SGXGetMMUPDAddrKM + + PURPOSE: Gets PD device physical address of hDevMemContext + + PARAMETERS: In: hDevCookie - device cookie + PARAMETERS: In: hDevMemContext - memory context + PARAMETERS: Out: psPDDevPAddr - MMU PD address + RETURNS: None +******************************************************************************/ +PVRSRV_ERROR SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + IMG_DEV_PHYADDR *psPDDevPAddr) +{ + if (!hDevCookie || !hDevMemContext || !psPDDevPAddr) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* return the address */ + *psPDDevPAddr = ((BM_CONTEXT*)hDevMemContext)->psMMUContext->sPDDevPAddr; + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + FUNCTION: MMU_BIFResetPDAlloc + + PURPOSE: Allocate a dummy Page Directory, Page Table and Page which can + be used for dynamic dummy page mapping during SGX reset. + Note: since this is only used for hardware recovery, no + pdumping is performed. + + PARAMETERS: In: psDevInfo - device info + RETURNS: PVRSRV_OK or error +******************************************************************************/ +PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + PVRSRV_ERROR eError; + SYS_DATA *psSysData; + RA_ARENA *psLocalDevMemArena; + IMG_HANDLE hOSMemHandle = IMG_NULL; + IMG_BYTE *pui8MemBlock = IMG_NULL; + IMG_SYS_PHYADDR sMemBlockSysPAddr; + IMG_CPU_PHYADDR sMemBlockCpuPAddr; + + SysAcquireData(&psSysData); + + psLocalDevMemArena = psSysData->apsLocalDevMemArena[0]; + + /* allocate 3 pages - for the PD, PT and dummy page */ + if(psLocalDevMemArena == IMG_NULL) + { + /* UMA system */ + eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + 3 * SGX_MMU_PAGE_SIZE, + SGX_MMU_PAGE_SIZE, + IMG_NULL, + 0, + IMG_NULL, + (IMG_VOID **)&pui8MemBlock, + &hOSMemHandle); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed")); + return eError; + } + + /* translate address to device physical */ + if(pui8MemBlock) + { + sMemBlockCpuPAddr = OSMapLinToCPUPhys(hOSMemHandle, + pui8MemBlock); + } + else + { + /* This isn't used in all cases since not all ports currently support + * OSMemHandleToCpuPAddr() */ + sMemBlockCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, 0); + } + } + else + { + /* non-UMA system */ + + if(RA_Alloc(psLocalDevMemArena, + 3 * SGX_MMU_PAGE_SIZE, + IMG_NULL, + IMG_NULL, + 0, + SGX_MMU_PAGE_SIZE, + 0, + IMG_NULL, + 0, + &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* 
derive the CPU virtual address */ + sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr); + pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr, + SGX_MMU_PAGE_SIZE * 3, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + &hOSMemHandle); + if(!pui8MemBlock) + { + PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR failed to map page tables")); + return PVRSRV_ERROR_BAD_MAPPING; + } + } + + psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle; + psDevInfo->sBIFResetPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr); + psDevInfo->sBIFResetPTDevPAddr.uiAddr = psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE; + psDevInfo->sBIFResetPageDevPAddr.uiAddr = psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE; + /* override pointer cast warnings */ + /* PRQA S 3305,509 2 */ + psDevInfo->pui32BIFResetPD = (IMG_UINT32 *)pui8MemBlock; + psDevInfo->pui32BIFResetPT = (IMG_UINT32 *)(pui8MemBlock + SGX_MMU_PAGE_SIZE); + + /* Invalidate entire PD and PT. */ + OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE); + OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE); + /* Fill dummy page with markers. */ + OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, SGX_MMU_PAGE_SIZE); + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + FUNCTION: MMU_BIFResetPDFree + + PURPOSE: Free resources allocated in MMU_BIFResetPDAlloc. 
+ + PARAMETERS: In: psDevInfo - device info + RETURNS: +******************************************************************************/ +IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + SYS_DATA *psSysData; + RA_ARENA *psLocalDevMemArena; + IMG_SYS_PHYADDR sPDSysPAddr; + + SysAcquireData(&psSysData); + + psLocalDevMemArena = psSysData->apsLocalDevMemArena[0]; + + /* free the page directory */ + if(psLocalDevMemArena == IMG_NULL) + { + OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, + 3 * SGX_MMU_PAGE_SIZE, + psDevInfo->pui32BIFResetPD, + psDevInfo->hBIFResetPDOSMemHandle); + } + else + { + OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD, + 3 * SGX_MMU_PAGE_SIZE, + PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY, + psDevInfo->hBIFResetPDOSMemHandle); + + sPDSysPAddr = SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->sBIFResetPDDevPAddr); + RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE); + } +} + +IMG_VOID MMU_CheckFaultAddr(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDDevPAddr, IMG_UINT32 ui32FaultAddr) +{ + MMU_CONTEXT *psMMUContext = psDevInfo->pvMMUContextList; + + while (psMMUContext && (psMMUContext->sPDDevPAddr.uiAddr != ui32PDDevPAddr)) + { + psMMUContext = psMMUContext->psNext; + } + + if (psMMUContext) + { + IMG_UINT32 ui32PTIndex; + IMG_UINT32 ui32PDIndex; + + PVR_LOG(("Found MMU context for page fault 0x%08x", ui32FaultAddr)); + PVR_LOG(("GPU memory context is for PID=%d (%s)", psMMUContext->ui32PID, psMMUContext->szName)); + + ui32PTIndex = (ui32FaultAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; + ui32PDIndex = (ui32FaultAddr & SGX_MMU_PD_MASK) >> (SGX_MMU_PT_SHIFT + SGX_MMU_PAGE_SHIFT); + + if (psMMUContext->apsPTInfoList[ui32PDIndex]) + { + if (psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr) + { + IMG_UINT32 *pui32Ptr = psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr; + IMG_UINT32 ui32PTE = pui32Ptr[ui32PTIndex]; + + PVR_LOG(("PDE valid: PTE = 0x%08x (PhysAddr = 0x%08x, %s)", + ui32PTE, 
+ ui32PTE & SGX_MMU_PTE_ADDR_MASK, + ui32PTE & SGX_MMU_PTE_VALID?"valid":"Invalid")); + } + else + { + PVR_LOG(("Found PT info but no CPU address")); + } + } + else + { + PVR_LOG(("No PDE found")); + } + } +} + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) +/*! +****************************************************************************** + FUNCTION: MMU_MapExtSystemCacheRegs + + PURPOSE: maps external system cache control registers into SGX MMU + + PARAMETERS: In: psDeviceNode - device node + RETURNS: +******************************************************************************/ +PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + IMG_UINT32 *pui32PT; + PVRSRV_SGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32PDIndex; + IMG_UINT32 ui32PTIndex; + PDUMP_MMU_ATTRIB sMMUAttrib; + + psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + + sMMUAttrib = psDevInfo->sMMUAttrib; +#if defined(PDUMP) + MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode, + SGX_MMU_PAGE_MASK, + SGX_MMU_PT_SIZE * sizeof(IMG_UINT32)); +#endif + +#if defined(PDUMP) + { + IMG_CHAR szScript[128]; + + sprintf(szScript, "MALLOC :EXTSYSCACHE:PA_%08X%08X %u %u 0x%08X\r\n", 0, psDevInfo->sExtSysCacheRegsDevPBase.uiAddr, SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, psDevInfo->sExtSysCacheRegsDevPBase.uiAddr); + PDumpOSWriteString2(szScript, PDUMP_FLAGS_CONTINUOUS); + } +#endif + + ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); + ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; + + pui32PT = (IMG_UINT32 *) psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr; + + MakeKernelPageReadWrite(pui32PT); + /* map the PT to the registers */ + pui32PT[ui32PTIndex] = (psDevInfo->sExtSysCacheRegsDevPBase.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_VALID; + MakeKernelPageReadOnly(pui32PT); +#if defined(PDUMP) + /* Add the entery to 
the PT */ + { + IMG_DEV_PHYADDR sDevPAddr; + IMG_CPU_PHYADDR sCpuPAddr; + IMG_UINT32 ui32PageMask; + IMG_UINT32 ui32PTE; + PVRSRV_ERROR eErr; + + PDUMP_GET_SCRIPT_AND_FILE_STRING(); + + ui32PageMask = sMMUAttrib.ui32PTSize - 1; + sCpuPAddr = OSMapLinToCPUPhys(psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->hPTPageOSMemHandle, &pui32PT[ui32PTIndex]); + sDevPAddr = SysCpuPAddrToDevPAddr(sMMUAttrib.sDevId.eDeviceType, sCpuPAddr); + ui32PTE = *((IMG_UINT32 *) (&pui32PT[ui32PTIndex])); + + eErr = PDumpOSBufprintf(hScript, + ui32MaxLenScript, + "WRW :%s:PA_%08X%08X:0x%08X :%s:PA_%08X%08X:0x%08X\r\n", + sMMUAttrib.sDevId.pszPDumpDevName, + (IMG_UINT32)(IMG_UINTPTR_T)PDUMP_PT_UNIQUETAG, + (sDevPAddr.uiAddr) & ~ui32PageMask, + (sDevPAddr.uiAddr) & ui32PageMask, + "EXTSYSCACHE", + (IMG_UINT32)(IMG_UINTPTR_T)PDUMP_PD_UNIQUETAG, + (ui32PTE & sMMUAttrib.ui32PDEMask) << sMMUAttrib.ui32PTEAlignShift, + ui32PTE & ~sMMUAttrib.ui32PDEMask); + if(eErr != PVRSRV_OK) + { + return eErr; + } + PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS); + } +#endif + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + FUNCTION: MMU_UnmapExtSystemCacheRegs + + PURPOSE: unmaps external system cache control registers + + PARAMETERS: In: psDeviceNode - device node + RETURNS: +******************************************************************************/ +PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + SYS_DATA *psSysData; + RA_ARENA *psLocalDevMemArena; + PVRSRV_SGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32PDIndex; + IMG_UINT32 ui32PTIndex; + IMG_UINT32 *pui32PT; + PDUMP_MMU_ATTRIB sMMUAttrib; + + psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + + sMMUAttrib = psDevInfo->sMMUAttrib; + +#if defined(PDUMP) + MMU_SetPDumpAttribs(&sMMUAttrib, psDeviceNode, + SGX_MMU_PAGE_MASK, + SGX_MMU_PT_SIZE * sizeof(IMG_UINT32)); +#endif + SysAcquireData(&psSysData); + + psLocalDevMemArena = psSysData->apsLocalDevMemArena[0]; + + /* unmap the MMU page table from the PD */ + ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); + ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; + + /* Only unmap it if the PT hasn't already been freed */ + if (psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]) + { + if (psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr) + { + pui32PT = (IMG_UINT32 *) psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->apsPTInfoList[ui32PDIndex]->PTPageCpuVAddr; + } + } + + MakeKernelPageReadWrite(pui32PT); + pui32PT[ui32PTIndex] = 0; + MakeKernelPageReadOnly(pui32PT); + + PDUMPMEMPTENTRIES(&sMMUAttrib, psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->hPDOSMemHandle, &pui32PT[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG); + + return PVRSRV_OK; +} +#endif + + +#if PAGE_TEST +/*! 
+****************************************************************************** + FUNCTION: PageTest + + PURPOSE: Tests page table memory, for use during device bring-up. + + PARAMETERS: In: void* pMem - page address (CPU mapped) + PARAMETERS: In: IMG_DEV_PHYADDR sDevPAddr - page device phys address + RETURNS: None, provides debug output and breaks if an error is detected. +******************************************************************************/ +static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr) +{ + volatile IMG_UINT32 ui32WriteData; + volatile IMG_UINT32 ui32ReadData; + volatile IMG_UINT32 *pMem32 = (volatile IMG_UINT32 *)pMem; + IMG_INT n; + IMG_BOOL bOK=IMG_TRUE; + + ui32WriteData = 0xffffffff; + + for (n=0; n<1024; n++) + { + pMem32[n] = ui32WriteData; + ui32ReadData = pMem32[n]; + + if (ui32WriteData != ui32ReadData) + { + // Mem fault + PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) )); + PVR_DBG_BREAK; + bOK = IMG_FALSE; + } + } + + ui32WriteData = 0; + + for (n=0; n<1024; n++) + { + pMem32[n] = ui32WriteData; + ui32ReadData = pMem32[n]; + + if (ui32WriteData != ui32ReadData) + { + // Mem fault + PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) )); + PVR_DBG_BREAK; + bOK = IMG_FALSE; + } + } + + if (bOK) + { + PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X is OK", sDevPAddr.uiAddr)); + } + else + { + PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X *** FAILED ***", sDevPAddr.uiAddr)); + } +} +#endif + +/****************************************************************************** + End of file (mmu.c) +******************************************************************************/ + + diff --git a/pvr-source/services4/srvkm/devices/sgx/mmu.h b/pvr-source/services4/srvkm/devices/sgx/mmu.h new file mode 100644 index 0000000..3c849fc --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/mmu.h @@ -0,0 
+1,501 @@ +/*************************************************************************/ /*! +@Title MMU Management +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements basic low level control of MMU. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _MMU_H_ +#define _MMU_H_ + +#include "sgxinfokm.h" + +/* +****************************************************************************** + FUNCTION: MMU_Initialise + + PURPOSE: Initialise the mmu module. + + PARAMETERS: None + RETURNS: PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR +MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr); + +/* +****************************************************************************** + FUNCTION: MMU_Finalise + + PURPOSE: Finalise the mmu module, deallocate all resources. + + PARAMETERS: None. + RETURNS: None. +******************************************************************************/ +IMG_VOID +MMU_Finalise (MMU_CONTEXT *psMMUContext); + + +/* +****************************************************************************** + FUNCTION: MMU_InsertHeap + + PURPOSE: Inserts shared heap into the specified context + from the kernel context + + PARAMETERS: None. + RETURNS: None. +******************************************************************************/ +IMG_VOID +MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap); + +/* +****************************************************************************** + FUNCTION: MMU_Create + + PURPOSE: Create an mmu device. 
+ + PARAMETERS: In: psMMUContext - + In: psDevArena - + Out: ppsVMArena + RETURNS: MMU_HEAP +******************************************************************************/ +MMU_HEAP * +MMU_Create (MMU_CONTEXT *psMMUContext, + DEV_ARENA_DESCRIPTOR *psDevArena, + RA_ARENA **ppsVMArena, + PDUMP_MMU_ATTRIB **ppsMMUAttrib); + +/* +****************************************************************************** + FUNCTION: MMU_Delete + + PURPOSE: Delete an mmu device. + + PARAMETERS: In: pMMUHeap - The mmu to delete. + RETURNS: +******************************************************************************/ +IMG_VOID +MMU_Delete (MMU_HEAP *pMMUHeap); + +/* +****************************************************************************** + FUNCTION: MMU_Alloc + PURPOSE: Allocate space in an mmu's virtual address space. + PARAMETERS: In: pMMUHeap - MMU to allocate on. + In: uSize - Size in bytes to allocate. + Out: pActualSize - If non null receives actual size allocated. + In: uFlags - Allocation flags. + In: uDevVAddrAlignment - Required alignment. + Out: pDevVAddr - Receives base address of allocation. + RETURNS: IMG_TRUE - Success + IMG_FALSE - Failure +******************************************************************************/ +IMG_BOOL +MMU_Alloc (MMU_HEAP *pMMUHeap, + IMG_SIZE_T uSize, + IMG_SIZE_T *pActualSize, + IMG_UINT32 uFlags, + IMG_UINT32 uDevVAddrAlignment, + IMG_DEV_VIRTADDR *pDevVAddr); + +/* +****************************************************************************** + FUNCTION: MMU_Free + PURPOSE: Frees space in an mmu's virtual address space. + PARAMETERS: In: pMMUHeap - MMU to free on. + In: DevVAddr - Base address of allocation. 
+ RETURNS: IMG_TRUE - Success + IMG_FALSE - Failure +******************************************************************************/ +IMG_VOID +MMU_Free (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_UINT32 ui32Size); + +/* +****************************************************************************** + FUNCTION: MMU_Enable + + PURPOSE: Enable an mmu. Establishes pages tables and takes the mmu out + of bypass and waits for the mmu to acknowledge enabled. + + PARAMETERS: In: pMMUHeap - the mmu + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_Enable (MMU_HEAP *pMMUHeap); + +/* +****************************************************************************** + FUNCTION: MMU_Disable + + PURPOSE: Disable an mmu, takes the mmu into bypass. + + PARAMETERS: In: pMMUHeap - the mmu + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_Disable (MMU_HEAP *pMMUHeap); + +/* +****************************************************************************** + FUNCTION: MMU_MapPages + + PURPOSE: Create a mapping for a range of pages from a device physical + adddress to a specified device virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: SysPAddr - the system physical address of the page to map. + In: uSize - size of memory range in bytes + In: ui32MemFlags - page table flags. 
+ In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapPages (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR SysPAddr, + IMG_SIZE_T uSize, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + +/* +****************************************************************************** + FUNCTION: MMU_MapPagesSparse + + PURPOSE: Create a mapping for a range of pages from a device physical + adddress to a specified device virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: SysPAddr - the system physical address of the page to map. + In: ui32ChunkSize - Size of the chunk (must be page multiple) + In: ui32NumVirtChunks - Number of virtual chunks + In: ui32NumPhysChunks - Number of physical chunks + In: pabMapChunk - Mapping array + In: ui32MemFlags - page table flags. + In: hUniqueTag - A unique ID for use as a tag identifier + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapPagesSparse (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR SysPAddr, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + +/* +****************************************************************************** + FUNCTION: MMU_MapShadow + + PURPOSE: Create a mapping for a range of pages from a CPU virtual + adddress to a specified device virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: MapBaseDevVAddr - A page aligned device virtual address + to start mapping from. + In: uByteSize - A page aligned mapping length in bytes. + In: CpuVAddr - A page aligned CPU virtual address. 
+ In: hOSMemHandle - An alternative OS specific memory handle + for mapping RAM without a CPU virtual + address + Out: pDevVAddr - deprecated + In: hUniqueTag - A unique ID for use as a tag identifier + In: ui32MemFlags - page table flags. + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapShadow (MMU_HEAP * pMMUHeap, + IMG_DEV_VIRTADDR MapBaseDevVAddr, + IMG_SIZE_T uByteSize, + IMG_CPU_VIRTADDR CpuVAddr, + IMG_HANDLE hOSMemHandle, + IMG_DEV_VIRTADDR * pDevVAddr, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + +/* +****************************************************************************** + FUNCTION: MMU_MapShadowSparse + + PURPOSE: Create a mapping for a range of pages from a CPU virtual + adddress to a specified device virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: MapBaseDevVAddr - A page aligned device virtual address + to start mapping from. + In: ui32ChunkSize - Size of the chunk (must be page multiple) + In: ui32NumVirtChunks - Number of virtual chunks + In: ui32NumPhysChunks - Number of physical chunks + In: pabMapChunk - Mapping array + In: CpuVAddr - A page aligned CPU virtual address. + In: hOSMemHandle - An alternative OS specific memory handle + for mapping RAM without a CPU virtual + address + Out: pDevVAddr - deprecated + In: hUniqueTag - A unique ID for use as a tag identifier + In: ui32MemFlags - page table flags. 
+ RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapShadowSparse (MMU_HEAP * pMMUHeap, + IMG_DEV_VIRTADDR MapBaseDevVAddr, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL * pabMapChunk, + IMG_CPU_VIRTADDR CpuVAddr, + IMG_HANDLE hOSMemHandle, + IMG_DEV_VIRTADDR * pDevVAddr, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + +/* +****************************************************************************** + FUNCTION: MMU_UnmapPages + + PURPOSE: unmaps pages and invalidates virtual address. + + PARAMETERS: In: psMMUHeap - the mmu. + In: sDevVAddr - the device virtual address. + In: ui32PageCount - page count. + RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_UnmapPages (MMU_HEAP *psMMUHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32PageCount, + IMG_HANDLE hUniqueTag); + +/* +****************************************************************************** + FUNCTION: MMU_MapScatter + + PURPOSE: Create a mapping for a list of pages to a specified device + virtual address. + + PARAMETERS: In: pMMUHeap - the mmu. + In: DevVAddr - the device virtual address. + In: psSysAddr - the list of physical addresses of the pages to + map. 
+ RETURNS: None +******************************************************************************/ +IMG_VOID +MMU_MapScatter (MMU_HEAP *pMMUHeap, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR *psSysAddr, + IMG_SIZE_T uSize, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + + +/* +****************************************************************************** + FUNCTION: MMU_GetPhysPageAddr + + PURPOSE: extracts physical address from MMU page tables + + PARAMETERS: In: pMMUHeap - the mmu + PARAMETERS: In: sDevVPageAddr - the virtual address to extract physical + page mapping from + RETURNS: IMG_DEV_PHYADDR +******************************************************************************/ +IMG_DEV_PHYADDR +MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr); + + +/* +****************************************************************************** + FUNCTION: MMU_GetPDDevPAddr + + PURPOSE: returns PD given the MMU context (SGX to MMU API) + + PARAMETERS: In: pMMUContext - the mmu + RETURNS: IMG_DEV_PHYADDR +******************************************************************************/ +IMG_DEV_PHYADDR +MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext); + + +#ifdef SUPPORT_SGX_MMU_BYPASS +/* +****************************************************************************** + FUNCTION: EnableHostAccess + + PURPOSE: Enables Host accesses to device memory, by passing the device + MMU address translation + + PARAMETERS: In: psMMUContext + RETURNS: None +******************************************************************************/ +IMG_VOID +EnableHostAccess (MMU_CONTEXT *psMMUContext); + + +/* +****************************************************************************** + FUNCTION: DisableHostAccess + + PURPOSE: Disables Host accesses to device memory, by passing the device + MMU address translation + + PARAMETERS: In: psMMUContext + RETURNS: None +******************************************************************************/ +IMG_VOID +DisableHostAccess 
(MMU_CONTEXT *psMMUContext); +#endif + +/* +****************************************************************************** + FUNCTION: MMU_InvalidateDirectoryCache + + PURPOSE: Invalidates the page directory cache + + PARAMETERS: In: psDevInfo + RETURNS: None +******************************************************************************/ +IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo); + +/* +****************************************************************************** + FUNCTION: MMU_BIFResetPDAlloc + + PURPOSE: Allocate a dummy Page Directory which causes all virtual + addresses to page fault. + + PARAMETERS: In: psDevInfo - device info + RETURNS: PVRSRV_OK or error +******************************************************************************/ +PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo); + +/* +****************************************************************************** + FUNCTION: MMU_BIFResetPDFree + + PURPOSE: Free resources allocated in MMU_BIFResetPDAlloc. 
+ + PARAMETERS: In: psDevInfo - device info + RETURNS: +******************************************************************************/ +IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo); + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) +/* +****************************************************************************** + FUNCTION: MMU_MapExtSystemCacheRegs + + PURPOSE: maps external system cache control registers into SGX MMU + + PARAMETERS: In: psDeviceNode - device node + RETURNS: +******************************************************************************/ +PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode); + +/* +****************************************************************************** + FUNCTION: MMU_UnmapExtSystemCacheRegs + + PURPOSE: unmaps external system cache control registers + + PARAMETERS: In: psDeviceNode - device node + RETURNS: +******************************************************************************/ +PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode); +#endif /* #if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) */ + +/* +****************************************************************************** + FUNCTION: MMU_IsHeapShared + + PURPOSE: Is this heap shared? + PARAMETERS: In: pMMU_Heap + RETURNS: true if heap is shared +******************************************************************************/ +IMG_BOOL MMU_IsHeapShared(MMU_HEAP* pMMU_Heap); + +#if defined(FIX_HW_BRN_31620) +/* +****************************************************************************** + FUNCTION: MMU_GetCacheFlushRange + + PURPOSE: Gets device physical address of the mmu context. 
+ + PARAMETERS: In: pMMUContext - the mmu context + Out: pui32RangeMask - Bit mask showing which PD cache + lines have changed + RETURNS: None +******************************************************************************/ +IMG_VOID MMU_GetCacheFlushRange(MMU_CONTEXT *pMMUContext, IMG_UINT32 *pui32RangeMask); + +/* +****************************************************************************** + FUNCTION: MMU_GetPDPhysAddr + + PURPOSE: Gets device physical address of the mmu contexts PD. + + PARAMETERS: In: pMMUContext - the mmu context + Out: psDevPAddr - Address of PD + RETURNS: None +******************************************************************************/ +IMG_VOID MMU_GetPDPhysAddr(MMU_CONTEXT *pMMUContext, IMG_DEV_PHYADDR *psDevPAddr); + +#endif + + +IMG_VOID MMU_CheckFaultAddr(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDDevPAddr, IMG_UINT32 ui32RegVal); + +#if defined(PDUMP) +/* +****************************************************************************** + FUNCTION: MMU_GetPDumpContextID + + PURPOSE: translates device mem context to unique pdump identifier + + PARAMETERS: In: hDevMemContext - device memory per-process context + RETURNS: context identifier used internally in pdump +******************************************************************************/ +IMG_UINT32 MMU_GetPDumpContextID(IMG_HANDLE hDevMemContext); +#endif /* #ifdef PDUMP */ + +#endif /* #ifndef _MMU_H_ */ diff --git a/pvr-source/services4/srvkm/devices/sgx/pb.c b/pvr-source/services4/srvkm/devices/sgx/pb.c new file mode 100644 index 0000000..4ed18bb --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/pb.c @@ -0,0 +1,493 @@ +/*************************************************************************/ /*! +@Title Parameter Buffer management functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include <stddef.h> + +#include "services_headers.h" +#include "sgx_bridge_km.h" +#include "sgxapi_km.h" +#include "sgxinfo.h" +#include "sgxinfokm.h" +#include "pvr_bridge_km.h" +#include "pdump_km.h" +#include "sgxutils.h" + +#if !defined(__linux__) && !defined(__QNXNTO__) +#pragma message("FIXME: Review use of OS_PAGEABLE vs OS_NON_PAGEABLE") +#endif + +#include "lists.h" + +static IMPLEMENT_LIST_INSERT(PVRSRV_STUB_PBDESC) +static IMPLEMENT_LIST_REMOVE(PVRSRV_STUB_PBDESC) + +static PRESMAN_ITEM psResItemCreateSharedPB = IMG_NULL; +static PVRSRV_PER_PROCESS_DATA *psPerProcCreateSharedPB = IMG_NULL; + +static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy); +static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy); + +/* override level pointer indirection */ +/* PRQA S 5102 12 */ +IMG_EXPORT PVRSRV_ERROR +SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevCookie, + IMG_BOOL bLockOnFailure, + IMG_UINT32 ui32TotalPBSize, + IMG_HANDLE *phSharedPBDesc, + PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos, + IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount) +{ + PVRSRV_STUB_PBDESC *psStubPBDesc; + PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos=IMG_NULL; + PVRSRV_SGXDEV_INFO *psSGXDevInfo; + PVRSRV_ERROR eError; + + psSGXDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice; + + psStubPBDesc = psSGXDevInfo->psStubPBDescListKM; + if (psStubPBDesc != IMG_NULL) + { + IMG_UINT32 i; + PRESMAN_ITEM psResItem; + + if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize) + { + PVR_DPF((PVR_DBG_WARNING, + "SGXFindSharedPBDescKM: 
Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored", + ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize)); + } + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO *) + * psStubPBDesc->ui32SubKernelMemInfosCount, + (IMG_VOID **)&ppsSharedPBDescSubKernelMemInfos, + IMG_NULL, + "Array of Kernel Memory Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: OSAllocMem failed")); + + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto ExitNotFound; + } + + psResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_SHARED_PB_DESC, + psStubPBDesc, + 0, + &SGXCleanupSharedPBDescCallback); + + if (psResItem == IMG_NULL) + { + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDesc->ui32SubKernelMemInfosCount, + ppsSharedPBDescSubKernelMemInfos, + 0); + /*not nulling pointer, out of scope*/ + + PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed")); + + eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE; + goto ExitNotFound; + } + + *ppsSharedPBDescKernelMemInfo = psStubPBDesc->psSharedPBDescKernelMemInfo; + *ppsHWPBDescKernelMemInfo = psStubPBDesc->psHWPBDescKernelMemInfo; + *ppsBlockKernelMemInfo = psStubPBDesc->psBlockKernelMemInfo; + *ppsHWBlockKernelMemInfo = psStubPBDesc->psHWBlockKernelMemInfo; + + *ui32SharedPBDescSubKernelMemInfosCount = + psStubPBDesc->ui32SubKernelMemInfosCount; + + *pppsSharedPBDescSubKernelMemInfos = ppsSharedPBDescSubKernelMemInfos; + + for(i=0; i<psStubPBDesc->ui32SubKernelMemInfosCount; i++) + { + ppsSharedPBDescSubKernelMemInfos[i] = + psStubPBDesc->ppsSubKernelMemInfos[i]; + } + + psStubPBDesc->ui32RefCount++; + *phSharedPBDesc = (IMG_HANDLE)psResItem; + return PVRSRV_OK; + } + + eError = PVRSRV_OK; + if (bLockOnFailure) + { + if (psResItemCreateSharedPB == IMG_NULL) + { + psResItemCreateSharedPB = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, + 
psPerProc, + 0, + &SGXCleanupSharedPBDescCreateLockCallback); + + if (psResItemCreateSharedPB == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed")); + + eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE; + goto ExitNotFound; + } + PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL); + psPerProcCreateSharedPB = psPerProc; + } + else + { + eError = PVRSRV_ERROR_PROCESSING_BLOCKED; + } + } +ExitNotFound: + *phSharedPBDesc = IMG_NULL; + + return eError; +} + + +static PVRSRV_ERROR +SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC *psStubPBDescIn) +{ + /*PVRSRV_STUB_PBDESC **ppsStubPBDesc;*/ + IMG_UINT32 i; + PVRSRV_DEVICE_NODE *psDeviceNode; + + psDeviceNode = (PVRSRV_DEVICE_NODE*)psStubPBDescIn->hDevCookie; + + psStubPBDescIn->ui32RefCount--; + if (psStubPBDescIn->ui32RefCount == 0) + { + IMG_DEV_VIRTADDR sHWPBDescDevVAddr = psStubPBDescIn->sHWPBDescDevVAddr; + List_PVRSRV_STUB_PBDESC_Remove(psStubPBDescIn); + for(i=0 ; i<psStubPBDescIn->ui32SubKernelMemInfosCount; i++) + { + PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, + psStubPBDescIn->ppsSubKernelMemInfos[i]); + } + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDescIn->ui32SubKernelMemInfosCount, + psStubPBDescIn->ppsSubKernelMemInfos, + 0); + psStubPBDescIn->ppsSubKernelMemInfos = IMG_NULL; + + PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psBlockKernelMemInfo); + + PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWBlockKernelMemInfo); + + PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWPBDescKernelMemInfo); + + PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psSharedPBDescKernelMemInfo); + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_STUB_PBDESC), + psStubPBDescIn, + 0); + /*not nulling pointer, copy on stack*/ + + /* signal the microkernel to clear its sTAHWPBDesc and s3DHWPBDesc values in sTA3DCtl */ + SGXCleanupRequest(psDeviceNode, + &sHWPBDescDevVAddr, + PVRSRV_CLEANUPCMD_PB, + 
CLEANUP_WITH_POLL); + } + return PVRSRV_OK; + /*return PVRSRV_ERROR_INVALID_PARAMS;*/ +} + +static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy) +{ + PVRSRV_STUB_PBDESC *psStubPBDesc = (PVRSRV_STUB_PBDESC *)pvParam; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + return SGXCleanupSharedPBDescKM(psStubPBDesc); +} + +static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bDummy) +{ +#ifdef DEBUG + PVRSRV_PER_PROCESS_DATA *psPerProc = (PVRSRV_PER_PROCESS_DATA *)pvParam; + PVR_ASSERT(psPerProc == psPerProcCreateSharedPB); +#else + PVR_UNREFERENCED_PARAMETER(pvParam); +#endif + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bDummy); + + psPerProcCreateSharedPB = IMG_NULL; + psResItemCreateSharedPB = IMG_NULL; + + return PVRSRV_OK; +} + + +IMG_EXPORT PVRSRV_ERROR +SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc) +{ + PVR_ASSERT(hSharedPBDesc != IMG_NULL); + + return ResManFreeResByPtr(hSharedPBDesc, CLEANUP_WITH_POLL); +} + + +IMG_EXPORT PVRSRV_ERROR +SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevCookie, + PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo, + IMG_UINT32 ui32TotalPBSize, + IMG_HANDLE *phSharedPBDesc, + PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos, + IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount, + IMG_DEV_VIRTADDR sHWPBDescDevVAddr) +{ + PVRSRV_STUB_PBDESC *psStubPBDesc=IMG_NULL; + PVRSRV_ERROR eRet = PVRSRV_ERROR_INVALID_PERPROC; + IMG_UINT32 i; + PVRSRV_SGXDEV_INFO *psSGXDevInfo; + PRESMAN_ITEM psResItem; + + /* + * The caller must have previously called SGXFindSharedPBDesc with + * bLockOnFailure set, and not managed to find a suitable shared PB. 
+ */ + if (psPerProcCreateSharedPB != psPerProc) + { + goto NoAdd; + } + else + { + PVR_ASSERT(psResItemCreateSharedPB != IMG_NULL); + + ResManFreeResByPtr(psResItemCreateSharedPB, CLEANUP_WITH_POLL); + + PVR_ASSERT(psResItemCreateSharedPB == IMG_NULL); + PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL); + } + + psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice; + + psStubPBDesc = psSGXDevInfo->psStubPBDescListKM; + if (psStubPBDesc != IMG_NULL) + { + if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize) + { + PVR_DPF((PVR_DBG_WARNING, + "SGXAddSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored", + ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize)); + + } + + /* + * We make the caller think the add was successful, + * but return the existing shared PB desc rather than + * a new one. + */ + psResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_SHARED_PB_DESC, + psStubPBDesc, + 0, + &SGXCleanupSharedPBDescCallback); + if (psResItem == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "SGXAddSharedPBDescKM: " + "Failed to register existing shared " + "PBDesc with the resource manager")); + goto NoAddKeepPB; + } + + /* + * The caller will unreference the PB desc after + * a successful add, so up the reference count. 
+ */ + psStubPBDesc->ui32RefCount++; + + *phSharedPBDesc = (IMG_HANDLE)psResItem; + eRet = PVRSRV_OK; + goto NoAddKeepPB; + } + + if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_STUB_PBDESC), + (IMG_VOID **)&psStubPBDesc, + 0, + "Stub Parameter Buffer Description") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: Failed to alloc " + "StubPBDesc")); + eRet = PVRSRV_ERROR_OUT_OF_MEMORY; + goto NoAdd; + } + + + psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL; + + if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO *) + * ui32SharedPBDescSubKernelMemInfosCount, + (IMG_VOID **)&psStubPBDesc->ppsSubKernelMemInfos, + 0, + "Array of Kernel Memory Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: " + "Failed to alloc " + "StubPBDesc->ppsSubKernelMemInfos")); + eRet = PVRSRV_ERROR_OUT_OF_MEMORY; + goto NoAdd; + } + + if(PVRSRVDissociateMemFromResmanKM(psSharedPBDescKernelMemInfo) + != PVRSRV_OK) + { + goto NoAdd; + } + + if(PVRSRVDissociateMemFromResmanKM(psHWPBDescKernelMemInfo) + != PVRSRV_OK) + { + goto NoAdd; + } + + if(PVRSRVDissociateMemFromResmanKM(psBlockKernelMemInfo) + != PVRSRV_OK) + { + goto NoAdd; + } + + if(PVRSRVDissociateMemFromResmanKM(psHWBlockKernelMemInfo) + != PVRSRV_OK) + { + goto NoAdd; + } + + psStubPBDesc->ui32RefCount = 1; + psStubPBDesc->ui32TotalPBSize = ui32TotalPBSize; + psStubPBDesc->psSharedPBDescKernelMemInfo = psSharedPBDescKernelMemInfo; + psStubPBDesc->psHWPBDescKernelMemInfo = psHWPBDescKernelMemInfo; + psStubPBDesc->psBlockKernelMemInfo = psBlockKernelMemInfo; + psStubPBDesc->psHWBlockKernelMemInfo = psHWBlockKernelMemInfo; + + psStubPBDesc->ui32SubKernelMemInfosCount = + ui32SharedPBDescSubKernelMemInfosCount; + for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++) + { + psStubPBDesc->ppsSubKernelMemInfos[i] = ppsSharedPBDescSubKernelMemInfos[i]; + if(PVRSRVDissociateMemFromResmanKM(ppsSharedPBDescSubKernelMemInfos[i]) + != PVRSRV_OK) + { + 
PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: " + "Failed to dissociate shared PBDesc " + "from process")); + goto NoAdd; + } + } + + psStubPBDesc->sHWPBDescDevVAddr = sHWPBDescDevVAddr; + + psResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_SHARED_PB_DESC, + psStubPBDesc, + 0, + &SGXCleanupSharedPBDescCallback); + if (psResItem == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: " + "Failed to register shared PBDesc " + " with the resource manager")); + goto NoAdd; + } + psStubPBDesc->hDevCookie = hDevCookie; + + /* Finally everything was prepared successfully so link the new + * PB in to place. */ + List_PVRSRV_STUB_PBDESC_Insert(&(psSGXDevInfo->psStubPBDescListKM), + psStubPBDesc); + + *phSharedPBDesc = (IMG_HANDLE)psResItem; + + return PVRSRV_OK; + +NoAdd: + if(psStubPBDesc) + { + if(psStubPBDesc->ppsSubKernelMemInfos) + { + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount, + psStubPBDesc->ppsSubKernelMemInfos, + 0); + psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL; + } + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_STUB_PBDESC), + psStubPBDesc, + 0); + /*not nulling pointer, out of scope*/ + } + +NoAddKeepPB: + for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++) + { + PVRSRVFreeDeviceMemKM(hDevCookie, ppsSharedPBDescSubKernelMemInfos[i]); + } + + PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookie, psHWPBDescKernelMemInfo); + + PVRSRVFreeSharedSysMemoryKM(psBlockKernelMemInfo); + PVRSRVFreeDeviceMemKM(hDevCookie, psHWBlockKernelMemInfo); + + return eRet; +} + +/****************************************************************************** + End of file (pb.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/devices/sgx/sgx_bridge_km.h b/pvr-source/services4/srvkm/devices/sgx/sgx_bridge_km.h new file mode 100644 index 
0000000..f281c4e --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/sgx_bridge_km.h @@ -0,0 +1,279 @@ +/*************************************************************************/ /*! +@Title SGX Bridge Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Header for the SGX Bridge code +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__SGX_BRIDGE_KM_H__) +#define __SGX_BRIDGE_KM_H__ + +#include "sgxapi_km.h" +#include "sgxinfo.h" +#include "sgxinfokm.h" +#include "sgx_bridge.h" +#include "pvr_bridge.h" +#include "perproc.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +IMG_IMPORT +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK_KM *psKick); +#else +PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick); +#endif + +#if defined(SGX_FEATURE_2D_HARDWARE) +IMG_IMPORT +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK_KM *psKick); +#else +PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick); +#endif +#endif + +IMG_IMPORT +PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, +#if defined (SUPPORT_SID_INTERFACE) + SGX_CCB_KICK_KM *psCCBKick); +#else + SGX_CCB_KICK *psCCBKick); +#endif + +IMG_IMPORT +PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_DEV_PHYADDR *pDevPAddr, + IMG_CPU_PHYADDR *pCpuPAddr); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie, + IMG_HANDLE hDevMemContext, + IMG_DEV_PHYADDR *psPDDevPAddr); + +IMG_IMPORT +PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie, + SGX_CLIENT_INFO* psClientInfo); + 
+IMG_IMPORT +PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo, + SGX_MISC_INFO *psMiscInfo, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hDevMemContext); + +IMG_IMPORT +PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32ArraySize, + PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData, + IMG_UINT32 *pui32DataCount, + IMG_UINT32 *pui32ClockSpeed, + IMG_UINT32 *pui32HostTimeStamp); + +IMG_IMPORT +PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo, + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, + IMG_BOOL bWaitForComplete); + +IMG_IMPORT +PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, +#if defined (SUPPORT_SID_INTERFACE) + PVRSRV_HEAP_INFO_KM *pasHeapInfo, + IMG_DEV_PHYADDR *psPDDevPAddr); +#else + SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo); +#endif + +IMG_IMPORT +PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevHandle, +#if defined (SUPPORT_SID_INTERFACE) + SGX_BRIDGE_INIT_INFO_KM *psInitInfo); +#else + SGX_BRIDGE_INIT_INFO *psInitInfo); +#endif + +/*! + * ***************************************************************************** + * @brief Looks for a parameter buffer description that corresponds to + * a buffer of size ui32TotalPBSize, optionally taking the lock + * needed for SharedPBCreation on failure. + * + * Note if a PB Desc is found then its internal reference counter + * is automatically incremented. It is your responsability to call + * SGXUnrefSharedPBDesc to decrement this reference and free associated + * resources when you are done. + * + * If bLockOnFailure is set, and a suitable shared PB isn't found, + * an internal flag is set, allowing this process to create a + * shared PB. Any other process calling this function with + * bLockOnFailure set, will receive the return code + * PVRSRV_ERROR_PROCESSING_BLOCKED, indicating that it needs + * to retry the function call. The internal flag is cleared + * when this process creates a shared PB. 
+ * + * Note: You are responsible for freeing the list returned in + * pppsSharedPBDescSubKernelMemInfos + * via OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + * sizeof(PVRSRV_KERNEL_MEM_INFO *) + * * ui32SharedPBDescSubKernelMemInfosCount, + * ppsSharedPBDescSubKernelMemInfos, + * NULL); + * + * @param[in] psPerProc + * @param[in] hDevCookie + * @param[in] bLockOnError + * @param[in] ui32TotalPBSize + * @param[in] phSharedPBDesc + * @param[out] ppsSharedPBDescKernelMemInfo + * @param[out] ppsHWPBDescKernelMemInfo + * @param[out] pppsSharedPBDescSubKernelMemInfos A list of integral sub meminfos. + * @param[out] ui32SharedPBDescSubKernelMemInfosCount + * + * @return PVRSRV_ERROR + ********************************************************************************/ +/* disable QAC pointer level check for over 2 */ +/* PRQA S 5102++ */ +IMG_IMPORT PVRSRV_ERROR +SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevCookie, + IMG_BOOL bLockOnFailure, + IMG_UINT32 ui32TotalPBSize, + IMG_HANDLE *phSharedPBDesc, + PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos, + IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount); + +/*! + * ***************************************************************************** + * @brief Decrements the reference counter and frees all userspace resources + * associated with a SharedPBDesc. + * + * @param hSharedPBDesc + * + * @return PVRSRV_ERROR + ********************************************************************************/ +IMG_IMPORT PVRSRV_ERROR +SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc); + +/*! + * ***************************************************************************** + * @brief Links a new SharedPBDesc into a kernel managed list that can + * then be queried by other clients. 
+ * + * As a side affect this function also dissociates the SharedPBDesc + * from the calling process so that the memory won't be freed if the + * process dies/exits. (The kernel assumes responsability over the + * memory at the same time) + * + * As well as the psSharedPBDescKernelMemInfo you must also pass + * a complete list of other meminfos that are integral to the + * shared PB description. (Although the kernel doesn't have direct + * access to the shared PB desc it still needs to be able to + * clean up all the associated resources when it is no longer + * in use.) + * + * If the dissociation fails then all the memory associated with + * the psSharedPBDescKernelMemInfo and all entries in psKernelMemInfos + * will be freed by kernel services! Because of this, you are + * responsible for freeing the corresponding client meminfos _before_ + * calling SGXAddSharedPBDescKM. + * + * This function will return an error unless a succesful call to + * SGXFindSharedPBDesc, with bLockOnFailure set, has been made. + * + * @param psPerProc + * @param hDevCookie + * @param psSharedPBDescKernelMemInfo + * @param psHWPBDescKernelMemInfo + * @param psBlockKernelMemInfo + * @param ui32TotalPBSize The size of the associated parameter buffer + * @param ppsSharedPBDescSubKernelMemInfos A list of other meminfos integral to + * the shared PB description. 
+ * @param ui32SharedPBDescSubKernelMemInfosCount The number of entires in + * psKernelMemInfos + * @param sHWPBDescDevVAddr The device virtual address of the HWPBDesc + * + * @return PVRSRV_ERROR + ********************************************************************************/ +IMG_IMPORT PVRSRV_ERROR +SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevCookie, + PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo, + PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo, + IMG_UINT32 ui32TotalPBSize, + IMG_HANDLE *phSharedPBDesc, + PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos, + IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount, + IMG_DEV_VIRTADDR sHWPBDescDevVAddr); + + +/*! + * ***************************************************************************** + * @brief Gets device information that is not intended to be passed + on beyond the srvclient libs. + * + * @param[in] hDevCookie + * @param[out] psSGXInternalDevInfo + * + * @return + ********************************************************************************/ +IMG_IMPORT PVRSRV_ERROR +SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie, +#if defined (SUPPORT_SID_INTERFACE) + SGX_INTERNAL_DEVINFO_KM *psSGXInternalDevInfo); +#else + SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo); +#endif + +#if defined (__cplusplus) +} +#endif + +#endif /* __SGX_BRIDGE_KM_H__ */ + +/****************************************************************************** + End of file (sgx_bridge_km.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxconfig.h b/pvr-source/services4/srvkm/devices/sgx/sgxconfig.h new file mode 100644 index 0000000..b9ebab9 --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/sgxconfig.h @@ -0,0 +1,481 @@ +/*************************************************************************/ /*! 
+@Title device configuration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __SGXCONFIG_H__ +#define __SGXCONFIG_H__ + +#include "sgxdefs.h" + +#define DEV_DEVICE_TYPE PVRSRV_DEVICE_TYPE_SGX +#define DEV_DEVICE_CLASS PVRSRV_DEVICE_CLASS_3D + +#define DEV_MAJOR_VERSION 1 +#define DEV_MINOR_VERSION 0 + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) +#define SGX_KERNEL_DATA_HEAP_OFFSET 0x00001000 +#else +#define SGX_KERNEL_DATA_HEAP_OFFSET 0x00000000 +#endif + +#if !defined(ION_HEAP_SIZE) && defined(SUPPORT_ION) + /* Default the Ion heap to 16MB */ + #define ION_HEAP_SIZE 0x01000000 +#else + #define ION_HEAP_SIZE 0 +#endif + + +#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 32 +#if defined(FIX_HW_BRN_31620) + #if defined(SGX_FEATURE_2D_HARDWARE) + #define SGX_2D_HEAP_BASE 0x04000000 + #define SGX_2D_HEAP_SIZE (0x08000000-0x04000000-0x00001000) + #endif + + #define SGX_GENERAL_HEAP_BASE 0x08000000 + #define SGX_GENERAL_HEAP_SIZE (0xB8000000-0x00001000) + + /* + * For hybrid PB we have to split virtual PB range between the shared + * PB and percontext PB due to the fact we only have one heap config + * per device. + * If hybrid PB is enabled we split the space acording to HYBRID_SHARED_PB_SIZE. + * i.e. HYBRID_SHARED_PB_SIZE defines the size of the shared PB and the + * remainder is the size of the percontext PB. 
+ * If hybrid PB is not enabled then we still create both heaps (helps keep + * the code clean) and define the size of the unused one to 0 + */ + + #define SGX_3DPARAMETERS_HEAP_SIZE 0x10000000 + + /* By default we split the PB 50/50 */ +#if !defined(HYBRID_SHARED_PB_SIZE) + #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1) +#endif +#if defined(SUPPORT_HYBRID_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE) + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000) +#else +#if defined(SUPPORT_PERCONTEXT_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE 0 + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0 + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) +#endif +#if defined(SUPPORT_SHARED_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0 +#endif +#endif + + #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0xC0000000 + /* Size is defiend above */ + + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE) + /* Size is defiend above */ + + #define SGX_TADATA_HEAP_BASE 0xD0000000 + #define SGX_TADATA_HEAP_SIZE (0x0D000000-0x00001000) + + #define SGX_SYNCINFO_HEAP_BASE 0xE0000000 + #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000) + + #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0xE4000000 + #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000) + + #define SGX_KERNEL_CODE_HEAP_BASE 0xE8000000 + #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000) + + #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0xEC000000 + #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000) + + #define SGX_KERNEL_DATA_HEAP_BASE (0xF0000000+SGX_KERNEL_DATA_HEAP_OFFSET) + #define 
SGX_KERNEL_DATA_HEAP_SIZE (0x03000000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET)) + + /* Actual Pixel and Vertex shared heaps sizes may be reduced by + * override - see SGX_USE_CODE_SEGMENT_RANGE_BITS.*/ + #define SGX_PIXELSHADER_HEAP_BASE 0xF4000000 + #define SGX_PIXELSHADER_HEAP_SIZE (0x05000000-0x00001000) + + #define SGX_VERTEXSHADER_HEAP_BASE 0xFC000000 + #define SGX_VERTEXSHADER_HEAP_SIZE (0x02000000-0x00001000) +#else /* FIX_HW_BRN_31620 */ + #if defined(SGX_FEATURE_2D_HARDWARE) + #define SGX_2D_HEAP_BASE 0x00100000 + #define SGX_2D_HEAP_SIZE (0x08000000-0x00100000-0x00001000) + #endif + + #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + #define SGX_GENERAL_MAPPING_HEAP_BASE 0x08000000 + #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x08000000-0x00001000) + #endif + + #if !defined(SUPPORT_MEMORY_TILING) + #if defined (SUPPORT_ION) + #define SGX_GENERAL_HEAP_BASE 0x10000000 + #define SGX_GENERAL_HEAP_SIZE (0xC2000000-ION_HEAP_SIZE-0x00001000) + + #define SGX_ION_HEAP_BASE (SGX_GENERAL_HEAP_BASE+SGX_GENERAL_HEAP_SIZE+0x00001000) + #define SGX_ION_HEAP_SIZE (ION_HEAP_SIZE-0x00001000) + #else + #define SGX_GENERAL_HEAP_BASE 0x10000000 + #define SGX_GENERAL_HEAP_SIZE (0xC2000000-0x00001000) + #endif + #else + #include <sgx_msvdx_defs.h> + /* Create heaps with memory tiling enabled. + * SGX HW limit is 10 heaps. 
+ */ + /* Tiled heap space is taken from general heap */ + #define SGX_GENERAL_HEAP_BASE 0x10000000 + #define SGX_GENERAL_HEAP_SIZE (0xB5000000-0x00001000) + + #define SGX_VPB_TILED_HEAP_STRIDE TILING_TILE_STRIDE_2K + #define SGX_VPB_TILED_HEAP_BASE 0xC5000000 + #define SGX_VPB_TILED_HEAP_SIZE (0x0D000000-0x00001000) + + /* Check tiled heap base alignment */ + #if((SGX_VPB_TILED_HEAP_BASE & SGX_BIF_TILING_ADDR_INV_MASK) != 0) + #error "sgxconfig.h: SGX_VPB_TILED_HEAP has insufficient alignment" + #endif + + #endif /* SUPPORT_MEMORY_TILING */ + + /* + * For hybrid PB we have to split virtual PB range between the shared + * PB and percontext PB due to the fact we only have one heap config + * per device. + * If hybrid PB is enabled we split the space acording to HYBRID_SHARED_PB_SIZE. + * i.e. HYBRID_SHARED_PB_SIZE defines the size of the shared PB and the + * remainder is the size of the percontext PB. + * If hybrid PB is not enabled then we still create both heaps (helps keep + * the code clean) and define the size of the unused one to 0 + */ + + #define SGX_3DPARAMETERS_HEAP_SIZE 0x10000000 + + /* By default we split the PB 50/50 */ +#if !defined(HYBRID_SHARED_PB_SIZE) + #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1) +#endif +#if defined(SUPPORT_HYBRID_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE) + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000) +#else +#if defined(SUPPORT_PERCONTEXT_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE 0 + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0 + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) +#endif +#if defined(SUPPORT_SHARED_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) + #define 
SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0 +#endif +#endif + + #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0xD2000000 + /* Size is defiend above */ + + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE) + /* Size is defiend above */ + + #define SGX_TADATA_HEAP_BASE 0xE2000000 + #define SGX_TADATA_HEAP_SIZE (0x0D000000-0x00001000) + + #define SGX_SYNCINFO_HEAP_BASE 0xEF000000 + #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000) + + #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0xF0000000 + #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000) + + #define SGX_KERNEL_CODE_HEAP_BASE 0xF2000000 + #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000) + + #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0xF2400000 + #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000) + + #define SGX_KERNEL_DATA_HEAP_BASE (0xF4000000+SGX_KERNEL_DATA_HEAP_OFFSET) + #define SGX_KERNEL_DATA_HEAP_SIZE (0x05000000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET)) + + /* Actual Pixel and Vertex shared heaps sizes may be reduced by + * override - see SGX_USE_CODE_SEGMENT_RANGE_BITS.*/ + #define SGX_PIXELSHADER_HEAP_BASE 0xF9000000 + #define SGX_PIXELSHADER_HEAP_SIZE (0x05000000-0x00001000) + + #define SGX_VERTEXSHADER_HEAP_BASE 0xFE000000 + #define SGX_VERTEXSHADER_HEAP_SIZE (0x02000000-0x00001000) +#endif /* FIX_HW_BRN_31620 */ + /* signal we've identified the core by the build */ + #define SGX_CORE_IDENTIFIED +#endif /* SGX_FEATURE_ADDRESS_SPACE_SIZE == 32 */ + +#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 28 + +#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + #define SGX_GENERAL_MAPPING_HEAP_BASE 0x00001000 + #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x01800000-0x00001000-0x00001000) + + #define SGX_GENERAL_HEAP_BASE 0x01800000 + #define SGX_GENERAL_HEAP_SIZE (0x07000000-ION_HEAP_SIZE-0x00001000) + +#else + #define SGX_GENERAL_HEAP_BASE 0x00001000 +#if defined(SUPPORT_LARGE_GENERAL_HEAP) + #define SGX_GENERAL_HEAP_SIZE 
(0x0B800000-ION_HEAP_SIZE-0x00001000-0x00001000) +#else + #define SGX_GENERAL_HEAP_SIZE (0x08800000-ION_HEAP_SIZE-0x00001000-0x00001000) +#endif +#endif + +#if defined(SUPPORT_ION) + #define SGX_ION_HEAP_BASE (SGX_GENERAL_HEAP_BASE+SGX_GENERAL_HEAP_SIZE+0x00001000) + #define SGX_ION_HEAP_SIZE (ION_HEAP_SIZE-0x00001000) +#endif + /* + * For hybrid PB we have to split virtual PB range between the shared + * PB and percontext PB due to the fact we only have one heap config + * per device. + * If hybrid PB is enabled we split the space acording to HYBRID_SHARED_PB_SIZE. + * i.e. HYBRID_SHARED_PB_SIZE defines the size of the shared PB and the + * remainder is the size of the percontext PB. + * If hybrid PB is not enabled then we still create both heaps (helps keep + * the code clean) and define the size of the unused one to 0 + */ +#if defined(SUPPORT_LARGE_GENERAL_HEAP) + #define SGX_3DPARAMETERS_HEAP_SIZE 0x01000000 +#else + #define SGX_3DPARAMETERS_HEAP_SIZE 0x04000000 +#endif + + /* By default we split the PB 50/50 */ +#if !defined(HYBRID_SHARED_PB_SIZE) + #define HYBRID_SHARED_PB_SIZE (SGX_3DPARAMETERS_HEAP_SIZE >> 1) +#endif +#if defined(SUPPORT_HYBRID_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE (HYBRID_SHARED_PB_SIZE) + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (HYBRID_SHARED_PB_SIZE-0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - SGX_SHARED_3DPARAMETERS_SIZE - 0x00001000) +#else +#if defined(SUPPORT_PERCONTEXT_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE 0 + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE 0 + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) +#endif +#if defined(SUPPORT_SHARED_PB) + #define SGX_SHARED_3DPARAMETERS_SIZE SGX_3DPARAMETERS_HEAP_SIZE + #define SGX_SHARED_3DPARAMETERS_HEAP_SIZE (SGX_3DPARAMETERS_HEAP_SIZE - 0x00001000) + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE 0 +#endif +#endif + +#if defined(SUPPORT_LARGE_GENERAL_HEAP) + #define 
SGX_SHARED_3DPARAMETERS_HEAP_BASE 0x0B800000 +#else + #define SGX_SHARED_3DPARAMETERS_HEAP_BASE 0x08800000 +#endif + + /* Size is defined above */ + + #define SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE (SGX_SHARED_3DPARAMETERS_HEAP_BASE + SGX_SHARED_3DPARAMETERS_SIZE) + /* Size is defined above */ + + #define SGX_TADATA_HEAP_BASE 0x0C800000 + #define SGX_TADATA_HEAP_SIZE (0x01000000-0x00001000) + + #define SGX_SYNCINFO_HEAP_BASE 0x0D800000 + #define SGX_SYNCINFO_HEAP_SIZE (0x00400000-0x00001000) + + #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0x0DC00000 + #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x00800000-0x00001000) + + #define SGX_KERNEL_CODE_HEAP_BASE 0x0E400000 + #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000) + + #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0x0E800000 + #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x00800000-0x00001000) + + #define SGX_KERNEL_DATA_HEAP_BASE (0x0F000000+SGX_KERNEL_DATA_HEAP_OFFSET) + #define SGX_KERNEL_DATA_HEAP_SIZE (0x00400000-(0x00001000+SGX_KERNEL_DATA_HEAP_OFFSET)) + + #define SGX_PIXELSHADER_HEAP_BASE 0x0F400000 + #define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000) + + #define SGX_VERTEXSHADER_HEAP_BASE 0x0FC00000 + #define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000) + + /* signal we've identified the core by the build */ + #define SGX_CORE_IDENTIFIED + +#endif /* SGX_FEATURE_ADDRESS_SPACE_SIZE == 28 */ + +#if !defined(SGX_CORE_IDENTIFIED) + #error "sgxconfig.h: ERROR: unspecified SGX Core version" +#endif + +/********************************************************************************* + * + * SGX_PDSPIXEL_CODEDATA_HEAP_BASE + 64MB range must include PDSVERTEX_CODEDATA and KERNEL_CODE heaps + * + ********************************************************************************/ +#if !defined (SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE) + #if ((SGX_KERNEL_CODE_HEAP_BASE + SGX_KERNEL_CODE_HEAP_SIZE - SGX_PDSPIXEL_CODEDATA_HEAP_BASE) > 0x4000000) + #error "sgxconfig.h: ERROR: SGX_KERNEL_CODE_HEAP_BASE out of range 
of SGX_PDSPIXEL_CODEDATA_HEAP_BASE" + #endif + + #if ((SGX_PDSVERTEX_CODEDATA_HEAP_BASE + SGX_PDSVERTEX_CODEDATA_HEAP_SIZE - SGX_PDSPIXEL_CODEDATA_HEAP_BASE) > 0x4000000) + #error "sgxconfig.h: ERROR: SGX_PDSVERTEX_CODEDATA_HEAP_BASE out of range of SGX_PDSPIXEL_CODEDATA_HEAP_BASE" + #endif +#endif + +/********************************************************************************* + * + * The General Mapping heap must be within the 2D requestor range of the 2D heap base + * + ********************************************************************************/ +#if defined(SGX_FEATURE_2D_HARDWARE) && defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + #if ((SGX_GENERAL_MAPPING_HEAP_BASE + SGX_GENERAL_MAPPING_HEAP_SIZE - SGX_2D_HEAP_BASE) >= EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK) + #error "sgxconfig.h: ERROR: SGX_GENERAL_MAPPING_HEAP inaccessable by 2D requestor" + #endif +#endif + +/********************************************************************************* + * + * The kernel code heap base must be aligned to a USSE code page + * + ********************************************************************************/ +#if defined (EURASIA_USE_CODE_PAGE_SIZE) + #if ((SGX_KERNEL_CODE_HEAP_BASE & (EURASIA_USE_CODE_PAGE_SIZE - 1)) != 0) + #error "sgxconfig.h: ERROR: Kernel code heap base misalignment" + #endif +#endif + +/********************************************************************************* + * + * Heap overlap check + * + ********************************************************************************/ +#if defined(SGX_FEATURE_2D_HARDWARE) + #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + #if ((SGX_2D_HEAP_BASE + SGX_2D_HEAP_SIZE) >= SGX_GENERAL_MAPPING_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_2D_HEAP overlaps SGX_GENERAL_MAPPING_HEAP" + #endif + #else + #if ((SGX_2D_HEAP_BASE + SGX_2D_HEAP_SIZE) >= SGX_GENERAL_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_2D_HEAP overlaps SGX_GENERAL_HEAP_BASE" + #endif + #endif +#endif + +#if 
defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + #if ((SGX_GENERAL_MAPPING_HEAP_BASE + SGX_GENERAL_MAPPING_HEAP_SIZE) >= SGX_GENERAL_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_GENERAL_MAPPING_HEAP overlaps SGX_GENERAL_HEAP" + #endif +#endif + +#if defined(SUPPORT_HYBRID_PB) + #if ((HYBRID_SHARED_PB_SIZE + 0x000001000) > SGX_3DPARAMETERS_HEAP_SIZE) + #error "sgxconfig.h: ERROR: HYBRID_SHARED_PB_SIZE too large" + #endif +#endif + +#if defined(SUPPORT_MEMORY_TILING) + #if ((SGX_GENERAL_HEAP_BASE + SGX_GENERAL_HEAP_SIZE) >= SGX_VPB_TILED_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_GENERAL_HEAP overlaps SGX_VPB_TILED_HEAP" + #endif + #if ((SGX_VPB_TILED_HEAP_BASE + SGX_VPB_TILED_HEAP_SIZE) >= SGX_SHARED_3DPARAMETERS_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_VPB_TILED_HEAP overlaps SGX_3DPARAMETERS_HEAP" + #endif +#else + #if defined(SUPPORT_ION) + #if ((SGX_ION_HEAP_BASE + SGX_ION_HEAP_SIZE) >= SGX_SHARED_3DPARAMETERS_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_ION_HEAP overlaps SGX_3DPARAMETERS_HEAP" + #endif + #endif + #if ((SGX_GENERAL_HEAP_BASE + SGX_GENERAL_HEAP_SIZE) >= SGX_SHARED_3DPARAMETERS_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_GENERAL_HEAP overlaps SGX_3DPARAMETERS_HEAP" + #endif +#endif + +#if (((SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE + SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE) >= SGX_TADATA_HEAP_BASE) && (SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE > 0)) + #error "sgxconfig.h: ERROR: SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE overlaps SGX_TADATA_HEAP" +#endif + +#if ((SGX_TADATA_HEAP_BASE + SGX_TADATA_HEAP_SIZE) >= SGX_SYNCINFO_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_TADATA_HEAP overlaps SGX_SYNCINFO_HEAP" +#endif + +#if ((SGX_SYNCINFO_HEAP_BASE + SGX_SYNCINFO_HEAP_SIZE) >= SGX_PDSPIXEL_CODEDATA_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_SYNCINFO_HEAP overlaps SGX_PDSPIXEL_CODEDATA_HEAP" +#endif + +#if ((SGX_PDSPIXEL_CODEDATA_HEAP_BASE + SGX_PDSPIXEL_CODEDATA_HEAP_SIZE) >= SGX_KERNEL_CODE_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_PDSPIXEL_CODEDATA_HEAP 
overlaps SGX_KERNEL_CODE_HEAP" +#endif + +#if ((SGX_KERNEL_CODE_HEAP_BASE + SGX_KERNEL_CODE_HEAP_SIZE) >= SGX_PDSVERTEX_CODEDATA_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_KERNEL_CODE_HEAP overlaps SGX_PDSVERTEX_CODEDATA_HEAP" +#endif + +#if ((SGX_PDSVERTEX_CODEDATA_HEAP_BASE + SGX_PDSVERTEX_CODEDATA_HEAP_SIZE) >= SGX_KERNEL_DATA_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_PDSVERTEX_CODEDATA_HEAP overlaps SGX_KERNEL_DATA_HEAP" +#endif + +#if ((SGX_KERNEL_DATA_HEAP_BASE + SGX_KERNEL_DATA_HEAP_SIZE) >= SGX_PIXELSHADER_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_KERNEL_DATA_HEAP overlaps SGX_PIXELSHADER_HEAP" +#endif + +#if ((SGX_PIXELSHADER_HEAP_BASE + SGX_PIXELSHADER_HEAP_SIZE) >= SGX_VERTEXSHADER_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_PIXELSHADER_HEAP overlaps SGX_VERTEXSHADER_HEAP" +#endif + +#if ((SGX_VERTEXSHADER_HEAP_BASE + SGX_VERTEXSHADER_HEAP_SIZE) < SGX_VERTEXSHADER_HEAP_BASE) + #error "sgxconfig.h: ERROR: SGX_VERTEXSHADER_HEAP_BASE size cause wraparound" +#endif + +#endif /* __SGXCONFIG_H__ */ + +/***************************************************************************** + End of file (sgxconfig.h) +*****************************************************************************/ diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxinfokm.h b/pvr-source/services4/srvkm/devices/sgx/sgxinfokm.h new file mode 100644 index 0000000..125da09 --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/sgxinfokm.h @@ -0,0 +1,610 @@ +/*************************************************************************/ /*! +@Title SGX kernel services structues/functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Structures and inline functions for KM services component +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __SGXINFOKM_H__ +#define __SGXINFOKM_H__ + +#include "sgxdefs.h" +#include "device.h" +#include "power.h" +#include "sysconfig.h" +#include "sgxscript.h" +#include "sgxinfo.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +/****************************************************************************/ +/* kernel only defines: */ +/****************************************************************************/ +/* SGXDeviceMap Flag defines */ +#define SGX_HOSTPORT_PRESENT 0x00000001UL + + +/* + SGX PDUMP register bank name (prefix) +*/ +#define SGX_PDUMPREG_NAME "SGXREG" + +/****************************************************************************/ +/* kernel only structures: */ +/****************************************************************************/ + +/*Forward declaration*/ +typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC; + + +typedef struct _PVRSRV_SGX_CCB_INFO_ *PPVRSRV_SGX_CCB_INFO; + +typedef struct _PVRSRV_SGXDEV_INFO_ +{ + PVRSRV_DEVICE_TYPE eDeviceType; + PVRSRV_DEVICE_CLASS eDeviceClass; + + IMG_UINT8 ui8VersionMajor; + IMG_UINT8 ui8VersionMinor; + IMG_UINT32 ui32CoreConfig; + IMG_UINT32 ui32CoreFlags; + + /* Kernel mode linear address of device registers */ + IMG_PVOID pvRegsBaseKM; + +#if defined(SGX_FEATURE_HOST_PORT) + /* Kernel mode linear address of host port */ + IMG_PVOID pvHostPortBaseKM; + /* HP size */ + IMG_UINT32 ui32HPSize; + /* HP syspaddr */ + IMG_SYS_PHYADDR sHPSysPAddr; +#endif + + /* FIXME: The alloc for this should go through OSAllocMem in future */ + IMG_HANDLE hRegMapping; + + /* System physical address of device registers*/ + IMG_SYS_PHYADDR sRegsPhysBase; + /* Register region size in bytes */ + IMG_UINT32 ui32RegSize; + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) + /* external system cache register region size in bytes */ + IMG_UINT32 ui32ExtSysCacheRegsSize; + /* external system cache register device relative physical 
address */ + IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase; + /* ptr to page table */ + IMG_UINT32 *pui32ExtSystemCacheRegsPT; + /* handle to page table alloc/mapping */ + IMG_HANDLE hExtSystemCacheRegsPTPageOSMemHandle; + /* sys phys addr of PT */ + IMG_SYS_PHYADDR sExtSystemCacheRegsPTSysPAddr; +#endif + + /* SGX clock speed */ + IMG_UINT32 ui32CoreClockSpeed; + IMG_UINT32 ui32uKernelTimerClock; + IMG_BOOL bSGXIdle; + + PVRSRV_STUB_PBDESC *psStubPBDescListKM; + + + /* kernel memory context info */ + IMG_DEV_PHYADDR sKernelPDDevPAddr; + + IMG_UINT32 ui32HeapCount; /*!< heap count */ + IMG_VOID *pvDeviceMemoryHeap; + PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo; /*!< meminfo for CCB in device accessible memory */ + PVRSRV_SGX_KERNEL_CCB *psKernelCCB; /*!< kernel mode linear address of CCB in device accessible memory */ + PPVRSRV_SGX_CCB_INFO psKernelCCBInfo; /*!< CCB information structure */ + PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo; /*!< meminfo for CCB control in device accessible memory */ + PVRSRV_SGX_CCB_CTL *psKernelCCBCtl; /*!< kernel mode linear address of CCB control in device accessible memory */ + PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo; /*!< meminfo for kernel CCB event kicker */ + IMG_UINT32 *pui32KernelCCBEventKicker; /*!< kernel mode linear address of kernel CCB event kicker */ +#if defined(PDUMP) + IMG_UINT32 ui32KernelCCBEventKickerDumpVal; /*!< pdump copy of the kernel CCB event kicker */ +#endif /* PDUMP */ + PVRSRV_KERNEL_MEM_INFO *psKernelSGXMiscMemInfo; /*!< kernel mode linear address of SGX misc info buffer */ + IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX]; /*!< ukernel host kick offests */ +#if defined(SGX_SUPPORT_HWPROFILING) + PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo; +#endif + PPVRSRV_KERNEL_MEM_INFO psKernelHWPerfCBMemInfo; /*!< Meminfo for hardware performace circular buffer */ + PPVRSRV_KERNEL_MEM_INFO psKernelTASigBufferMemInfo; /*!< Meminfo for TA signature buffer */ + PPVRSRV_KERNEL_MEM_INFO 
psKernel3DSigBufferMemInfo; /*!< Meminfo for 3D signature buffer */ +#if defined(FIX_HW_BRN_29702) + PPVRSRV_KERNEL_MEM_INFO psKernelCFIMemInfo; /*!< Meminfo for cfi */ +#endif +#if defined(FIX_HW_BRN_29823) + PPVRSRV_KERNEL_MEM_INFO psKernelDummyTermStreamMemInfo; /*!< Meminfo for dummy terminate stream */ +#endif +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559) + PPVRSRV_KERNEL_MEM_INFO psKernelVDMSnapShotBufferMemInfo; /*!< Meminfo for dummy snapshot buffer */ + PPVRSRV_KERNEL_MEM_INFO psKernelVDMCtrlStreamBufferMemInfo; /*!< Meminfo for dummy control stream */ +#endif +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \ + defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX) + PPVRSRV_KERNEL_MEM_INFO psKernelVDMStateUpdateBufferMemInfo; /*!< Meminfo for state update buffer */ +#endif +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + PPVRSRV_KERNEL_MEM_INFO psKernelEDMStatusBufferMemInfo; /*!< Meminfo for EDM status buffer */ +#endif + /* Client reference count */ + IMG_UINT32 ui32ClientRefCount; + + /* cache control word for micro kernel cache flush/invalidates */ + IMG_UINT32 ui32CacheControl; + + /* client-side build options */ + IMG_UINT32 ui32ClientBuildOptions; + + /* client-side microkernel structure sizes */ + SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes; + + /* + if we don't preallocate the pagetables we must + insert newly allocated page tables dynamically + */ + IMG_VOID *pvMMUContextList; + + /* Copy of registry ForcePTOff entry */ + IMG_BOOL bForcePTOff; + + IMG_UINT32 ui32EDMTaskReg0; + IMG_UINT32 ui32EDMTaskReg1; + + IMG_UINT32 ui32ClkGateCtl; + IMG_UINT32 ui32ClkGateCtl2; + IMG_UINT32 ui32ClkGateStatusReg; + IMG_UINT32 ui32ClkGateStatusMask; +#if defined(SGX_FEATURE_MP) + IMG_UINT32 ui32MasterClkGateStatusReg; + IMG_UINT32 ui32MasterClkGateStatusMask; + IMG_UINT32 ui32MasterClkGateStatus2Reg; + IMG_UINT32 ui32MasterClkGateStatus2Mask; +#endif /* SGX_FEATURE_MP */ + SGX_INIT_SCRIPTS sScripts; + + /* Members associated with 
dummy PD needed for BIF reset */ + IMG_HANDLE hBIFResetPDOSMemHandle; + IMG_DEV_PHYADDR sBIFResetPDDevPAddr; + IMG_DEV_PHYADDR sBIFResetPTDevPAddr; + IMG_DEV_PHYADDR sBIFResetPageDevPAddr; + IMG_UINT32 *pui32BIFResetPD; + IMG_UINT32 *pui32BIFResetPT; + + +#if defined(SUPPORT_HW_RECOVERY) + /* Timeout callback handle */ + IMG_HANDLE hTimer; + /* HW recovery Time stamp */ + IMG_UINT32 ui32TimeStamp; +#endif + + /* Number of SGX resets */ + IMG_UINT32 ui32NumResets; + + /* host control */ + PVRSRV_KERNEL_MEM_INFO *psKernelSGXHostCtlMemInfo; + SGXMKIF_HOST_CTL *psSGXHostCtl; + + /* TA/3D control */ + PVRSRV_KERNEL_MEM_INFO *psKernelSGXTA3DCtlMemInfo; + +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + PVRSRV_KERNEL_MEM_INFO *psKernelSGXPTLAWriteBackMemInfo; +#endif + + IMG_UINT32 ui32Flags; + + /* memory tiling range usage */ + IMG_UINT32 ui32MemTilingUsage; + + #if defined(PDUMP) + PVRSRV_SGX_PDUMP_CONTEXT sPDContext; + #endif + +#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) + /* SGX MMU dummy page details */ + IMG_VOID *pvDummyPTPageCpuVAddr; + IMG_DEV_PHYADDR sDummyPTDevPAddr; + IMG_HANDLE hDummyPTPageOSMemHandle; + IMG_VOID *pvDummyDataPageCpuVAddr; + IMG_DEV_PHYADDR sDummyDataDevPAddr; + IMG_HANDLE hDummyDataPageOSMemHandle; +#endif +#if defined(PDUMP) + PDUMP_MMU_ATTRIB sMMUAttrib; +#endif + IMG_UINT32 asSGXDevData[SGX_MAX_DEV_DATA]; + +#if defined(FIX_HW_BRN_31620) + /* Dummy page refs */ + IMG_VOID *pvBRN31620DummyPageCpuVAddr; + IMG_HANDLE hBRN31620DummyPageOSMemHandle; + IMG_DEV_PHYADDR sBRN31620DummyPageDevPAddr; + + /* Dummy PT refs */ + IMG_VOID *pvBRN31620DummyPTCpuVAddr; + IMG_HANDLE hBRN31620DummyPTOSMemHandle; + IMG_DEV_PHYADDR sBRN31620DummyPTDevPAddr; + + IMG_HANDLE hKernelMMUContext; +#endif + +} PVRSRV_SGXDEV_INFO; + + +typedef struct _SGX_TIMING_INFORMATION_ +{ + IMG_UINT32 ui32CoreClockSpeed; + IMG_UINT32 ui32HWRecoveryFreq; + IMG_BOOL bEnableActivePM; + IMG_UINT32 ui32ActivePowManLatencyms; + IMG_UINT32 
ui32uKernelFreq; +} SGX_TIMING_INFORMATION; + +/* FIXME Rename this structure to sg more generalised as it's been extended*/ +/* SGX device map */ +typedef struct _SGX_DEVICE_MAP_ +{ + IMG_UINT32 ui32Flags; + + /* Registers */ + IMG_SYS_PHYADDR sRegsSysPBase; + IMG_CPU_PHYADDR sRegsCpuPBase; + IMG_CPU_VIRTADDR pvRegsCpuVBase; + IMG_UINT32 ui32RegsSize; + +#if defined(SGX_FEATURE_HOST_PORT) + IMG_SYS_PHYADDR sHPSysPBase; + IMG_CPU_PHYADDR sHPCpuPBase; + IMG_UINT32 ui32HPSize; +#endif + + /* Local Device Memory Region: (if present) */ + IMG_SYS_PHYADDR sLocalMemSysPBase; + IMG_DEV_PHYADDR sLocalMemDevPBase; + IMG_CPU_PHYADDR sLocalMemCpuPBase; + IMG_UINT32 ui32LocalMemSize; + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) + IMG_UINT32 ui32ExtSysCacheRegsSize; + IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase; +#endif + + /* device interrupt IRQ */ + IMG_UINT32 ui32IRQ; + +#if !defined(SGX_DYNAMIC_TIMING_INFO) + /* timing information*/ + SGX_TIMING_INFORMATION sTimingInfo; +#endif +#if defined(PDUMP) + /* pdump memory region name */ + IMG_CHAR *pszPDumpDevName; +#endif +} SGX_DEVICE_MAP; + + +struct _PVRSRV_STUB_PBDESC_ +{ + IMG_UINT32 ui32RefCount; + IMG_UINT32 ui32TotalPBSize; + PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO **ppsSubKernelMemInfos; + IMG_UINT32 ui32SubKernelMemInfosCount; + IMG_HANDLE hDevCookie; + PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo; + PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo; + IMG_DEV_VIRTADDR sHWPBDescDevVAddr; + PVRSRV_STUB_PBDESC *psNext; + PVRSRV_STUB_PBDESC **ppsThis; +}; + +/*! 
+ ****************************************************************************** + * CCB control structure for SGX + *****************************************************************************/ +typedef struct _PVRSRV_SGX_CCB_INFO_ +{ + PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo; /*!< meminfo for CCB in device accessible memory */ + PVRSRV_KERNEL_MEM_INFO *psCCBCtlMemInfo; /*!< meminfo for CCB control in device accessible memory */ + SGXMKIF_COMMAND *psCommands; /*!< linear address of the array of commands */ + IMG_UINT32 *pui32WriteOffset; /*!< linear address of the write offset into array of commands */ + volatile IMG_UINT32 *pui32ReadOffset; /*!< linear address of the read offset into array of commands */ +#if defined(PDUMP) + IMG_UINT32 ui32CCBDumpWOff; /*!< for pdumping */ +#endif +} PVRSRV_SGX_CCB_INFO; + + +typedef struct _SGX_BRIDGE_INIT_INFO_KM_ +{ + IMG_HANDLE hKernelCCBMemInfo; + IMG_HANDLE hKernelCCBCtlMemInfo; + IMG_HANDLE hKernelCCBEventKickerMemInfo; + IMG_HANDLE hKernelSGXHostCtlMemInfo; + IMG_HANDLE hKernelSGXTA3DCtlMemInfo; +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + IMG_HANDLE hKernelSGXPTLAWriteBackMemInfo; +#endif + IMG_HANDLE hKernelSGXMiscMemInfo; + + IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX]; + + SGX_INIT_SCRIPTS sScripts; + + IMG_UINT32 ui32ClientBuildOptions; + SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes; + +#if defined(SGX_SUPPORT_HWPROFILING) + IMG_HANDLE hKernelHWProfilingMemInfo; +#endif +#if defined(SUPPORT_SGX_HWPERF) + IMG_HANDLE hKernelHWPerfCBMemInfo; +#endif + IMG_HANDLE hKernelTASigBufferMemInfo; + IMG_HANDLE hKernel3DSigBufferMemInfo; + +#if defined(FIX_HW_BRN_29702) + IMG_HANDLE hKernelCFIMemInfo; +#endif +#if defined(FIX_HW_BRN_29823) + IMG_HANDLE hKernelDummyTermStreamMemInfo; +#endif +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + IMG_HANDLE hKernelEDMStatusBufferMemInfo; +#endif + + IMG_UINT32 ui32EDMTaskReg0; + IMG_UINT32 ui32EDMTaskReg1; + + IMG_UINT32 ui32ClkGateStatusReg; + 
IMG_UINT32 ui32ClkGateStatusMask; +#if defined(SGX_FEATURE_MP) +// IMG_UINT32 ui32MasterClkGateStatusReg; +// IMG_UINT32 ui32MasterClkGateStatusMask; +// IMG_UINT32 ui32MasterClkGateStatus2Reg; +// IMG_UINT32 ui32MasterClkGateStatus2Mask; +#endif /* SGX_FEATURE_MP */ + + IMG_UINT32 ui32CacheControl; + + IMG_UINT32 asInitDevData[SGX_MAX_DEV_DATA]; + IMG_HANDLE asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES]; + +} SGX_BRIDGE_INIT_INFO_KM; + + +typedef struct _SGX_INTERNEL_STATUS_UPDATE_KM_ +{ + CTL_STATUS sCtlStatus; + IMG_HANDLE hKernelMemInfo; +} SGX_INTERNEL_STATUS_UPDATE_KM; + + +typedef struct _SGX_CCB_KICK_KM_ +{ + SGXMKIF_COMMAND sCommand; + IMG_HANDLE hCCBKernelMemInfo; + + IMG_UINT32 ui32NumDstSyncObjects; + IMG_HANDLE hKernelHWSyncListMemInfo; + + /* DST syncs */ + IMG_HANDLE *pahDstSyncHandles; + + IMG_UINT32 ui32NumTAStatusVals; + IMG_UINT32 ui32Num3DStatusVals; + +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + SGX_INTERNEL_STATUS_UPDATE_KM asTAStatusUpdate[SGX_MAX_TA_STATUS_VALS]; + SGX_INTERNEL_STATUS_UPDATE_KM as3DStatusUpdate[SGX_MAX_3D_STATUS_VALS]; +#else + IMG_HANDLE ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS]; + IMG_HANDLE ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS]; +#endif + + IMG_BOOL bFirstKickOrResume; +#if defined(NO_HARDWARE) || defined(PDUMP) + IMG_BOOL bTerminateOrAbort; +#endif + + /* CCB offset of data structure associated with this kick */ + IMG_UINT32 ui32CCBOffset; + +#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS) + /* SRC and DST syncs */ + IMG_UINT32 ui32NumTASrcSyncs; + IMG_HANDLE ahTASrcKernelSyncInfo[SGX_MAX_TA_SRC_SYNCS]; + IMG_UINT32 ui32NumTADstSyncs; + IMG_HANDLE ahTADstKernelSyncInfo[SGX_MAX_TA_DST_SYNCS]; + IMG_UINT32 ui32Num3DSrcSyncs; + IMG_HANDLE ah3DSrcKernelSyncInfo[SGX_MAX_3D_SRC_SYNCS]; +#else + /* SRC syncs */ + IMG_UINT32 ui32NumSrcSyncs; + IMG_HANDLE ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS_TA]; +#endif + + /* TA/3D dependency data */ + IMG_BOOL bTADependency; + IMG_HANDLE hTA3DSyncInfo; + + IMG_HANDLE hTASyncInfo; + 
IMG_HANDLE h3DSyncInfo; +#if defined(PDUMP) + IMG_UINT32 ui32CCBDumpWOff; +#endif +#if defined(NO_HARDWARE) + IMG_UINT32 ui32WriteOpsPendingVal; +#endif +} SGX_CCB_KICK_KM; + + +#if defined(TRANSFER_QUEUE) +typedef struct _PVRSRV_TRANSFER_SGX_KICK_KM_ +{ + IMG_HANDLE hCCBMemInfo; + IMG_UINT32 ui32SharedCmdCCBOffset; + + IMG_DEV_VIRTADDR sHWTransferContextDevVAddr; + + IMG_HANDLE hTASyncInfo; + IMG_HANDLE h3DSyncInfo; + + IMG_UINT32 ui32NumSrcSync; + IMG_HANDLE ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS]; + + IMG_UINT32 ui32NumDstSync; + IMG_HANDLE ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS]; + + IMG_UINT32 ui32Flags; + + IMG_UINT32 ui32PDumpFlags; +#if defined(PDUMP) + IMG_UINT32 ui32CCBDumpWOff; +#endif +} PVRSRV_TRANSFER_SGX_KICK_KM, *PPVRSRV_TRANSFER_SGX_KICK_KM; + +#if defined(SGX_FEATURE_2D_HARDWARE) +typedef struct _PVRSRV_2D_SGX_KICK_KM_ +{ + IMG_HANDLE hCCBMemInfo; + IMG_UINT32 ui32SharedCmdCCBOffset; + + IMG_DEV_VIRTADDR sHW2DContextDevVAddr; + + IMG_UINT32 ui32NumSrcSync; + IMG_HANDLE ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS]; + + /* need to be able to check reads and writes on dest, and update writes */ + IMG_HANDLE hDstSyncInfo; + + /* need to be able to check reads and writes on TA ops, and update writes */ + IMG_HANDLE hTASyncInfo; + + /* need to be able to check reads and writes on 2D ops, and update writes */ + IMG_HANDLE h3DSyncInfo; + + IMG_UINT32 ui32PDumpFlags; +#if defined(PDUMP) + IMG_UINT32 ui32CCBDumpWOff; +#endif +} PVRSRV_2D_SGX_KICK_KM, *PPVRSRV_2D_SGX_KICK_KM; +#endif /* defined(SGX_FEATURE_2D_HARDWARE) */ +#endif /* #if defined(TRANSFER_QUEUE) */ + +/****************************************************************************/ +/* kernel only functions prototypes */ +/****************************************************************************/ +PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode); + +IMG_VOID SGXOSTimer(IMG_VOID *pvData); + +IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bHardwareRecovery, + IMG_UINT32 
ui32PDUMPFlags); + +IMG_VOID SGXInitClocks(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PDUMPFlags); + +PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bHardwareRecovery); +PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie); + +PVRSRV_ERROR SGXPrePowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +PVRSRV_ERROR SGXPostPowerState(IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +PVRSRV_ERROR SGXPreClockSpeedChange(IMG_HANDLE hDevHandle, + IMG_BOOL bIdleDevice, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +PVRSRV_ERROR SGXPostClockSpeedChange(IMG_HANDLE hDevHandle, + IMG_BOOL bIdleDevice, + PVRSRV_DEV_POWER_STATE eCurrentPowerState); + +IMG_VOID SGXPanic(PVRSRV_SGXDEV_INFO *psDevInfo); + +IMG_VOID SGXDumpDebugInfo (PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bDumpSGXRegs); + +PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); + +#if defined(SGX_DYNAMIC_TIMING_INFO) +IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psSGXTimingInfo); +#endif + +/****************************************************************************/ +/* kernel only functions: */ +/****************************************************************************/ +#if defined(NO_HARDWARE) +static INLINE IMG_VOID NoHardwareGenerateEvent(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32StatusRegister, + IMG_UINT32 ui32StatusValue, + IMG_UINT32 ui32StatusMask) +{ + IMG_UINT32 ui32RegVal; + + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister); + + ui32RegVal &= ~ui32StatusMask; + ui32RegVal |= (ui32StatusValue & ui32StatusMask); + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister, ui32RegVal); +} +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /* __SGXINFOKM_H__ */ + +/***************************************************************************** + End of file (sgxinfokm.h) 
+*****************************************************************************/ diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxinit.c b/pvr-source/services4/srvkm/devices/sgx/sgxinit.c new file mode 100644 index 0000000..199aa9d --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/sgxinit.c @@ -0,0 +1,3428 @@ +/*************************************************************************/ /*! +@Title Device specific initialisation routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include <stddef.h> + +#include "sgxdefs.h" +#include "sgxmmu.h" +#include "services_headers.h" +#include "buffer_manager.h" +#include "sgxapi_km.h" +#include "sgxinfo.h" +#include "sgx_mkif_km.h" +#include "sgxconfig.h" +#include "sysconfig.h" +#include "pvr_bridge_km.h" + +#include "sgx_bridge_km.h" + +#include "pdump_km.h" +#include "ra.h" +#include "mmu.h" +#include "handle.h" +#include "perproc.h" + +#include "sgxutils.h" +#include "pvrversion.h" +#include "sgx_options.h" + +#include "lists.h" +#include "srvkm.h" +#include "ttrace.h" + +extern int powering_down; + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + +static const IMG_CHAR *SGXUKernelStatusString(IMG_UINT32 code) +{ + switch(code) + { +#define MKTC_ST(x) \ + case x: \ + return #x; +#include "sgx_ukernel_status_codes.h" + default: + return "(Unknown)"; + } +} + +#endif /* defined(PVRSRV_USSE_EDM_STATUS_DEBUG) */ + +#define VAR(x) #x +/* PRQA S 0881 11 */ /* ignore 'order of evaluation' warning */ +#define CHECK_SIZE(NAME) \ +{ \ + if (psSGXStructSizes->ui32Sizeof_##NAME != psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME) \ + { \ + PVR_DPF((PVR_DBG_ERROR, "SGXDevInitCompatCheck: Size check failed for SGXMKIF_%s (client) = %d bytes, (ukernel) = %d bytes\n", \ + VAR(NAME), \ + 
psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME, \ + psSGXStructSizes->ui32Sizeof_##NAME )); \ + bStructSizesFailed = IMG_TRUE; \ + } \ +} + +#if defined (SYS_USING_INTERRUPTS) +IMG_BOOL SGX_ISRHandler(IMG_VOID *pvData); +#endif + + +static +PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hDevMemContext); +#if defined(PDUMP) +static +PVRSRV_ERROR SGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode); +#endif + +/*! +******************************************************************************* + + @Function SGXCommandComplete + + @Description + + SGX command complete handler + + @Input psDeviceNode - SGX device node + + @Return none + +******************************************************************************/ +static IMG_VOID SGXCommandComplete(PVRSRV_DEVICE_NODE *psDeviceNode) +{ +#if defined(OS_SUPPORTS_IN_LISR) + if (OSInLISR(psDeviceNode->psSysData)) + { + /* + * We shouldn't call SGXScheduleProcessQueuesKM in an + * LISR, as it may attempt to power up SGX. + * We assume that the LISR will schedule the MISR, which + * will test the following flag, and call + * SGXScheduleProcessQueuesKM if the flag is set. + */ + psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE; + } + else + { + SGXScheduleProcessQueuesKM(psDeviceNode); + } +#else + SGXScheduleProcessQueuesKM(psDeviceNode); +#endif +} + +/*! +******************************************************************************* + + @Function DeinitDevInfo + + @Description + + Deinits DevInfo + + @Input none + + @Return none + +******************************************************************************/ +static IMG_UINT32 DeinitDevInfo(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + if (psDevInfo->psKernelCCBInfo != IMG_NULL) + { + /* + Free CCB info. + */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SGX_CCB_INFO), psDevInfo->psKernelCCBInfo, IMG_NULL); + } + + return PVRSRV_OK; +} + +/*! 
+******************************************************************************* + + @Function InitDevInfo + + @Description + + Loads DevInfo + + @Input psDeviceNode + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR InitDevInfo(PVRSRV_PER_PROCESS_DATA *psPerProc, + PVRSRV_DEVICE_NODE *psDeviceNode, +#if defined (SUPPORT_SID_INTERFACE) + SGX_BRIDGE_INIT_INFO_KM *psInitInfo) +#else + SGX_BRIDGE_INIT_INFO *psInitInfo) +#endif +{ + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice; + PVRSRV_ERROR eError; + + PVRSRV_SGX_CCB_INFO *psKernelCCBInfo = IMG_NULL; + + PVR_UNREFERENCED_PARAMETER(psPerProc); + psDevInfo->sScripts = psInitInfo->sScripts; + + psDevInfo->psKernelCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBMemInfo; + psDevInfo->psKernelCCB = (PVRSRV_SGX_KERNEL_CCB *) psDevInfo->psKernelCCBMemInfo->pvLinAddrKM; + + psDevInfo->psKernelCCBCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBCtlMemInfo; + psDevInfo->psKernelCCBCtl = (PVRSRV_SGX_CCB_CTL *) psDevInfo->psKernelCCBCtlMemInfo->pvLinAddrKM; + + psDevInfo->psKernelCCBEventKickerMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBEventKickerMemInfo; + psDevInfo->pui32KernelCCBEventKicker = (IMG_UINT32 *)psDevInfo->psKernelCCBEventKickerMemInfo->pvLinAddrKM; + + psDevInfo->psKernelSGXHostCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXHostCtlMemInfo; + psDevInfo->psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM; + + psDevInfo->psKernelSGXTA3DCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXTA3DCtlMemInfo; + +#if defined(FIX_HW_BRN_31272) || defined(FIX_HW_BRN_31780) || defined(FIX_HW_BRN_33920) + psDevInfo->psKernelSGXPTLAWriteBackMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXPTLAWriteBackMemInfo; +#endif + + psDevInfo->psKernelSGXMiscMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXMiscMemInfo; + 
+#if defined(SGX_SUPPORT_HWPROFILING) + psDevInfo->psKernelHWProfilingMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWProfilingMemInfo; +#endif +#if defined(SUPPORT_SGX_HWPERF) + psDevInfo->psKernelHWPerfCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWPerfCBMemInfo; +#endif + psDevInfo->psKernelTASigBufferMemInfo = psInitInfo->hKernelTASigBufferMemInfo; + psDevInfo->psKernel3DSigBufferMemInfo = psInitInfo->hKernel3DSigBufferMemInfo; +#if defined(FIX_HW_BRN_29702) + psDevInfo->psKernelCFIMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCFIMemInfo; +#endif +#if defined(FIX_HW_BRN_29823) + psDevInfo->psKernelDummyTermStreamMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelDummyTermStreamMemInfo; +#endif +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && defined(FIX_HW_BRN_31559) + psDevInfo->psKernelVDMSnapShotBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelVDMSnapShotBufferMemInfo; + psDevInfo->psKernelVDMCtrlStreamBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelVDMCtrlStreamBufferMemInfo; +#endif +#if defined(SGX_FEATURE_VDM_CONTEXT_SWITCH) && \ + defined(FIX_HW_BRN_33657) && defined(SUPPORT_SECURE_33657_FIX) + psDevInfo->psKernelVDMStateUpdateBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelVDMStateUpdateBufferMemInfo; +#endif +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + psDevInfo->psKernelEDMStatusBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelEDMStatusBufferMemInfo; +#endif + /* + * Assign client-side build options for later verification + */ + psDevInfo->ui32ClientBuildOptions = psInitInfo->ui32ClientBuildOptions; + + /* + * Assign microkernel IF structure sizes for later verification + */ + psDevInfo->sSGXStructSizes = psInitInfo->sSGXStructSizes; + + /* + Setup the kernel version of the CCB control + */ + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_SGX_CCB_INFO), + (IMG_VOID **)&psKernelCCBInfo, 0, + "SGX Circular Command Buffer Info"); + if (eError != 
PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"InitDevInfo: Failed to alloc memory")); + goto failed_allockernelccb; + } + + + OSMemSet(psKernelCCBInfo, 0, sizeof(PVRSRV_SGX_CCB_INFO)); + psKernelCCBInfo->psCCBMemInfo = psDevInfo->psKernelCCBMemInfo; + psKernelCCBInfo->psCCBCtlMemInfo = psDevInfo->psKernelCCBCtlMemInfo; + psKernelCCBInfo->psCommands = psDevInfo->psKernelCCB->asCommands; + psKernelCCBInfo->pui32WriteOffset = &psDevInfo->psKernelCCBCtl->ui32WriteOffset; + psKernelCCBInfo->pui32ReadOffset = &psDevInfo->psKernelCCBCtl->ui32ReadOffset; + psDevInfo->psKernelCCBInfo = psKernelCCBInfo; + + /* + Copy the USE code addresses for the host kick. + */ + OSMemCopy(psDevInfo->aui32HostKickAddr, psInitInfo->aui32HostKickAddr, + SGXMKIF_CMD_MAX * sizeof(psDevInfo->aui32HostKickAddr[0])); + + psDevInfo->bForcePTOff = IMG_FALSE; + + psDevInfo->ui32CacheControl = psInitInfo->ui32CacheControl; + + psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0; + psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1; + psDevInfo->ui32ClkGateCtl = psInitInfo->ui32ClkGateCtl; + psDevInfo->ui32ClkGateCtl2 = psInitInfo->ui32ClkGateCtl2; + psDevInfo->ui32ClkGateStatusReg = psInitInfo->ui32ClkGateStatusReg; + psDevInfo->ui32ClkGateStatusMask = psInitInfo->ui32ClkGateStatusMask; +#if defined(SGX_FEATURE_MP) + psDevInfo->ui32MasterClkGateStatusReg = psInitInfo->ui32MasterClkGateStatusReg; + psDevInfo->ui32MasterClkGateStatusMask = psInitInfo->ui32MasterClkGateStatusMask; + psDevInfo->ui32MasterClkGateStatus2Reg = psInitInfo->ui32MasterClkGateStatus2Reg; + psDevInfo->ui32MasterClkGateStatus2Mask = psInitInfo->ui32MasterClkGateStatus2Mask; +#endif /* SGX_FEATURE_MP */ + + + /* Initialise Dev Data */ + OSMemCopy(&psDevInfo->asSGXDevData, &psInitInfo->asInitDevData, sizeof(psDevInfo->asSGXDevData)); + + return PVRSRV_OK; + +failed_allockernelccb: + DeinitDevInfo(psDevInfo); + + return eError; +} + + + + +static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND 
*psScript, IMG_UINT32 ui32NumInitCommands) +{ + IMG_UINT32 ui32PC; + SGX_INIT_COMMAND *psComm; + + for (ui32PC = 0, psComm = psScript; + ui32PC < ui32NumInitCommands; + ui32PC++, psComm++) + { + switch (psComm->eOp) + { + case SGX_INIT_OP_WRITE_HW_REG: + { + OSWriteHWReg(psDevInfo->pvRegsBaseKM, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value); + PDUMPCOMMENT("SGXRunScript: Write HW reg operation"); + PDUMPREG(SGX_PDUMPREG_NAME, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value); + break; + } + case SGX_INIT_OP_READ_HW_REG: + { + OSReadHWReg(psDevInfo->pvRegsBaseKM, psComm->sReadHWReg.ui32Offset); +#if defined(PDUMP) + PDUMPCOMMENT("SGXRunScript: Read HW reg operation"); + PDumpRegRead(SGX_PDUMPREG_NAME, psComm->sReadHWReg.ui32Offset, PDUMP_FLAGS_CONTINUOUS); +#endif + break; + } +#if defined(PDUMP) + case SGX_INIT_OP_PDUMP_HW_REG: + { + PDUMPCOMMENT("SGXRunScript: Dump HW reg operation"); + PDUMPREG(SGX_PDUMPREG_NAME, psComm->sPDumpHWReg.ui32Offset, psComm->sPDumpHWReg.ui32Value); + break; + } +#endif + case SGX_INIT_OP_HALT: + { + return PVRSRV_OK; + } + case SGX_INIT_OP_ILLEGAL: + /* FALLTHROUGH */ + default: + { + PVR_DPF((PVR_DBG_ERROR,"SGXRunScript: PC %d: Illegal command: %d", ui32PC, psComm->eOp)); + return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION; + } + } + + } + + return PVRSRV_ERROR_UNKNOWN_SCRIPT_OPERATION; +} + +#if defined(SUPPORT_MEMORY_TILING) +static PVRSRV_ERROR SGX_AllocMemTilingRangeInt(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32End, + IMG_UINT32 ui32TilingStride, + IMG_UINT32 *pui32RangeIndex) +{ + IMG_UINT32 i; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Val; + + /* HW supports 10 ranges */ + for(i=0; i < SGX_BIF_NUM_TILING_RANGES; i++) + { + if((psDevInfo->ui32MemTilingUsage & (1U << i)) == 0) + { + /* mark in use */ + psDevInfo->ui32MemTilingUsage |= 1U << i; + /* output range index if the caller wants it */ + if(pui32RangeIndex != IMG_NULL) + { + *pui32RangeIndex = i; + } + goto 
RangeAllocated; + } + } + + PVR_DPF((PVR_DBG_ERROR,"SGX_AllocMemTilingRange: all tiling ranges in use")); + return PVRSRV_ERROR_EXCEEDED_HW_LIMITS; + +RangeAllocated: + + /* An improperly aligned range could cause BIF not to tile some memory which is intended to be tiled, + * or cause BIF to tile some memory which is not intended to be. + */ + if(ui32Start & ~SGX_BIF_TILING_ADDR_MASK) + { + PVR_DPF((PVR_DBG_WARNING,"SGX_AllocMemTilingRangeInt: Tiling range start (0x%08X) fails" + "alignment test", ui32Start)); + } + if((ui32End + 0x00001000) & ~SGX_BIF_TILING_ADDR_MASK) + { + PVR_DPF((PVR_DBG_WARNING,"SGX_AllocMemTilingRangeInt: Tiling range end (0x%08X) fails" + "alignment test", ui32End)); + } + + ui32Offset = EUR_CR_BIF_TILE0 + (i<<2); + + ui32Val = ((ui32TilingStride << EUR_CR_BIF_TILE0_CFG_SHIFT) & EUR_CR_BIF_TILE0_CFG_MASK) + | (((ui32End>>SGX_BIF_TILING_ADDR_LSB) << EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT) & EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK) + | (((ui32Start>>SGX_BIF_TILING_ADDR_LSB) << EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT) & EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK) + | (EUR_CR_BIF_TILE0_ENABLE << EUR_CR_BIF_TILE0_CFG_SHIFT); + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Offset, ui32Val); + PDUMPREG(SGX_PDUMPREG_NAME, ui32Offset, ui32Val); + +#if defined(SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS) + ui32Offset = EUR_CR_BIF_TILE0_ADDR_EXT + (i<<2); + + ui32Val = (((ui32End>>SGX_BIF_TILING_EXT_ADDR_LSB) << EUR_CR_BIF_TILE0_ADDR_EXT_MAX_SHIFT) & EUR_CR_BIF_TILE0_ADDR_EXT_MAX_MASK) + | (((ui32Start>>SGX_BIF_TILING_EXT_ADDR_LSB) << EUR_CR_BIF_TILE0_ADDR_EXT_MIN_SHIFT) & EUR_CR_BIF_TILE0_ADDR_EXT_MIN_MASK); + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Offset, ui32Val); + PDUMPREG(SGX_PDUMPREG_NAME, ui32Offset, ui32Val); +#endif /* SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS */ + + return PVRSRV_OK; +} + +#endif /* SUPPORT_MEMORY_TILING */ + +/*! 
+******************************************************************************* + + @Function SGXInitialise + + @Description + + (client invoked) chip-reset and initialisation + + @Input pvDeviceNode - device info. structure + @Input bHardwareRecovery - true if recovering powered hardware, + false if powering up + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bHardwareRecovery) +{ + PVRSRV_ERROR eError; + PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo; + SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM; + static IMG_BOOL bFirstTime = IMG_TRUE; +#if defined(PDUMP) + IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended(); +#endif /* PDUMP */ + +#if defined(SGX_FEATURE_MP) + /* Slave core clocks must be enabled during reset */ +#else + SGXInitClocks(psDevInfo, PDUMP_FLAGS_CONTINUOUS); +#endif /* SGX_FEATURE_MP */ + + /* + Part 1 of the initialisation script runs before resetting SGX. + */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 1\n"); + eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart1, SGX_MAX_INIT_COMMANDS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 1) failed (%d)", eError)); + return eError; + } + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 1\n"); + + /* Reset the chip */ + psDevInfo->ui32NumResets++; + +#if !defined(SGX_FEATURE_MP) + bHardwareRecovery |= bFirstTime; +#endif /* SGX_FEATURE_MP */ + + SGXReset(psDevInfo, bHardwareRecovery, PDUMP_FLAGS_CONTINUOUS); + +#if defined(EUR_CR_POWER) +#if defined(SGX531) + /* + Disable half the pipes. + 531 has 2 pipes within a 4 pipe framework, so + the 2 redundant pipes must be disabled even + though they do not exist. 
+ */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 1); + PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_POWER, 1); +#else + /* set the default pipe count (all fully enabled) */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 0); + PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_POWER, 0); +#endif +#endif + + /* Initialise the kernel CCB event kicker value */ + *psDevInfo->pui32KernelCCBEventKicker = 0; +#if defined(PDUMP) + if (!bPDumpIsSuspended) + { + psDevInfo->ui32KernelCCBEventKickerDumpVal = 0; + PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal, + psDevInfo->psKernelCCBEventKickerMemInfo, 0, + sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo)); + } +#endif /* PDUMP */ + +#if defined(SUPPORT_MEMORY_TILING) + { + /* Initialise EUR_CR_BIF_TILE registers for any tiling heaps */ + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDevInfo->pvDeviceMemoryHeap; + IMG_UINT32 i; + + psDevInfo->ui32MemTilingUsage = 0; + + for(i=0; i<psDevInfo->ui32HeapCount; i++) + { + if(psDeviceMemoryHeap[i].ui32XTileStride > 0) + { + /* Set up the HW control registers */ + eError = SGX_AllocMemTilingRangeInt( + psDevInfo, + psDeviceMemoryHeap[i].sDevVAddrBase.uiAddr, + psDeviceMemoryHeap[i].sDevVAddrBase.uiAddr + + psDeviceMemoryHeap[i].ui32HeapSize, + psDeviceMemoryHeap[i].ui32XTileStride, + NULL); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "Unable to allocate SGX BIF tiling range for heap: %s", + psDeviceMemoryHeap[i].pszName)); + break; + } + } + } + } +#endif + + /* + Part 2 of the initialisation script runs after resetting SGX. 
+ */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 2\n"); + eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart2, SGX_MAX_INIT_COMMANDS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 2) failed (%d)", eError)); + return eError; + } + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 2\n"); + + /* Record the system timestamp for the microkernel */ + psSGXHostCtl->ui32HostClock = OSClockus(); + + psSGXHostCtl->ui32InitStatus = 0; +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Reset the SGX microkernel initialisation status\n"); + PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32InitStatus), + sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psSGXHostCtlMemInfo)); + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Initialise the microkernel\n"); +#endif /* PDUMP */ + +#if defined(SGX_FEATURE_MULTI_EVENT_KICK) + OSWriteMemoryBarrier(); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, + SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), + EUR_CR_EVENT_KICK2_NOW_MASK); +#else + *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF; + OSWriteMemoryBarrier(); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, + SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), + EUR_CR_EVENT_KICK_NOW_MASK); +#endif /* SGX_FEATURE_MULTI_EVENT_KICK */ + + OSMemoryBarrier(); + +#if defined(PDUMP) + /* + Dump the host kick. 
+ */ + if (!bPDumpIsSuspended) + { +#if defined(SGX_FEATURE_MULTI_EVENT_KICK) + PDUMPREG(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), EUR_CR_EVENT_KICK2_NOW_MASK); +#else + psDevInfo->ui32KernelCCBEventKickerDumpVal = 1; + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "First increment of the SGX event kicker value\n"); + PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal, + psDevInfo->psKernelCCBEventKickerMemInfo, + 0, + sizeof(IMG_UINT32), + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo)); + PDUMPREG(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK); +#endif /* SGX_FEATURE_MULTI_EVENT_KICK */ + } +#endif /* PDUMP */ + +#if !defined(NO_HARDWARE) + /* + Wait for the microkernel to finish initialising. + */ + if (PollForValueKM(&psSGXHostCtl->ui32InitStatus, + PVRSRV_USSE_EDM_INIT_COMPLETE, + PVRSRV_USSE_EDM_INIT_COMPLETE, + MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + IMG_FALSE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXInitialise: Wait for uKernel initialisation failed")); + + SGXDumpDebugInfo(psDevInfo, IMG_FALSE); + PVR_DBG_BREAK; + + return PVRSRV_ERROR_RETRY; + } +#endif /* NO_HARDWARE */ + +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "Wait for the SGX microkernel initialisation to complete"); + PDUMPMEMPOL(psSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32InitStatus), + PVRSRV_USSE_EDM_INIT_COMPLETE, + PVRSRV_USSE_EDM_INIT_COMPLETE, + PDUMP_POLL_OPERATOR_EQUAL, + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psSGXHostCtlMemInfo)); +#endif /* PDUMP */ + + PVR_ASSERT(psDevInfo->psKernelCCBCtl->ui32ReadOffset == psDevInfo->psKernelCCBCtl->ui32WriteOffset); + + bFirstTime = IMG_FALSE; + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function SGXDeinitialise + + @Description + + (client invoked) chip-reset and deinitialisation + + @Input hDevCookie - device info. 
handle + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie) + +{ + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *) hDevCookie; + PVRSRV_ERROR eError; + + /* Did SGXInitialise map the SGX registers in? */ + if (psDevInfo->pvRegsBaseKM == IMG_NULL) + { + return PVRSRV_OK; + } + + eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asDeinitCommands, SGX_MAX_DEINIT_COMMANDS); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXDeinitialise: SGXRunScript failed (%d)", eError)); + return eError; + } + + return PVRSRV_OK; +} + + +/*! +******************************************************************************* + + @Function DevInitSGXPart1 + + @Description + + Reset and initialise Chip + + @Input pvDeviceNode - device info. structure + + @Return PVRSRV_ERROR + +******************************************************************************/ +static PVRSRV_ERROR DevInitSGXPart1 (IMG_VOID *pvDeviceNode) +{ + IMG_HANDLE hDevMemHeap = IMG_NULL; + PVRSRV_SGXDEV_INFO *psDevInfo; + IMG_HANDLE hKernelDevMemContext; + IMG_DEV_PHYADDR sPDDevPAddr; + IMG_UINT32 i; + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap; + PVRSRV_ERROR eError; + + /* pdump info about the core */ + PDUMPCOMMENT("SGX Core Version Information: %s", SGX_CORE_FRIENDLY_NAME); + + #if defined(SGX_FEATURE_MP) + #if !defined(SGX_FEATURE_MP_PLUS) + PDUMPCOMMENT("SGX Multi-processor: %d cores", SGX_FEATURE_MP_CORE_COUNT); + #else + PDUMPCOMMENT("SGX Multi-processor: %d TA cores, %d 3D cores", SGX_FEATURE_MP_CORE_COUNT_TA, SGX_FEATURE_MP_CORE_COUNT_3D); + #endif + #endif /* SGX_FEATURE_MP */ + +#if (SGX_CORE_REV == 0) + PDUMPCOMMENT("SGX Core Revision Information: head RTL"); +#else + PDUMPCOMMENT("SGX Core Revision Information: %d", SGX_CORE_REV); +#endif + + #if 
defined(SGX_FEATURE_SYSTEM_CACHE) + PDUMPCOMMENT("SGX System Level Cache is present\r\n"); + #if defined(SGX_BYPASS_SYSTEM_CACHE) + PDUMPCOMMENT("SGX System Level Cache is bypassed\r\n"); + #endif /* SGX_BYPASS_SYSTEM_CACHE */ + #endif /* SGX_FEATURE_SYSTEM_CACHE */ + + PDUMPCOMMENT("SGX Initialisation Part 1"); + + /* Allocate device control block */ + if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_SGXDEV_INFO), + (IMG_VOID **)&psDevInfo, IMG_NULL, + "SGX Device Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for DevInfo")); + return (PVRSRV_ERROR_OUT_OF_MEMORY); + } + OSMemSet (psDevInfo, 0, sizeof(PVRSRV_SGXDEV_INFO)); + + /* setup info from jdisplayconfig.h (variations controlled by build) */ + psDevInfo->eDeviceType = DEV_DEVICE_TYPE; + psDevInfo->eDeviceClass = DEV_DEVICE_CLASS; + + /* Initialize SGX idle status */ + psDevInfo->bSGXIdle = IMG_TRUE; + + /* Store the devinfo as its needed by dynamically enumerated systems called from BM */ + psDeviceNode->pvDevice = (IMG_PVOID)psDevInfo; + + /* get heap info from the devnode */ + psDevInfo->ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount; + psDevInfo->pvDeviceMemoryHeap = (IMG_VOID*)psDeviceMemoryHeap; + + /* create the kernel memory context */ + hKernelDevMemContext = BM_CreateContext(psDeviceNode, + &sPDDevPAddr, + IMG_NULL, + IMG_NULL); + if (hKernelDevMemContext == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1: Failed BM_CreateContext")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psDevInfo->sKernelPDDevPAddr = sPDDevPAddr; + + /* create the kernel, shared and shared_exported heaps */ + for(i=0; i<psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++) + { + switch(psDeviceMemoryHeap[i].DevMemHeapType) + { + case DEVICE_MEMORY_HEAP_KERNEL: + case DEVICE_MEMORY_HEAP_SHARED: + case DEVICE_MEMORY_HEAP_SHARED_EXPORTED: + { + /* Shared PB heap could be zero size */ + if (psDeviceMemoryHeap[i].ui32HeapSize > 0) + { + hDevMemHeap = 
BM_CreateHeap (hKernelDevMemContext, + &psDeviceMemoryHeap[i]); + /* + in the case of kernel context heaps just store + the heap handle in the heap info structure + */ + psDeviceMemoryHeap[i].hDevMemHeap = hDevMemHeap; + } + break; + } + } + } +#if defined(PDUMP) + if(hDevMemHeap) + { + /* set up the MMU pdump info */ + psDevInfo->sMMUAttrib = *((BM_HEAP*)hDevMemHeap)->psMMUAttrib; + } +#endif + eError = MMU_BIFResetPDAlloc(psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGX : Failed to alloc memory for BIF reset")); + return eError; + } + + return PVRSRV_OK; +} + +/*! +******************************************************************************* + + @Function SGXGetInfoForSrvinitKM + + @Description + + Get SGX related information necessary for initilisation server + + @Input hDevHandle - device handle + psInitInfo - pointer to structure for returned information + + @Output psInitInfo - pointer to structure containing returned information + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, PVRSRV_HEAP_INFO_KM *pasHeapInfo, IMG_DEV_PHYADDR *psPDDevPAddr) +#else +PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo) +#endif +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_SGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError; + + PDUMPCOMMENT("SGXGetInfoForSrvinit"); + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle; + psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice; + +#if defined (SUPPORT_SID_INTERFACE) + *psPDDevPAddr = psDevInfo->sKernelPDDevPAddr; + + eError = PVRSRVGetDeviceMemHeapsKM(hDevHandle, pasHeapInfo); +#else + psInitInfo->sPDDevPAddr = psDevInfo->sKernelPDDevPAddr; + + eError = PVRSRVGetDeviceMemHeapsKM(hDevHandle, &psInitInfo->asHeapInfo[0]); +#endif + if (eError != PVRSRV_OK) + { + 
PVR_DPF((PVR_DBG_ERROR,"SGXGetInfoForSrvinit: PVRSRVGetDeviceMemHeapsKM failed (%d)", eError)); + return eError; + } + + return eError; +} + +/*! +******************************************************************************* + + @Function DevInitSGXPart2KM + + @Description + + Reset and initialise Chip + + @Input pvDeviceNode - device info. structure + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR DevInitSGXPart2KM (PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_HANDLE hDevHandle, +#if defined (SUPPORT_SID_INTERFACE) + SGX_BRIDGE_INIT_INFO_KM *psInitInfo) +#else + SGX_BRIDGE_INIT_INFO *psInitInfo) +#endif +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_SGXDEV_INFO *psDevInfo; + PVRSRV_ERROR eError; + SGX_DEVICE_MAP *psSGXDeviceMap; + PVRSRV_DEV_POWER_STATE eDefaultPowerState; + + PDUMPCOMMENT("SGX Initialisation Part 2"); + + psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle; + psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice; + + /* + Init devinfo + */ + eError = InitDevInfo(psPerProc, psDeviceNode, psInitInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to load EDM program")); + goto failed_init_dev_info; + } + + + eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX, + (IMG_VOID**)&psSGXDeviceMap); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to get device memory map!")); + return PVRSRV_ERROR_INIT_FAILURE; + } + + /* Registers already mapped? 
*/ + if (psSGXDeviceMap->pvRegsCpuVBase) + { + psDevInfo->pvRegsBaseKM = psSGXDeviceMap->pvRegsCpuVBase; + } + else + { + /* Map Regs */ + psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psSGXDeviceMap->sRegsCpuPBase, + psSGXDeviceMap->ui32RegsSize, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + IMG_NULL); + if (!psDevInfo->pvRegsBaseKM) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in regs\n")); + return PVRSRV_ERROR_BAD_MAPPING; + } + } + psDevInfo->ui32RegSize = psSGXDeviceMap->ui32RegsSize; + psDevInfo->sRegsPhysBase = psSGXDeviceMap->sRegsSysPBase; + + +#if defined(SGX_FEATURE_HOST_PORT) + if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT) + { + /* Map Host Port */ + psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(psSGXDeviceMap->sHPCpuPBase, + psSGXDeviceMap->ui32HPSize, + PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, + IMG_NULL); + if (!psDevInfo->pvHostPortBaseKM) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in host port\n")); + return PVRSRV_ERROR_BAD_MAPPING; + } + psDevInfo->ui32HPSize = psSGXDeviceMap->ui32HPSize; + psDevInfo->sHPSysPAddr = psSGXDeviceMap->sHPSysPBase; + } +#endif/* #ifdef SGX_FEATURE_HOST_PORT */ + +#if defined (SYS_USING_INTERRUPTS) + + /* Set up ISR callback information. */ + psDeviceNode->pvISRData = psDeviceNode; + /* ISR handler address was set up earlier */ + PVR_ASSERT(psDeviceNode->pfnDeviceISR == SGX_ISRHandler); + +#endif /* SYS_USING_INTERRUPTS */ + + /* Prevent the microkernel being woken up before there is something to do. */ + psDevInfo->psSGXHostCtl->ui32PowerStatus |= PVRSRV_USSE_EDM_POWMAN_NO_WORK; + eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF; + /* Register the device with the power manager. 
*/ + eError = PVRSRVRegisterPowerDevice (psDeviceNode->sDevId.ui32DeviceIndex, + &SGXPrePowerState, &SGXPostPowerState, + &SGXPreClockSpeedChange, &SGXPostClockSpeedChange, + (IMG_HANDLE)psDeviceNode, + PVRSRV_DEV_POWER_STATE_OFF, + eDefaultPowerState); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: failed to register device with power manager")); + return eError; + } + +#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE) + /* map the external system cache control registers into the SGX MMU */ + psDevInfo->ui32ExtSysCacheRegsSize = psSGXDeviceMap->ui32ExtSysCacheRegsSize; + psDevInfo->sExtSysCacheRegsDevPBase = psSGXDeviceMap->sExtSysCacheRegsDevPBase; + eError = MMU_MapExtSystemCacheRegs(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXInitialise : Failed to map external system cache registers")); + return eError; + } +#endif /* SUPPORT_EXTERNAL_SYSTEM_CACHE */ + + /* + Initialise the Kernel CCB + */ + OSMemSet(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB)); + OSMemSet(psDevInfo->psKernelCCBCtl, 0, sizeof(PVRSRV_SGX_CCB_CTL)); + OSMemSet(psDevInfo->pui32KernelCCBEventKicker, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker)); + PDUMPCOMMENT("Initialise Kernel CCB"); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBMemInfo, 0, sizeof(PVRSRV_SGX_KERNEL_CCB), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBMemInfo)); + PDUMPCOMMENT("Initialise Kernel CCB Control"); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBCtlMemInfo, 0, sizeof(PVRSRV_SGX_CCB_CTL), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBCtlMemInfo)); + PDUMPCOMMENT("Initialise Kernel CCB Event Kicker"); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo)); + + return PVRSRV_OK; + +failed_init_dev_info: + return eError; +} + +/*! 
*******************************************************************************

 @Function	DevDeInitSGX

 @Description

 Reset and deinitialise Chip: tears down, in order, the HW-recovery
 timer, external cache mappings, BIF reset PD, devinfo, heaps, kernel
 context, power registration, register/host-port mappings and finally
 the devinfo and heap-info allocations.

 @Input pvDeviceNode - device info. structure

 @Return   PVRSRV_ERROR

******************************************************************************/
static PVRSRV_ERROR DevDeInitSGX (IMG_VOID *pvDeviceNode)
{
	PVRSRV_DEVICE_NODE			*psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
	PVRSRV_SGXDEV_INFO			*psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
	PVRSRV_ERROR				eError;
	IMG_UINT32					ui32Heap;
	DEVICE_MEMORY_HEAP_INFO		*psDeviceMemoryHeap;
	SGX_DEVICE_MAP				*psSGXDeviceMap;

	if (!psDevInfo)
	{
		/* Can happen if DevInitSGX failed */
		PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Null DevInfo"));
		return PVRSRV_OK;
	}

#if defined(SUPPORT_HW_RECOVERY)
	/* Stop the lockup-detection timer first so it cannot fire during teardown. */
	if (psDevInfo->hTimer)
	{
		eError = OSRemoveTimer(psDevInfo->hTimer);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer"));
			return eError;
		}
		psDevInfo->hTimer = IMG_NULL;
	}
#endif /* SUPPORT_HW_RECOVERY */

#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
	/* unmap the external system cache control registers */
	eError = MMU_UnmapExtSystemCacheRegs(psDeviceNode);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to unmap ext system cache registers"));
		return eError;
	}
#endif /* SUPPORT_EXTERNAL_SYSTEM_CACHE */

	MMU_BIFResetPDFree(psDevInfo);

	/*
		DeinitDevInfo the DevInfo
	*/
	DeinitDevInfo(psDevInfo);

	/* Destroy heaps (only the kernel/shared heap types created in DevInitSGXPart1). */
	psDeviceMemoryHeap = (DEVICE_MEMORY_HEAP_INFO *)psDevInfo->pvDeviceMemoryHeap;
	for(ui32Heap=0; ui32Heap<psDeviceNode->sDevMemoryInfo.ui32HeapCount; ui32Heap++)
	{
		switch(psDeviceMemoryHeap[ui32Heap].DevMemHeapType)
		{
			case DEVICE_MEMORY_HEAP_KERNEL:
			case DEVICE_MEMORY_HEAP_SHARED:
			case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
			{
				if (psDeviceMemoryHeap[ui32Heap].hDevMemHeap != IMG_NULL)
				{
					BM_DestroyHeap(psDeviceMemoryHeap[ui32Heap].hDevMemHeap);
				}
				break;
			}
		}
	}

	/* Destroy the kernel context. */
	eError = BM_DestroyContext(psDeviceNode->sDevMemoryInfo.pBMKernelContext, IMG_NULL);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX : Failed to destroy kernel context"));
		return eError;
	}

	/* remove the device from the power manager */
	eError = PVRSRVRemovePowerDevice (((PVRSRV_DEVICE_NODE*)pvDeviceNode)->sDevId.ui32DeviceIndex);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
									(IMG_VOID**)&psSGXDeviceMap);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to get device memory map!"));
		return eError;
	}

	/* Only unmap the registers if they were mapped here */
	if (!psSGXDeviceMap->pvRegsCpuVBase)
	{
		/* UnMap Regs */
		if (psDevInfo->pvRegsBaseKM != IMG_NULL)
		{
			OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
							 psDevInfo->ui32RegSize,
							 PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
							 IMG_NULL);
		}
	}

#if defined(SGX_FEATURE_HOST_PORT)
	if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT)
	{
		/* unMap Host Port */
		if (psDevInfo->pvHostPortBaseKM != IMG_NULL)
		{
			OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
							 psDevInfo->ui32HPSize,
							 PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
							 IMG_NULL);
		}
	}
#endif /* #ifdef SGX_FEATURE_HOST_PORT */


	/* DeAllocate devinfo */
	OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
			  sizeof(PVRSRV_SGXDEV_INFO),
			  psDevInfo,
			  0);

	psDeviceNode->pvDevice = IMG_NULL;

	if (psDeviceMemoryHeap != IMG_NULL)
	{
		/* Free the device memory heap info. */
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
				  sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
				  psDeviceMemoryHeap,
				  0);
	}

	return PVRSRV_OK;
}


#if defined(RESTRICTED_REGISTERS) && defined(SGX_FEATURE_MP)

/*!
*******************************************************************************

 @Function	SGXDumpMasterDebugReg

 @Description

 Dump a single SGX debug register value (hydra/master bank — read without
 the per-core MP select, unlike SGXDumpDebugReg)

 @Input psDevInfo - SGX device info
 @Input pszName - string used for logging
 @Input ui32RegAddr - SGX register offset

 @Return   IMG_VOID

******************************************************************************/
static IMG_VOID SGXDumpMasterDebugReg (PVRSRV_SGXDEV_INFO	*psDevInfo,
									   IMG_CHAR				*pszName,
									   IMG_UINT32			ui32RegAddr)
{
	IMG_UINT32	ui32RegVal;
	ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32RegAddr);
	PVR_LOG(("(HYD) %s%08X", pszName, ui32RegVal));
}

#endif /* defined(RESTRICTED_REGISTERS) */

/*!
+******************************************************************************* + + @Function SGXDumpDebugReg + + @Description + + Dump a single SGX debug register value + + @Input psDevInfo - SGX device info + @Input ui32CoreNum - processor number + @Input pszName - string used for logging + @Input ui32RegAddr - SGX register offset + + @Return IMG_VOID + +******************************************************************************/ +static IMG_VOID SGXDumpDebugReg (PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32CoreNum, + IMG_CHAR *pszName, + IMG_UINT32 ui32RegAddr) +{ + IMG_UINT32 ui32RegVal; + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(ui32RegAddr, ui32CoreNum)); + PVR_LOG(("(P%u) %s%08X", ui32CoreNum, pszName, ui32RegVal)); +} + +#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) || defined(FIX_HW_BRN_31620) +static INLINE IMG_UINT32 GetDirListBaseReg(IMG_UINT32 ui32Index) +{ + if (ui32Index == 0) + { + return EUR_CR_BIF_DIR_LIST_BASE0; + } + else + { + return (EUR_CR_BIF_DIR_LIST_BASE1 + ((ui32Index - 1) * 0x4)); + } +} +#endif + +void dsscomp_kdump(void); +/*! +******************************************************************************* + + @Function SGXDumpDebugInfo + + @Description + + Dump useful debugging info + + @Input psDevInfo - SGX device info + @Input bDumpSGXRegs - Whether to dump SGX debug registers. Must not be done + when SGX is not powered. 
+ + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID SGXDumpDebugInfo (PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bDumpSGXRegs) +{ + IMG_UINT32 ui32CoreNum; + + dsscomp_kdump(); + + PVR_LOG(("SGX debug (%s)", PVRVERSION_STRING)); + + if (bDumpSGXRegs) + { + PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Linear): 0x%08X", (IMG_UINTPTR_T)psDevInfo->pvRegsBaseKM)); + PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Physical): 0x%08X", psDevInfo->sRegsPhysBase.uiAddr)); + + SGXDumpDebugReg(psDevInfo, 0, "EUR_CR_CORE_ID: ", EUR_CR_CORE_ID); + SGXDumpDebugReg(psDevInfo, 0, "EUR_CR_CORE_REVISION: ", EUR_CR_CORE_REVISION); +#if defined(RESTRICTED_REGISTERS) && defined(SGX_FEATURE_MP) + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_BIF_INT_STAT: ", EUR_CR_MASTER_BIF_INT_STAT); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_BIF_FAULT: ",EUR_CR_MASTER_BIF_FAULT); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_CLKGATESTATUS2: ",EUR_CR_MASTER_CLKGATESTATUS2 ); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_PIM_STATUS: ",EUR_CR_MASTER_VDM_PIM_STATUS); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_BIF_BANK_SET: ",EUR_CR_MASTER_BIF_BANK_SET); + + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_EVENT_STATUS: ",EUR_CR_MASTER_EVENT_STATUS); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_EVENT_STATUS2: ",EUR_CR_MASTER_EVENT_STATUS2); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_MP_PRIMITIVE: ",EUR_CR_MASTER_MP_PRIMITIVE); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_DPM_DPLIST_STATUS: ",EUR_CR_MASTER_DPM_DPLIST_STATUS); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_DPM_PROACTIVE_PIM_SPEC: ",EUR_CR_MASTER_DPM_PROACTIVE_PIM_SPEC); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_PAGE_MANAGEOP: ",EUR_CR_MASTER_DPM_PAGE_MANAGEOP); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_SNAPSHOT: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_SNAPSHOT); + SGXDumpMasterDebugReg(psDevInfo, 
"EUR_CR_MASTER_VDM_CONTEXT_LOAD_STATUS: ",EUR_CR_MASTER_VDM_CONTEXT_LOAD_STATUS); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STREAM: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_STREAM); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STATUS: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_STATUS); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE0: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE0); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE1: ",EUR_CR_MASTER_VDM_CONTEXT_STORE_STATE1); + SGXDumpMasterDebugReg(psDevInfo, "EUR_CR_MASTER_VDM_WAIT_FOR_KICK: ",EUR_CR_MASTER_VDM_WAIT_FOR_KICK); +#endif + for (ui32CoreNum = 0; ui32CoreNum < SGX_FEATURE_MP_CORE_COUNT_3D; ui32CoreNum++) + { + /* Dump HW event status */ + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_STATUS: ", EUR_CR_EVENT_STATUS); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_STATUS2: ", EUR_CR_EVENT_STATUS2); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_CTRL: ", EUR_CR_BIF_CTRL); + #if defined(EUR_CR_BIF_BANK0) + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_BANK0: ", EUR_CR_BIF_BANK0); + #endif + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_INT_STAT: ", EUR_CR_BIF_INT_STAT); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_FAULT: ", EUR_CR_BIF_FAULT); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_MEM_REQ_STAT: ", EUR_CR_BIF_MEM_REQ_STAT); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_CLKGATECTL: ", EUR_CR_CLKGATECTL); + #if defined(EUR_CR_PDS_PC_BASE) + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_PDS_PC_BASE: ", EUR_CR_PDS_PC_BASE); + #endif +#if defined(RESTRICTED_REGISTERS) + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_BIF_BANK_SET: ", EUR_CR_BIF_BANK_SET); + + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_CLKGATECTL: ", EUR_CR_CLKGATECTL); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_CLKGATESTATUS: ", EUR_CR_CLKGATESTATUS); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, 
"EUR_CR_MTE_CTRL: ", EUR_CR_MTE_CTRL); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_OTHER_PDS_EXEC: ", EUR_CR_EVENT_OTHER_PDS_EXEC); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_OTHER_PDS_DATA: ", EUR_CR_EVENT_OTHER_PDS_DATA); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_EVENT_OTHER_PDS_INFO: ", EUR_CR_EVENT_OTHER_PDS_INFO); + + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_ZLS_PAGE_THRESHOLD: ", EUR_CR_DPM_ZLS_PAGE_THRESHOLD); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_TA_GLOBAL_LIST: ", EUR_CR_DPM_TA_GLOBAL_LIST); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_STATE_CONTEXT_ID: ", EUR_CR_DPM_STATE_CONTEXT_ID); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_CONTEXT_PB_BASE: ", EUR_CR_DPM_CONTEXT_PB_BASE); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_TA_ALLOC_FREE_LIST_STATUS1: ", EUR_CR_DPM_TA_ALLOC_FREE_LIST_STATUS1); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_3D_FREE_LIST_STATUS1: ", EUR_CR_DPM_3D_FREE_LIST_STATUS1); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_TA_ALLOC_FREE_LIST_STATUS2: ", EUR_CR_DPM_TA_ALLOC_FREE_LIST_STATUS2); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_3D_FREE_LIST_STATUS2: ", EUR_CR_DPM_3D_FREE_LIST_STATUS2); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_ABORT_STATUS_MTILE: ", EUR_CR_DPM_ABORT_STATUS_MTILE); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_PAGE_STATUS: ", EUR_CR_DPM_PAGE_STATUS); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_PAGE: ", EUR_CR_DPM_PAGE); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_GLOBAL_PAGE_STATUS: ", EUR_CR_DPM_GLOBAL_PAGE_STATUS); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_VDM_CONTEXT_LOAD_STATUS: ", EUR_CR_VDM_CONTEXT_LOAD_STATUS); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_VDM_CONTEXT_STORE_STATUS: ", EUR_CR_VDM_CONTEXT_STORE_STATUS); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_VDM_TASK_KICK_STATUS: ", EUR_CR_VDM_TASK_KICK_STATUS); + 
SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_VDM_CONTEXT_STORE_STATE0: ", EUR_CR_VDM_CONTEXT_STORE_STATE0); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_VDM_CONTEXT_STORE_STATE1: ", EUR_CR_VDM_CONTEXT_STORE_STATE1); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_REQUESTING: ", EUR_CR_DPM_REQUESTING); + SGXDumpDebugReg(psDevInfo, ui32CoreNum, "EUR_CR_DPM_REQUESTING: ", EUR_CR_DPM_REQUESTING); + +#endif + } + + #if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && !defined(FIX_HW_BRN_31620) + { + IMG_UINT32 ui32RegVal; + IMG_UINT32 ui32PDDevPAddr; + + /* + If there was a SGX pagefault check the page table too see if the + host thinks the fault is correct + */ + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); + if (ui32RegVal & EUR_CR_BIF_INT_STAT_PF_N_RW_MASK) + { + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT); + ui32RegVal &= EUR_CR_BIF_FAULT_ADDR_MASK; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0); + ui32PDDevPAddr &= EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK; + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32RegVal); + } + } + #else + { + IMG_UINT32 ui32FaultAddress; + IMG_UINT32 ui32Bank0; + IMG_UINT32 ui32DirListIndex; + IMG_UINT32 ui32PDDevPAddr; + + ui32FaultAddress = OSReadHWReg(psDevInfo->pvRegsBaseKM, + EUR_CR_BIF_FAULT); + ui32FaultAddress = ui32FaultAddress & EUR_CR_BIF_FAULT_ADDR_MASK; + + ui32Bank0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0); + + /* Check the EDM's's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_EDM_MASK) >> EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking EDM memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + + /* Check the TA's memory context */ + ui32DirListIndex = (ui32Bank0 & 
EUR_CR_BIF_BANK0_INDEX_TA_MASK) >> EUR_CR_BIF_BANK0_INDEX_TA_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking TA memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + + /* Check the 3D's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_3D_MASK) >> EUR_CR_BIF_BANK0_INDEX_3D_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking 3D memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + + #if defined(EUR_CR_BIF_BANK0_INDEX_2D_MASK) + /* Check the 2D's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_2D_MASK) >> EUR_CR_BIF_BANK0_INDEX_2D_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking 2D memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + #endif + + #if defined(EUR_CR_BIF_BANK0_INDEX_PTLA_MASK) + /* Check the 2D's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_PTLA_MASK) >> EUR_CR_BIF_BANK0_INDEX_PTLA_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking PTLA memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + #endif + + #if defined(EUR_CR_BIF_BANK0_INDEX_HOST_MASK) + /* Check the Host's memory context */ + ui32DirListIndex = (ui32Bank0 & EUR_CR_BIF_BANK0_INDEX_HOST_MASK) >> EUR_CR_BIF_BANK0_INDEX_HOST_SHIFT; + ui32PDDevPAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, + GetDirListBaseReg(ui32DirListIndex)); + PVR_LOG(("Checking Host 
memory context (index = %d, PD = 0x%08x)", ui32DirListIndex, ui32PDDevPAddr)); + MMU_CheckFaultAddr(psDevInfo, ui32PDDevPAddr, ui32FaultAddress); + #endif + } + #endif + } + /* + Dump out the outstanding queue items. + */ + QueueDumpDebugInfo(); + + { + /* + Dump out the Host control. + */ + SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; + IMG_UINT32 *pui32HostCtlBuffer = (IMG_UINT32 *)psSGXHostCtl; + IMG_UINT32 ui32LoopCounter; + + if (psSGXHostCtl->ui32AssertFail != 0) + { + PVR_LOG(("SGX Microkernel assert fail: 0x%08X", psSGXHostCtl->ui32AssertFail)); + psSGXHostCtl->ui32AssertFail = 0; + } + + PVR_LOG(("SGX Host control:")); + + for (ui32LoopCounter = 0; + ui32LoopCounter < sizeof(*psDevInfo->psSGXHostCtl) / sizeof(*pui32HostCtlBuffer); + ui32LoopCounter += 4) + { + PVR_LOG(("\t(HC-%X) 0x%08X 0x%08X 0x%08X 0x%08X", ui32LoopCounter * sizeof(*pui32HostCtlBuffer), + pui32HostCtlBuffer[ui32LoopCounter + 0], pui32HostCtlBuffer[ui32LoopCounter + 1], + pui32HostCtlBuffer[ui32LoopCounter + 2], pui32HostCtlBuffer[ui32LoopCounter + 3])); + } + } + + { + /* + Dump out the TA/3D control. 
+ */ + IMG_UINT32 *pui32TA3DCtlBuffer = psDevInfo->psKernelSGXTA3DCtlMemInfo->pvLinAddrKM; + IMG_UINT32 ui32LoopCounter; + + PVR_LOG(("SGX TA/3D control:")); + + for (ui32LoopCounter = 0; + ui32LoopCounter < psDevInfo->psKernelSGXTA3DCtlMemInfo->uAllocSize / sizeof(*pui32TA3DCtlBuffer); + ui32LoopCounter += 4) + { + PVR_LOG(("\t(T3C-%X) 0x%08X 0x%08X 0x%08X 0x%08X", ui32LoopCounter * sizeof(*pui32TA3DCtlBuffer), + pui32TA3DCtlBuffer[ui32LoopCounter + 0], pui32TA3DCtlBuffer[ui32LoopCounter + 1], + pui32TA3DCtlBuffer[ui32LoopCounter + 2], pui32TA3DCtlBuffer[ui32LoopCounter + 3])); + } + } + + #if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + { + IMG_UINT32 *pui32MKTraceBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM; + IMG_UINT32 ui32LastStatusCode, ui32WriteOffset; + + ui32LastStatusCode = *pui32MKTraceBuffer; + pui32MKTraceBuffer++; + ui32WriteOffset = *pui32MKTraceBuffer; + pui32MKTraceBuffer++; + + PVR_LOG(("Last SGX microkernel status code: %08X %s", + ui32LastStatusCode, SGXUKernelStatusString(ui32LastStatusCode))); + + #if defined(PVRSRV_DUMP_MK_TRACE) + /* + Dump the raw microkernel trace buffer to the log. + */ + { + IMG_UINT32 ui32LoopCounter; + + for (ui32LoopCounter = 0; + ui32LoopCounter < SGXMK_TRACE_BUFFER_SIZE; + ui32LoopCounter++) + { + IMG_UINT32 *pui32BufPtr; + pui32BufPtr = pui32MKTraceBuffer + + (((ui32WriteOffset + ui32LoopCounter) % SGXMK_TRACE_BUFFER_SIZE) * 4); + PVR_LOG(("\t(MKT-%X) %08X %08X %08X %08X %s", ui32LoopCounter, + pui32BufPtr[2], pui32BufPtr[3], pui32BufPtr[1], pui32BufPtr[0], + SGXUKernelStatusString(pui32BufPtr[0]))); + } + } + #endif /* PVRSRV_DUMP_MK_TRACE */ + } + #endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */ + + { + /* + Dump out the kernel CCB. 
+ */ + PVR_LOG(("SGX Kernel CCB WO:0x%X RO:0x%X", + psDevInfo->psKernelCCBCtl->ui32WriteOffset, + psDevInfo->psKernelCCBCtl->ui32ReadOffset)); + + #if defined(PVRSRV_DUMP_KERNEL_CCB) + { + IMG_UINT32 ui32LoopCounter; + + for (ui32LoopCounter = 0; + ui32LoopCounter < sizeof(psDevInfo->psKernelCCB->asCommands) / + sizeof(psDevInfo->psKernelCCB->asCommands[0]); + ui32LoopCounter++) + { + SGXMKIF_COMMAND *psCommand = &psDevInfo->psKernelCCB->asCommands[ui32LoopCounter]; + + PVR_LOG(("\t(KCCB-%X) %08X %08X - %08X %08X %08X %08X", ui32LoopCounter, + psCommand->ui32ServiceAddress, psCommand->ui32CacheControl, + psCommand->ui32Data[0], psCommand->ui32Data[1], + psCommand->ui32Data[2], psCommand->ui32Data[3])); + } + } + #endif /* PVRSRV_DUMP_KERNEL_CCB */ + } + #if defined (TTRACE) + PVRSRVDumpTimeTraceBuffers(); + #endif + +} + + +#if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY) +/*! +******************************************************************************* + + @Function HWRecoveryResetSGX + + @Description + + Resets SGX + + Note: may be called from an ISR so should not call pdump. + + @Input psDevInfo - dev info + + @Input ui32Component - core component to reset + + @Return IMG_VOID + +******************************************************************************/ +static +IMG_VOID HWRecoveryResetSGX (PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32Component, + IMG_UINT32 ui32CallerID) +{ + PVRSRV_ERROR eError; + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl; + +#if defined(SUPPORT_HWRECOVERY_TRACE_LIMIT) + static IMG_UINT32 ui32Clockinus = 0; + static IMG_UINT32 ui32HWRecoveryCount=0; + IMG_UINT32 ui32TempClockinus=0; +#endif + + PVR_UNREFERENCED_PARAMETER(ui32Component); + + /* Debug dumps associated with HWR can be long. 
Delay system suspend */ + SysLockSystemSuspend(); + + /* + Ensure that hardware recovery is serialised with any power transitions. + */ + eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE); + if(eError != PVRSRV_OK) + { + /* + Unable to obtain lock because there is already a power transition + in progress. + */ + PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGX: Power transition in progress")); + return; + } + + psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR; + + PVR_LOG(("HWRecoveryResetSGX: SGX Hardware Recovery triggered")); + +#if defined(SUPPORT_HWRECOVERY_TRACE_LIMIT) +/* + * The following defines are system specific and should be defined in + * the corresponding sysconfig.h file. The values indicated are examples only. + SYS_SGX_HWRECOVERY_TRACE_RESET_TIME_PERIOD 5000000 //(5 Seconds) + SYS_SGX_MAX_HWRECOVERY_OCCURANCE_COUNT 5 + */ + ui32TempClockinus = OSClockus(); + if((ui32TempClockinus-ui32Clockinus) < SYS_SGX_HWRECOVERY_TRACE_RESET_TIME_PERIOD){ + ui32HWRecoveryCount++; + if(SYS_SGX_MAX_HWRECOVERY_OCCURANCE_COUNT <= ui32HWRecoveryCount){ + OSPanic(); + } + }else{ + ui32Clockinus = ui32TempClockinus; + SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_TRUE); + ui32HWRecoveryCount = 0; + } +#else + SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_TRUE); +#endif + + /* Suspend pdumping. */ + PDUMPSUSPEND(); + + /* Reset and re-initialise SGX. */ + eError = SGXInitialise(psDevInfo, IMG_TRUE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError)); + } + + /* Resume pdumping. */ + PDUMPRESUME(); + + PVRSRVPowerUnlock(ui32CallerID); + + SysUnlockSystemSuspend(); + + /* Send a dummy kick so that we start processing again */ + SGXScheduleProcessQueuesKM(psDeviceNode); + + /* Flush any old commands from the queues. */ + PVRSRVProcessQueues(IMG_TRUE); +} +#endif /* #if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY) */ + + +#if defined(SUPPORT_HW_RECOVERY) +/*! 
+****************************************************************************** + + @Function SGXOSTimer + + @Description + + Timer function for SGX + + @Input pvData - private data + + @Return PVRSRV_ERROR + +******************************************************************************/ +IMG_VOID SGXOSTimer(IMG_VOID *pvData) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = pvData; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + static IMG_UINT32 ui32EDMTasks = 0; + static IMG_UINT32 ui32LockupCounter = 0; /* To prevent false positives */ + static IMG_UINT32 ui32OpenCLDelayCounter = 0; + static IMG_UINT32 ui32NumResets = 0; +#if defined(FIX_HW_BRN_31093) + static IMG_BOOL bBRN31093Inval = IMG_FALSE; +#endif + IMG_UINT32 ui32CurrentEDMTasks; + IMG_UINT32 ui32CurrentOpenCLDelayCounter=0; + IMG_BOOL bLockup = IMG_FALSE; + IMG_BOOL bPoweredDown; + + /* increment a timestamp */ + psDevInfo->ui32TimeStamp++; + +#if defined(NO_HARDWARE) + bPoweredDown = IMG_TRUE; +#else + bPoweredDown = (SGXIsDevicePowered(psDeviceNode)) ? IMG_FALSE : IMG_TRUE; +#endif /* NO_HARDWARE */ + + /* + * Check whether EDM timer tasks are getting scheduled. If not, assume + * that SGX has locked up and reset the chip. + */ + + /* Check whether the timer should be running */ + if (bPoweredDown) + { + ui32LockupCounter = 0; + #if defined(FIX_HW_BRN_31093) + bBRN31093Inval = IMG_FALSE; + #endif + } + else + { + /* The PDS timer should be running. 
*/ + ui32CurrentEDMTasks = OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg0); + if (psDevInfo->ui32EDMTaskReg1 != 0) + { + ui32CurrentEDMTasks ^= OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg1); + } + if ((ui32CurrentEDMTasks == ui32EDMTasks) && + (psDevInfo->ui32NumResets == ui32NumResets)) + { + ui32LockupCounter++; + if (ui32LockupCounter == 3) + { + ui32LockupCounter = 0; + ui32CurrentOpenCLDelayCounter = (psDevInfo->psSGXHostCtl)->ui32OpenCLDelayCount; + if(0 != ui32CurrentOpenCLDelayCounter) + { + if(ui32OpenCLDelayCounter != ui32CurrentOpenCLDelayCounter){ + ui32OpenCLDelayCounter = ui32CurrentOpenCLDelayCounter; + }else{ + ui32OpenCLDelayCounter -= 1; + (psDevInfo->psSGXHostCtl)->ui32OpenCLDelayCount = ui32OpenCLDelayCounter; + } + goto SGX_NoUKernel_LockUp; + } + + + #if defined(FIX_HW_BRN_31093) + if (bBRN31093Inval == IMG_FALSE) + { + /* It could be a BIF hang so do a INVAL_PTE */ + #if defined(FIX_HW_BRN_29997) + IMG_UINT32 ui32BIFCtrl; + /* Pause the BIF before issuing the invalidate */ + ui32BIFCtrl = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_PAUSE_MASK); + /* delay for 200 clocks */ + SGXWaitClocks(psDevInfo, 200); + #endif + /* Flag that we have attempt to un-block the BIF */ + bBRN31093Inval = IMG_TRUE; + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL, EUR_CR_BIF_CTRL_INVAL_PTE_MASK); + /* delay for 200 clocks */ + SGXWaitClocks(psDevInfo, 200); + + #if defined(FIX_HW_BRN_29997) + /* un-pause the BIF by restoring the BIF_CTRL */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl); + #endif + } + else + #endif + { + PVR_DPF((PVR_DBG_ERROR, "SGXOSTimer() detected SGX lockup (0x%x tasks)", ui32EDMTasks)); + + bLockup = IMG_TRUE; + (psDevInfo->psSGXHostCtl)->ui32OpenCLDelayCount = 0; + } + } + } + else + { + #if defined(FIX_HW_BRN_31093) + bBRN31093Inval = IMG_FALSE; + #endif + 
ui32LockupCounter = 0; + ui32EDMTasks = ui32CurrentEDMTasks; + ui32NumResets = psDevInfo->ui32NumResets; + } + } +SGX_NoUKernel_LockUp: + + if (bLockup) + { + SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl; + + /* increment the counter so we know the host detected the lockup */ + psSGXHostCtl->ui32HostDetectedLockups ++; + + /* Reset the chip and process the queues. */ + HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID); + } +} +#endif /* defined(SUPPORT_HW_RECOVERY) */ + + + +#if defined(SYS_USING_INTERRUPTS) + +/* + SGX ISR Handler +*/ +IMG_BOOL SGX_ISRHandler (IMG_VOID *pvData) +{ + IMG_BOOL bInterruptProcessed = IMG_FALSE; + + + /* Real Hardware */ + { + IMG_UINT32 ui32EventStatus = 0, ui32EventEnable = 0; + IMG_UINT32 ui32EventClear = 0; +#if defined(SGX_FEATURE_DATA_BREAKPOINTS) + IMG_UINT32 ui32EventStatus2, ui32EventEnable2; +#endif + IMG_UINT32 ui32EventClear2 = 0; + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_SGXDEV_INFO *psDevInfo; + + /* check for null pointers */ + if(pvData == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGX_ISRHandler: Invalid params\n")); + return bInterruptProcessed; + } + + psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice; + + if(!powering_down) { + ui32EventStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS); + ui32EventEnable = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE); + } + + /* test only the unmasked bits */ + ui32EventStatus &= ui32EventEnable; + +#if defined(SGX_FEATURE_DATA_BREAKPOINTS) + if(!powering_down) { + ui32EventStatus2 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2); + ui32EventEnable2 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE2); + } + + /* test only the unmasked bits */ + ui32EventStatus2 &= ui32EventEnable2; +#endif /* defined(SGX_FEATURE_DATA_BREAKPOINTS) */ + + /* Thought: is it better to insist that the bit assignment in + the "clear" register(s) matches that of the 
"status" register(s)? + It would greatly simplify this LISR */ + + if (ui32EventStatus & EUR_CR_EVENT_STATUS_SW_EVENT_MASK) + { + ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK; + } + +#if defined(SGX_FEATURE_DATA_BREAKPOINTS) + if (ui32EventStatus2 & EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_MASK) + { + ui32EventClear2 |= EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_MASK; + } + + if (ui32EventStatus2 & EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_MASK) + { + ui32EventClear2 |= EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_MASK; + } +#endif /* defined(SGX_FEATURE_DATA_BREAKPOINTS) */ + + if (ui32EventClear || ui32EventClear2) + { + bInterruptProcessed = IMG_TRUE; + + /* Clear master interrupt bit */ + ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK; + + if(!powering_down) { + /* clear the events */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32EventClear); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32EventClear2); + } + } + } + + return bInterruptProcessed; +} + + +/* + SGX MISR Handler +*/ +static IMG_VOID SGX_MISRHandler (IMG_VOID *pvData) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl; + + if (((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) != 0UL) && + ((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) == 0UL)) + { + HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID); + } + +#if defined(OS_SUPPORTS_IN_LISR) + if (psDeviceNode->bReProcessDeviceCommandComplete) + { + SGXScheduleProcessQueuesKM(psDeviceNode); + } +#endif + + SGXTestActivePowerEvent(psDeviceNode, ISR_ID); +} +#endif /* #if defined (SYS_USING_INTERRUPTS) */ + +#if defined(SUPPORT_MEMORY_TILING) + +IMG_INTERNAL +PVRSRV_ERROR SGX_AllocMemTilingRange(PVRSRV_DEVICE_NODE *psDeviceNode, + 
PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32XTileStride, + IMG_UINT32 *pui32RangeIndex) +{ + return SGX_AllocMemTilingRangeInt(psDeviceNode->pvDevice, + psMemInfo->sDevVAddr.uiAddr, + psMemInfo->sDevVAddr.uiAddr + ((IMG_UINT32) psMemInfo->uAllocSize) + SGX_MMU_PAGE_SIZE - 1, + ui32XTileStride, + pui32RangeIndex); +} + +IMG_INTERNAL +PVRSRV_ERROR SGX_FreeMemTilingRange(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32RangeIndex) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32Offset; + IMG_UINT32 ui32Val; + + if(ui32RangeIndex >= 10) + { + PVR_DPF((PVR_DBG_ERROR,"SGX_FreeMemTilingRange: invalid Range index ")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* clear the usage bit */ + psDevInfo->ui32MemTilingUsage &= ~(1<<ui32RangeIndex); + + /* disable the range */ + ui32Offset = EUR_CR_BIF_TILE0 + (ui32RangeIndex<<2); + ui32Val = 0; + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Offset, ui32Val); + PDUMPREG(SGX_PDUMPREG_NAME, ui32Offset, ui32Val); + + return PVRSRV_OK; +} + +#endif /* defined(SUPPORT_MEMORY_TILING) */ + + +static IMG_VOID SGXCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + #if defined(SGX_FEATURE_MP) + psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_SL; + #else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + #endif /* SGX_FEATURE_MP */ +} + +/*! 
+******************************************************************************* + + @Function SGXRegisterDevice + + @Description + + Registers the device with the system + + @Input: psDeviceNode - device node + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode) +{ + DEVICE_MEMORY_INFO *psDevMemoryInfo; + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + + /* setup details that never change */ + psDeviceNode->sDevId.eDeviceType = DEV_DEVICE_TYPE; + psDeviceNode->sDevId.eDeviceClass = DEV_DEVICE_CLASS; +#if defined(PDUMP) + { + /* memory space names are set up in system code */ + SGX_DEVICE_MAP *psSGXDeviceMemMap; + SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX, + (IMG_VOID**)&psSGXDeviceMemMap); + + psDeviceNode->sDevId.pszPDumpDevName = psSGXDeviceMemMap->pszPDumpDevName; + PVR_ASSERT(psDeviceNode->sDevId.pszPDumpDevName != IMG_NULL); + } + + psDeviceNode->sDevId.pszPDumpRegName = SGX_PDUMPREG_NAME; +#endif /* PDUMP */ + + psDeviceNode->pfnInitDevice = &DevInitSGXPart1; + psDeviceNode->pfnDeInitDevice = &DevDeInitSGX; + + psDeviceNode->pfnInitDeviceCompatCheck = &SGXDevInitCompatCheck; +#if defined(PDUMP) + psDeviceNode->pfnPDumpInitDevice = &SGXResetPDump; + psDeviceNode->pfnMMUGetContextID = &MMU_GetPDumpContextID; +#endif + /* + MMU callbacks + */ + psDeviceNode->pfnMMUInitialise = &MMU_Initialise; + psDeviceNode->pfnMMUFinalise = &MMU_Finalise; + psDeviceNode->pfnMMUInsertHeap = &MMU_InsertHeap; + psDeviceNode->pfnMMUCreate = &MMU_Create; + psDeviceNode->pfnMMUDelete = &MMU_Delete; + psDeviceNode->pfnMMUAlloc = &MMU_Alloc; + psDeviceNode->pfnMMUFree = &MMU_Free; + psDeviceNode->pfnMMUMapPages = &MMU_MapPages; + psDeviceNode->pfnMMUMapShadow = &MMU_MapShadow; + psDeviceNode->pfnMMUUnmapPages = &MMU_UnmapPages; + psDeviceNode->pfnMMUMapScatter = &MMU_MapScatter; + psDeviceNode->pfnMMUGetPhysPageAddr = &MMU_GetPhysPageAddr; + 
psDeviceNode->pfnMMUGetPDDevPAddr = &MMU_GetPDDevPAddr; +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + psDeviceNode->pfnMMUIsHeapShared = &MMU_IsHeapShared; +#endif +#if defined(FIX_HW_BRN_31620) + psDeviceNode->pfnMMUGetCacheFlushRange = &MMU_GetCacheFlushRange; + psDeviceNode->pfnMMUGetPDPhysAddr = &MMU_GetPDPhysAddr; +#else + psDeviceNode->pfnMMUGetCacheFlushRange = IMG_NULL; + psDeviceNode->pfnMMUGetPDPhysAddr = IMG_NULL; +#endif + psDeviceNode->pfnMMUMapPagesSparse = &MMU_MapPagesSparse; + psDeviceNode->pfnMMUMapShadowSparse = &MMU_MapShadowSparse; + +#if defined (SYS_USING_INTERRUPTS) + /* + SGX ISR handler + */ + psDeviceNode->pfnDeviceISR = SGX_ISRHandler; + psDeviceNode->pfnDeviceMISR = SGX_MISRHandler; +#endif + +#if defined(SUPPORT_MEMORY_TILING) + psDeviceNode->pfnAllocMemTilingRange = SGX_AllocMemTilingRange; + psDeviceNode->pfnFreeMemTilingRange = SGX_FreeMemTilingRange; +#endif + + /* + SGX command complete handler + */ + psDeviceNode->pfnDeviceCommandComplete = &SGXCommandComplete; + + psDeviceNode->pfnCacheInvalidate = SGXCacheInvalidate; + + /* + and setup the device's memory map: + */ + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + /* size of address space */ + psDevMemoryInfo->ui32AddressSpaceSizeLog2 = SGX_FEATURE_ADDRESS_SPACE_SIZE; + + /* flags, backing store details to be specified by system */ + psDevMemoryInfo->ui32Flags = 0; + + /* device memory heap info about each heap in a device address space */ + if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, + sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID, + (IMG_VOID **)&psDevMemoryInfo->psDeviceMemoryHeap, 0, + "Array of Device Memory Heap Info") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXRegisterDevice : Failed to alloc memory for DEVICE_MEMORY_HEAP_INFO")); + return (PVRSRV_ERROR_OUT_OF_MEMORY); + } + OSMemSet(psDevMemoryInfo->psDeviceMemoryHeap, 0, sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID); + + psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; + + /* + setup heaps + Note: 
backing store to be setup by system (defaults to UMA) + */ + + /************* general ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "General"; + psDeviceMemoryHeap->pszBSName = "General BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; +#if !defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + /* specify the mapping heap ID for this device */ + psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap); +#endif + psDeviceMemoryHeap++;/* advance to the next heap */ + +#if defined(SUPPORT_MEMORY_TILING) + /************* VPB tiling ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_VPB_TILED_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_VPB_TILED_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_VPB_TILED_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "VPB Tiled"; + psDeviceMemoryHeap->pszBSName = "VPB Tiled BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap->ui32XTileStride = SGX_VPB_TILED_HEAP_STRIDE; + PVR_DPF((PVR_DBG_WARNING, "VPB tiling heap tiling stride = 0x%x", psDeviceMemoryHeap->ui32XTileStride)); + psDeviceMemoryHeap++;/* advance to the next heap */ +#endif + +#if defined(SUPPORT_ION) + /************* Ion Heap ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_ION_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_ION_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_ION_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "Ion"; + psDeviceMemoryHeap->pszBSName = "Ion BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* specify the ion heap ID for this device */ + psDevMemoryInfo->ui32IonHeapID = SGX_ION_HEAP_ID; + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ +#endif + + /************* TA data ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_TADATA_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_TADATA_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_TADATA_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_MULTI_PROCESS; + psDeviceMemoryHeap->pszName = "TA Data"; + psDeviceMemoryHeap->pszBSName = "TA Data BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* kernel code ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_CODE_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_CODE_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_CODE_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_MULTI_PROCESS; + psDeviceMemoryHeap->pszName = "Kernel Code"; + psDeviceMemoryHeap->pszBSName = "Kernel Code BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED; + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* Kernel Video Data ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_DATA_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_DATA_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_DATA_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_MULTI_PROCESS; + psDeviceMemoryHeap->pszName = "KernelData"; + psDeviceMemoryHeap->pszBSName = "KernelData BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* PixelShaderUSSE ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PIXELSHADER_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PIXELSHADER_HEAP_BASE; + /* + The actual size of the pixel and vertex shader heap must be such that all + addresses are within range of the one of the USSE code base registers, but + the addressable range is hardware-dependent. + SGX_PIXELSHADER_HEAP_SIZE is defined to be the maximum possible size + to ensure that the heap layout is consistent across all SGXs. + */ + psDeviceMemoryHeap->ui32HeapSize = ((10 << SGX_USE_CODE_SEGMENT_RANGE_BITS) - 0x00001000); + PVR_ASSERT(psDeviceMemoryHeap->ui32HeapSize <= SGX_PIXELSHADER_HEAP_SIZE); + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "PixelShaderUSSE"; + psDeviceMemoryHeap->pszBSName = "PixelShaderUSSE BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* VertexShaderUSSE ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_VERTEXSHADER_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_VERTEXSHADER_HEAP_BASE; + /* See comment above with PixelShaderUSSE ui32HeapSize */ + psDeviceMemoryHeap->ui32HeapSize = ((4 << SGX_USE_CODE_SEGMENT_RANGE_BITS) - 0x00001000); + PVR_ASSERT(psDeviceMemoryHeap->ui32HeapSize <= SGX_VERTEXSHADER_HEAP_SIZE); + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "VertexShaderUSSE"; + psDeviceMemoryHeap->pszBSName = "VertexShaderUSSE BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* PDS Pixel Code/Data ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSPIXEL_CODEDATA_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSPIXEL_CODEDATA_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_PDSPIXEL_CODEDATA_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "PDSPixelCodeData"; + psDeviceMemoryHeap->pszBSName = "PDSPixelCodeData BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* PDS Vertex Code/Data ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSVERTEX_CODEDATA_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSVERTEX_CODEDATA_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_PDSVERTEX_CODEDATA_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "PDSVertexCodeData"; + psDeviceMemoryHeap->pszBSName = "PDSVertexCodeData BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* CacheCoherent ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SYNCINFO_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_SYNCINFO_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_SYNCINFO_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_MULTI_PROCESS; + psDeviceMemoryHeap->pszName = "CacheCoherent"; + psDeviceMemoryHeap->pszBSName = "CacheCoherent BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + /* set the sync heap id */ + psDevMemoryInfo->ui32SyncHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap); + psDeviceMemoryHeap++;/* advance to the next heap */ + + + /************* Shared 3D Parameters ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SHARED_3DPARAMETERS_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_SHARED_3DPARAMETERS_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_SHARED_3DPARAMETERS_HEAP_SIZE; + psDeviceMemoryHeap->pszName = "Shared 3DParameters"; + psDeviceMemoryHeap->pszBSName = "Shared 3DParameters BS"; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_MULTI_PROCESS; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED; + + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + /************* Percontext 3D Parameters ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PERCONTEXT_3DPARAMETERS_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PERCONTEXT_3DPARAMETERS_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_PERCONTEXT_3DPARAMETERS_HEAP_SIZE; + psDeviceMemoryHeap->pszName = "Percontext 3DParameters"; + psDeviceMemoryHeap->pszBSName = "Percontext 3DParameters BS"; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ + + +#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) + /************* General Mapping ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_MAPPING_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_MAPPING_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_MAPPING_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_MULTI_PROCESS; + psDeviceMemoryHeap->pszName = "GeneralMapping"; + psDeviceMemoryHeap->pszBSName = "GeneralMapping BS"; + #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410) + /* + if((2D hardware is enabled) + && (multi-mem contexts enabled) + && (BRN23410 is present)) + - then don't make the heap per-context otherwise + the TA and 2D requestors must always be aligned to + the same address space which could affect performance + */ + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED; + #else /* defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410) */ + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT; + #endif /* defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410) */ + + /* set the default (4k). 
System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + /* specify the mapping heap ID for this device */ + psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap); + psDeviceMemoryHeap++;/* advance to the next heap */ +#endif /* #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP) */ + + +#if defined(SGX_FEATURE_2D_HARDWARE) + /************* 2D HW Heap ***************/ + psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_2D_HEAP_ID); + psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_2D_HEAP_BASE; + psDeviceMemoryHeap->ui32HeapSize = SGX_2D_HEAP_SIZE; + psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE + | PVRSRV_MEM_RAM_BACKED_ALLOCATION + | PVRSRV_HAP_SINGLE_PROCESS; + psDeviceMemoryHeap->pszName = "2D"; + psDeviceMemoryHeap->pszBSName = "2D BS"; + psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED; + /* set the default (4k). System can override these as required */ + psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE; + psDeviceMemoryHeap++;/* advance to the next heap */ +#endif /* #if defined(SGX_FEATURE_2D_HARDWARE) */ + + + /* set the heap count */ + psDevMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap); + + return PVRSRV_OK; +} + +#if defined(PDUMP) +static +PVRSRV_ERROR SGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)(psDeviceNode->pvDevice); + psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0; + PVR_DPF((PVR_DBG_MESSAGE, "Reset pdump CCB write offset.")); + + return PVRSRV_OK; +} +#endif /* PDUMP */ + + +/*! 
+******************************************************************************* + + @Function SGXGetClientInfoKM + + @Description Gets the client information + + @Input hDevCookie + + @Output psClientInfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie, + SGX_CLIENT_INFO* psClientInfo) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice; + + /* + If this is the first client to connect to SGX perform initialisation + */ + psDevInfo->ui32ClientRefCount++; + + /* + Copy information to the client info. + */ + psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM(); + + /* + Copy requested information. + */ + OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData, sizeof(psClientInfo->asDevData)); + + /* just return OK */ + return PVRSRV_OK; +} + + +/*! +******************************************************************************* + + @Function SGXPanic + + @Description + + Called when an unrecoverable situation is detected. Dumps SGX debug + information and tells the OS to panic. + + @Input psDevInfo - SGX device info + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID SGXPanic(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + PVR_LOG(("SGX panic")); + SGXDumpDebugInfo(psDevInfo, IMG_FALSE); + OSPanic(); +} + + +/*! 
+******************************************************************************* + + @Function SGXDevInitCompatCheck + + @Description + + Check compatibility of host driver and microkernel (DDK and build options) + for SGX devices at services/device initialisation + + @Input psDeviceNode - device node + + @Return PVRSRV_ERROR - depending on mismatch found + +******************************************************************************/ +PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + PVRSRV_SGXDEV_INFO *psDevInfo; + IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch; +#if !defined(NO_HARDWARE) + PPVRSRV_KERNEL_MEM_INFO psMemInfo; + PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt; /*!< internal misc info for ukernel */ + PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures; + SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes; /*!< microkernel structure sizes */ + IMG_BOOL bStructSizesFailed; + + /* Exceptions list for core rev check, format is pairs of (hw rev, sw rev) */ + IMG_BOOL bCheckCoreRev; + const IMG_UINT32 aui32CoreRevExceptions[] = + { + 0x10100, 0x10101 + }; + const IMG_UINT32 ui32NumCoreExceptions = sizeof(aui32CoreRevExceptions) / (2*sizeof(IMG_UINT32)); + IMG_UINT i; +#endif + + /* Ensure it's a SGX device */ + if(psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_SGX) + { + PVR_LOG(("(FAIL) SGXInit: Device not of type SGX")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto chk_exit; + } + + psDevInfo = psDeviceNode->pvDevice; + + /* + * 1. Check kernel-side and client-side build options + * 2. Ensure ukernel DDK version and driver DDK version are compatible + * 3. 
Check ukernel build options against kernel-side build options + */ + + /* + * Check KM build options against client-side host driver + */ + + ui32BuildOptions = (SGX_BUILD_OPTIONS); + if (ui32BuildOptions != psDevInfo->ui32ClientBuildOptions) + { + ui32BuildOptionsMismatch = ui32BuildOptions ^ psDevInfo->ui32ClientBuildOptions; + if ( (psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; " + "extra options present in client-side driver: (0x%x). Please check sgx_options.h", + psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch )); + } + + if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; " + "extra options present in KM: (0x%x). Please check sgx_options.h", + ui32BuildOptions & ui32BuildOptionsMismatch )); + } + eError = PVRSRV_ERROR_BUILD_MISMATCH; + goto chk_exit; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Client-side and KM driver build options match. 
[ OK ]")); + } + +#if !defined (NO_HARDWARE) + psMemInfo = psDevInfo->psKernelSGXMiscMemInfo; + + /* Clear state (not strictly necessary since this is the first call) */ + psSGXMiscInfoInt = psMemInfo->pvLinAddrKM; + psSGXMiscInfoInt->ui32MiscInfoFlags = 0; + psSGXMiscInfoInt->ui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES; + eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode, IMG_NULL); + + /* + * Validate DDK version + */ + if(eError != PVRSRV_OK) + { + PVR_LOG(("(FAIL) SGXInit: Unable to validate device DDK version")); + goto chk_exit; + } + psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures; + if( (psSGXFeatures->ui32DDKVersion != + ((PVRVERSION_MAJ << 16) | + (PVRVERSION_MIN << 8) | + PVRVERSION_BRANCH) ) || + (psSGXFeatures->ui32DDKBuild != PVRVERSION_BUILD) ) + { + PVR_LOG(("(FAIL) SGXInit: Incompatible driver DDK revision (%d)/device DDK revision (%d).", + PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild)); + eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH; + goto chk_exit; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: driver DDK (%d) and device DDK (%d) match. [ OK ]", + PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild)); + } + + /* + * Check hardware core revision is compatible with the one in software + */ + if (psSGXFeatures->ui32CoreRevSW == 0) + { + /* + Head core revision cannot be checked. + */ + PVR_LOG(("SGXInit: HW core rev (%x) check skipped.", + psSGXFeatures->ui32CoreRev)); + } + else + { + /* For some cores the hw/sw core revisions are expected not to match. For these + * exceptional cases the core rev compatibility check should be skipped. 
+ */ + bCheckCoreRev = IMG_TRUE; + for(i=0; i<ui32NumCoreExceptions; i+=2) + { + if( (psSGXFeatures->ui32CoreRev==aui32CoreRevExceptions[i]) && + (psSGXFeatures->ui32CoreRevSW==aui32CoreRevExceptions[i+1]) ) + { + PVR_LOG(("SGXInit: HW core rev (%x), SW core rev (%x) check skipped.", + psSGXFeatures->ui32CoreRev, + psSGXFeatures->ui32CoreRevSW)); + bCheckCoreRev = IMG_FALSE; + } + } + + if (bCheckCoreRev) + { + if (psSGXFeatures->ui32CoreRev != psSGXFeatures->ui32CoreRevSW) + { + PVR_LOG(("(FAIL) SGXInit: Incompatible HW core rev (%x) and SW core rev (%x).", + psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW)); + eError = PVRSRV_ERROR_BUILD_MISMATCH; + goto chk_exit; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: HW core rev (%x) and SW core rev (%x) match. [ OK ]", + psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW)); + } + } + } + + /* + * Check ukernel structure sizes are the same as those in the driver + */ + psSGXStructSizes = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXStructSizes; + + bStructSizesFailed = IMG_FALSE; + + CHECK_SIZE(HOST_CTL); + CHECK_SIZE(COMMAND); +#if defined(SGX_FEATURE_2D_HARDWARE) + CHECK_SIZE(2DCMD); + CHECK_SIZE(2DCMD_SHARED); +#endif + CHECK_SIZE(CMDTA); + CHECK_SIZE(CMDTA_SHARED); + CHECK_SIZE(TRANSFERCMD); + CHECK_SIZE(TRANSFERCMD_SHARED); + + CHECK_SIZE(3DREGISTERS); + CHECK_SIZE(HWPBDESC); + CHECK_SIZE(HWRENDERCONTEXT); + CHECK_SIZE(HWRENDERDETAILS); + CHECK_SIZE(HWRTDATA); + CHECK_SIZE(HWRTDATASET); + CHECK_SIZE(HWTRANSFERCONTEXT); + + if (bStructSizesFailed == IMG_TRUE) + { + PVR_LOG(("(FAIL) SGXInit: Mismatch in SGXMKIF structure sizes.")); + eError = PVRSRV_ERROR_BUILD_MISMATCH; + goto chk_exit; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: SGXMKIF structure sizes match. 
[ OK ]")); + } + + /* + * Check ukernel build options against KM host driver + */ + + ui32BuildOptions = psSGXFeatures->ui32BuildOptions; + if (ui32BuildOptions != (SGX_BUILD_OPTIONS)) + { + ui32BuildOptionsMismatch = ui32BuildOptions ^ (SGX_BUILD_OPTIONS); + if ( ((SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; " + "extra options present in driver: (0x%x). Please check sgx_options.h", + (SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch )); + } + + if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0) + { + PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; " + "extra options present in microkernel: (0x%x). Please check sgx_options.h", + ui32BuildOptions & ui32BuildOptionsMismatch )); + } + eError = PVRSRV_ERROR_BUILD_MISMATCH; + goto chk_exit; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Driver and microkernel build options match. [ OK ]")); + } +#endif // NO_HARDWARE + + eError = PVRSRV_OK; +chk_exit: +#if defined(IGNORE_SGX_INIT_COMPATIBILITY_CHECK) + return PVRSRV_OK; +#else + return eError; +#endif +} + +/* + * @Function SGXGetMiscInfoUkernel + * + * @Description Returns misc info (e.g. 
SGX build info/flags) from microkernel + * + * @Input psDevInfo : device info from init phase + * @Input psDeviceNode : device node, used for scheduling ukernel to query SGX features + * + * @Return PVRSRV_ERROR : + * + */ +static +PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hDevMemContext) +{ + PVRSRV_ERROR eError; + SGXMKIF_COMMAND sCommandData; /* CCB command data */ + PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt; /*!< internal misc info for ukernel */ + PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures; /*!< sgx features for client */ + SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes; /*!< internal info: microkernel structure sizes */ + + PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo; + + if (! psMemInfo->pvLinAddrKM) + { + PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Invalid address.")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + psSGXMiscInfoInt = psMemInfo->pvLinAddrKM; + psSGXFeatures = &psSGXMiscInfoInt->sSGXFeatures; + psSGXStructSizes = &psSGXMiscInfoInt->sSGXStructSizes; + + psSGXMiscInfoInt->ui32MiscInfoFlags &= ~PVRSRV_USSE_MISCINFO_READY; + + /* Reset SGX features */ + OSMemSet(psSGXFeatures, 0, sizeof(*psSGXFeatures)); + OSMemSet(psSGXStructSizes, 0, sizeof(*psSGXStructSizes)); + + /* set up buffer address for SGX features in CCB */ + sCommandData.ui32Data[1] = psMemInfo->sDevVAddr.uiAddr; /* device V addr of output buffer */ + + PDUMPCOMMENT("Microkernel kick for SGXGetMiscInfo"); + eError = SGXScheduleCCBCommandKM(psDeviceNode, + SGXMKIF_CMD_GETMISCINFO, + &sCommandData, + KERNEL_ID, + 0, + hDevMemContext, + IMG_FALSE); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: SGXScheduleCCBCommandKM failed.")); + return eError; + } + + /* FIXME: DWORD value to determine code path in ukernel? + * E.g. could use getMiscInfo to obtain register values for diagnostics? 
*/ + +#if !defined(NO_HARDWARE) + { + IMG_BOOL bExit; + + bExit = IMG_FALSE; + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if ((psSGXMiscInfoInt->ui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_READY) != 0) + { + bExit = IMG_TRUE; + break; + } + } END_LOOP_UNTIL_TIMEOUT(); + + /*if the loop exited because a timeout*/ + if (!bExit) + { + PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Timeout occurred waiting for misc info.")); + return PVRSRV_ERROR_TIMEOUT; + } + } +#endif /* NO_HARDWARE */ + + return PVRSRV_OK; +} + + + +/* + * @Function SGXGetMiscInfoKM + * + * @Description Returns miscellaneous SGX info + * + * @Input psDevInfo : device info from init phase + * @Input psDeviceNode : device node, used for scheduling ukernel to query SGX features + * + * @Output psMiscInfo : query request plus user-mode mem for holding returned data + * + * @Return PVRSRV_ERROR : + * + */ +IMG_EXPORT +PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo, + SGX_MISC_INFO *psMiscInfo, + PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_HANDLE hDevMemContext) +{ + PVRSRV_ERROR eError; + PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo; + IMG_UINT32 *pui32MiscInfoFlags = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->ui32MiscInfoFlags; + + /* Reset the misc info state flags */ + *pui32MiscInfoFlags = 0; + +#if !defined(SUPPORT_SGX_EDM_MEMORY_DEBUG) + PVR_UNREFERENCED_PARAMETER(hDevMemContext); +#endif + + switch(psMiscInfo->eRequest) + { +#if defined(SGX_FEATURE_DATA_BREAKPOINTS) + case SGX_MISC_INFO_REQUEST_SET_BREAKPOINT: + { + IMG_UINT32 ui32MaskDM; + IMG_UINT32 ui32CtrlWEnable; + IMG_UINT32 ui32CtrlREnable; + IMG_UINT32 ui32CtrlTrapEnable; + IMG_UINT32 ui32RegVal; + IMG_UINT32 ui32StartRegVal; + IMG_UINT32 ui32EndRegVal; + SGXMKIF_COMMAND sCommandData; + + /* Set or Clear BP? 
*/ + if(psMiscInfo->uData.sSGXBreakpointInfo.bBPEnable) + { + /* set the break point */ + IMG_DEV_VIRTADDR sBPDevVAddr = psMiscInfo->uData.sSGXBreakpointInfo.sBPDevVAddr; + IMG_DEV_VIRTADDR sBPDevVAddrEnd = psMiscInfo->uData.sSGXBreakpointInfo.sBPDevVAddrEnd; + + /* BP address */ + ui32StartRegVal = sBPDevVAddr.uiAddr & EUR_CR_BREAKPOINT0_START_ADDRESS_MASK; + ui32EndRegVal = sBPDevVAddrEnd.uiAddr & EUR_CR_BREAKPOINT0_END_ADDRESS_MASK; + + ui32MaskDM = psMiscInfo->uData.sSGXBreakpointInfo.ui32DataMasterMask; + ui32CtrlWEnable = psMiscInfo->uData.sSGXBreakpointInfo.bWrite; + ui32CtrlREnable = psMiscInfo->uData.sSGXBreakpointInfo.bRead; + ui32CtrlTrapEnable = psMiscInfo->uData.sSGXBreakpointInfo.bTrapped; + + /* normal data BP */ + ui32RegVal = ((ui32MaskDM<<EUR_CR_BREAKPOINT0_MASK_DM_SHIFT) & EUR_CR_BREAKPOINT0_MASK_DM_MASK) | + ((ui32CtrlWEnable<<EUR_CR_BREAKPOINT0_CTRL_WENABLE_SHIFT) & EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK) | + ((ui32CtrlREnable<<EUR_CR_BREAKPOINT0_CTRL_RENABLE_SHIFT) & EUR_CR_BREAKPOINT0_CTRL_RENABLE_MASK) | + ((ui32CtrlTrapEnable<<EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SHIFT) & EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK); + } + else + { + /* clear the break point */ + ui32RegVal = ui32StartRegVal = ui32EndRegVal = 0; + } + + /* setup the command */ + sCommandData.ui32Data[0] = psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex; + sCommandData.ui32Data[1] = ui32StartRegVal; + sCommandData.ui32Data[2] = ui32EndRegVal; + sCommandData.ui32Data[3] = ui32RegVal; + + /* clear signal flags */ + psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0; + + PDUMPCOMMENT("Microkernel kick for setting a data breakpoint"); + eError = SGXScheduleCCBCommandKM(psDeviceNode, + SGXMKIF_CMD_DATABREAKPOINT, + &sCommandData, + KERNEL_ID, + 0, + hDevMemContext, + IMG_FALSE); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoKM: SGXScheduleCCBCommandKM failed.")); + return eError; + } + +#if defined(NO_HARDWARE) + /* clear signal flags */ + 
psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0; +#else + { + IMG_BOOL bExit; + + bExit = IMG_FALSE; + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if (psDevInfo->psSGXHostCtl->ui32BPSetClearSignal != 0) + { + bExit = IMG_TRUE; + /* clear signal flags */ + psDevInfo->psSGXHostCtl->ui32BPSetClearSignal = 0; + break; + } + } END_LOOP_UNTIL_TIMEOUT(); + + /*if the loop exited because a timeout*/ + if (!bExit) + { + PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoKM: Timeout occurred waiting BP set/clear")); + return PVRSRV_ERROR_TIMEOUT; + } + } +#endif /* NO_HARDWARE */ + + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_POLL_BREAKPOINT: + { + /* This request checks to see whether a breakpoint has + been trapped. If so, it returns the number of the + breakpoint number that was trapped in ui32BPIndex, + sTrappedBPDevVAddr to the address which was trapped, + and sets bTrappedBP. Otherwise, bTrappedBP will be + false, and other fields should be ignored. */ + /* The uKernel is not used, since if we are stopped on a + breakpoint, it is not possible to guarantee that the + uKernel would be able to run */ +#if !defined(NO_HARDWARE) +#if defined(SGX_FEATURE_MP) + IMG_BOOL bTrappedBPMaster; + IMG_UINT32 ui32CoreNum, ui32TrappedBPCoreNum; +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + IMG_UINT32 ui32PipeNum, ui32TrappedBPPipeNum; +/* ui32PipeNum is the pipe number plus 1, or 0 to represent "partition" */ +#define NUM_PIPES_PLUS_ONE (SGX_FEATURE_PERPIPE_BKPT_REGS_NUMPIPES+1) +#endif + IMG_BOOL bTrappedBPAny; +#endif /* defined(SGX_FEATURE_MP) */ + IMG_BOOL bFoundOne; + +#if defined(SGX_FEATURE_MP) + ui32TrappedBPCoreNum = 0; + bTrappedBPMaster = !!(EUR_CR_MASTER_BREAKPOINT_TRAPPED_MASK & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT)); + bTrappedBPAny = bTrappedBPMaster; +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + ui32TrappedBPPipeNum = 0; /* just to keep the (incorrect) compiler happy */ +#endif + for (ui32CoreNum = 0; ui32CoreNum < SGX_FEATURE_MP_CORE_COUNT_3D; 
ui32CoreNum++) + { +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + /* FIXME: this macro makes the assumption that the PARTITION regs are the same + distance before the PIPE0 regs as the PIPE1 regs are after it, _and_ + assumes that the fields in the partition regs are in the same place + in the pipe regs. Need to validate these assumptions, or assert them */ +#define SGX_MP_CORE_PIPE_SELECT(r,c,p) \ + ((SGX_MP_CORE_SELECT(EUR_CR_PARTITION_##r,c) + p*(EUR_CR_PIPE0_##r-EUR_CR_PARTITION_##r))) + for (ui32PipeNum = 0; ui32PipeNum < NUM_PIPES_PLUS_ONE; ui32PipeNum++) + { + bFoundOne = + 0 != (EUR_CR_PARTITION_BREAKPOINT_TRAPPED_MASK & + OSReadHWReg(psDevInfo->pvRegsBaseKM, + SGX_MP_CORE_PIPE_SELECT(BREAKPOINT, + ui32CoreNum, + ui32PipeNum))); + if (bFoundOne) + { + bTrappedBPAny = IMG_TRUE; + ui32TrappedBPCoreNum = ui32CoreNum; + ui32TrappedBPPipeNum = ui32PipeNum; + } + } +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + bFoundOne = !!(EUR_CR_BREAKPOINT_TRAPPED_MASK & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT, ui32CoreNum))); + if (bFoundOne) + { + bTrappedBPAny = IMG_TRUE; + ui32TrappedBPCoreNum = ui32CoreNum; + } +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + } + + psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBP = bTrappedBPAny; +#else /* defined(SGX_FEATURE_MP) */ +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + #error Not yet considered the case for per-pipe regs in non-mp case +#endif + psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBP = 0 != (EUR_CR_BREAKPOINT_TRAPPED_MASK & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BREAKPOINT)); +#endif /* defined(SGX_FEATURE_MP) */ + + if (psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBP) + { + IMG_UINT32 ui32Info0, ui32Info1; + +#if defined(SGX_FEATURE_MP) +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + ui32Info0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0:SGX_MP_CORE_PIPE_SELECT(BREAKPOINT_TRAP_INFO0, ui32TrappedBPCoreNum, 
ui32TrappedBPPipeNum)); + ui32Info1 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1:SGX_MP_CORE_PIPE_SELECT(BREAKPOINT_TRAP_INFO1, ui32TrappedBPCoreNum, ui32TrappedBPPipeNum)); +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + ui32Info0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0:SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT_TRAP_INFO0, ui32TrappedBPCoreNum)); + ui32Info1 = OSReadHWReg(psDevInfo->pvRegsBaseKM, bTrappedBPMaster?EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1:SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT_TRAP_INFO1, ui32TrappedBPCoreNum)); +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ +#else /* defined(SGX_FEATURE_MP) */ + ui32Info0 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BREAKPOINT_TRAP_INFO0); + ui32Info1 = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BREAKPOINT_TRAP_INFO1); +#endif /* defined(SGX_FEATURE_MP) */ + +#ifdef SGX_FEATURE_PERPIPE_BKPT_REGS + psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT; + psMiscInfo->uData.sSGXBreakpointInfo.sTrappedBPDevVAddr.uiAddr = ui32Info0 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK; + psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPBurstLength = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT; + psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBPRead = !!(ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_MASK); + psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPDataMaster = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT; + psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPTag = (ui32Info1 & EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_MASK) >> EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_SHIFT; +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + 
psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT; + psMiscInfo->uData.sSGXBreakpointInfo.sTrappedBPDevVAddr.uiAddr = ui32Info0 & EUR_CR_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK; + psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPBurstLength = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT; + psMiscInfo->uData.sSGXBreakpointInfo.bTrappedBPRead = !!(ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_RNW_MASK); + psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPDataMaster = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT; + psMiscInfo->uData.sSGXBreakpointInfo.ui32TrappedBPTag = (ui32Info1 & EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_MASK) >> EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_SHIFT; +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ +#if defined(SGX_FEATURE_MP) +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + /* mp, per-pipe regbanks */ + psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum = bTrappedBPMaster?65535:(ui32TrappedBPCoreNum + (ui32TrappedBPPipeNum<<10)); +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + /* mp, regbanks unsplit */ + psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum = bTrappedBPMaster?65535:ui32TrappedBPCoreNum; +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ +#else /* defined(SGX_FEATURE_MP) */ +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + /* non-mp, per-pipe regbanks */ +#error non-mp perpipe regs not yet supported +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + /* non-mp */ + psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum = 65534; +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ +#endif /* defined(SGX_FEATURE_MP) */ + } +#endif /* !defined(NO_HARDWARE) */ + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_RESUME_BREAKPOINT: + { + /* This request resumes from the currently trapped breakpoint. 
*/ + /* Core number must be supplied */ + /* Polls for notify to be acknowledged by h/w */ +#if !defined(NO_HARDWARE) +#if defined(SGX_FEATURE_MP) + IMG_UINT32 ui32CoreNum; + IMG_BOOL bMaster; +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + IMG_UINT32 ui32PipeNum; +#endif +#endif /* defined(SGX_FEATURE_MP) */ + IMG_UINT32 ui32OldSeqNum, ui32NewSeqNum; + +#if defined(SGX_FEATURE_MP) +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + ui32PipeNum = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum >> 10; + ui32CoreNum = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum & 1023; + bMaster = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum > 32767; +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + ui32CoreNum = psMiscInfo->uData.sSGXBreakpointInfo.ui32CoreNum; + bMaster = ui32CoreNum > SGX_FEATURE_MP_CORE_COUNT_3D; +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + if (bMaster) + { + /* master */ + /* EUR_CR_MASTER_BREAKPOINT_TRAPPED_MASK | EUR_CR_MASTER_BREAKPOINT_SEQNUM_MASK */ + ui32OldSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT_TRAP, EUR_CR_MASTER_BREAKPOINT_TRAP_WRNOTIFY_MASK | EUR_CR_MASTER_BREAKPOINT_TRAP_CONTINUE_MASK); + do + { + ui32NewSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BREAKPOINT); + } + while (ui32OldSeqNum == ui32NewSeqNum); + } + else +#endif /* defined(SGX_FEATURE_MP) */ + { + /* core */ +#if defined(SGX_FEATURE_PERPIPE_BKPT_REGS) + ui32OldSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_PIPE_SELECT(BREAKPOINT, ui32CoreNum, ui32PipeNum)); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_PIPE_SELECT(BREAKPOINT_TRAP, ui32CoreNum, ui32PipeNum), EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_MASK | EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_MASK); + do + { + ui32NewSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_PIPE_SELECT(BREAKPOINT, ui32CoreNum, ui32PipeNum)); + } + while (ui32OldSeqNum == 
ui32NewSeqNum); +#else /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + ui32OldSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT, ui32CoreNum)); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT_TRAP, ui32CoreNum), EUR_CR_BREAKPOINT_TRAP_WRNOTIFY_MASK | EUR_CR_BREAKPOINT_TRAP_CONTINUE_MASK); + do + { + ui32NewSeqNum = 0x1c & OSReadHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BREAKPOINT, ui32CoreNum)); + } + while (ui32OldSeqNum == ui32NewSeqNum); +#endif /* defined(SGX_FEATURE_PERPIPE_BKPT_REGS) */ + } +#endif /* !defined(NO_HARDWARE) */ + return PVRSRV_OK; + } +#endif /* SGX_FEATURE_DATA_BREAKPOINTS) */ + + case SGX_MISC_INFO_REQUEST_CLOCKSPEED: + { + psMiscInfo->uData.ui32SGXClockSpeed = psDevInfo->ui32CoreClockSpeed; + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_ACTIVEPOWER: + { + psMiscInfo->uData.sActivePower.ui32NumActivePowerEvents = psDevInfo->psSGXHostCtl->ui32NumActivePowerEvents; + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_LOCKUPS: + { +#if defined(SUPPORT_HW_RECOVERY) + psMiscInfo->uData.sLockups.ui32uKernelDetectedLockups = psDevInfo->psSGXHostCtl->ui32uKernelDetectedLockups; + psMiscInfo->uData.sLockups.ui32HostDetectedLockups = psDevInfo->psSGXHostCtl->ui32HostDetectedLockups; +#else + psMiscInfo->uData.sLockups.ui32uKernelDetectedLockups = 0; + psMiscInfo->uData.sLockups.ui32HostDetectedLockups = 0; +#endif + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_SPM: + { + /* this is dealt with in UM */ + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_SGXREV: + { + PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures; +// PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo; + + eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode, hDevMemContext); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n", + eError)); + return eError; + } + psSGXFeatures = 
&((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures; + + /* Copy SGX features into misc info struct, to return to client */ + psMiscInfo->uData.sSGXFeatures = *psSGXFeatures; + + /* Debug output */ + PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: Core 0x%x, sw ID 0x%x, sw Rev 0x%x\n", + psSGXFeatures->ui32CoreRev, + psSGXFeatures->ui32CoreIdSW, + psSGXFeatures->ui32CoreRevSW)); + PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: DDK version 0x%x, DDK build 0x%x\n", + psSGXFeatures->ui32DDKVersion, + psSGXFeatures->ui32DDKBuild)); + + /* done! */ + return PVRSRV_OK; + } + + case SGX_MISC_INFO_REQUEST_DRIVER_SGXREV: + { + PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures; + + psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures; + + /* Reset the misc information to prevent + * confusion with values returned from the ukernel + */ + OSMemSet(psMemInfo->pvLinAddrKM, 0, + sizeof(PVRSRV_SGX_MISCINFO_INFO)); + + psSGXFeatures->ui32DDKVersion = + (PVRVERSION_MAJ << 16) | + (PVRVERSION_MIN << 8) | + PVRVERSION_BRANCH; + psSGXFeatures->ui32DDKBuild = PVRVERSION_BUILD; + + /* Also report the kernel module build options -- used in SGXConnectionCheck() */ + psSGXFeatures->ui32BuildOptions = (SGX_BUILD_OPTIONS); + +#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) + /* Report the EDM status buffer location in memory */ + psSGXFeatures->sDevVAEDMStatusBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->sDevVAddr; + psSGXFeatures->pvEDMStatusBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM; +#endif + + /* Copy SGX features into misc info struct, to return to client */ + psMiscInfo->uData.sSGXFeatures = *psSGXFeatures; + return PVRSRV_OK; + } + +#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG) + case SGX_MISC_INFO_REQUEST_MEMREAD: + case SGX_MISC_INFO_REQUEST_MEMCOPY: + { + PVRSRV_ERROR eError; + PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures; + PVRSRV_SGX_MISCINFO_MEMACCESS *psSGXMemSrc; /* user-defined mem read */ + PVRSRV_SGX_MISCINFO_MEMACCESS 
*psSGXMemDest; /* user-defined mem write */ + + { + /* Set the mem read flag; src is user-defined */ + *pui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_MEMREAD; + psSGXMemSrc = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXMemAccessSrc; + + if(psMiscInfo->sDevVAddrSrc.uiAddr != 0) + { + psSGXMemSrc->sDevVAddr = psMiscInfo->sDevVAddrSrc; /* src address */ + } + else + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + if( psMiscInfo->eRequest == SGX_MISC_INFO_REQUEST_MEMCOPY) + { + /* Set the mem write flag; dest is user-defined */ + *pui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_MEMWRITE; + psSGXMemDest = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXMemAccessDest; + + if(psMiscInfo->sDevVAddrDest.uiAddr != 0) + { + psSGXMemDest->sDevVAddr = psMiscInfo->sDevVAddrDest; /* dest address */ + } + else + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + /* Get physical address of PD for memory read (may need to switch context in microkernel) */ + if(psMiscInfo->hDevMemContext != IMG_NULL) + { + SGXGetMMUPDAddrKM( (IMG_HANDLE)psDeviceNode, hDevMemContext, &psSGXMemSrc->sPDDevPAddr); + + /* Single app will always use the same src and dest mem context */ + psSGXMemDest->sPDDevPAddr = psSGXMemSrc->sPDDevPAddr; + } + else + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* Submit the task to the ukernel */ + eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n", + eError)); + return eError; + } + psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures; + +#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + if(*pui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_MEMREAD_FAIL) + { + return PVRSRV_ERROR_INVALID_MISCINFO; + } +#endif + /* Copy SGX features into misc info struct, to return to client */ + psMiscInfo->uData.sSGXFeatures = *psSGXFeatures; + return PVRSRV_OK; + } +#endif /* SUPPORT_SGX_EDM_MEMORY_DEBUG */ + +#if 
defined(SUPPORT_SGX_HWPERF) + case SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS: + { + PVRSRV_SGX_MISCINFO_SET_HWPERF_STATUS *psSetHWPerfStatus = &psMiscInfo->uData.sSetHWPerfStatus; + const IMG_UINT32 ui32ValidFlags = PVRSRV_SGX_HWPERF_STATUS_RESET_COUNTERS | + PVRSRV_SGX_HWPERF_STATUS_GRAPHICS_ON | + PVRSRV_SGX_HWPERF_STATUS_PERIODIC_ON | + PVRSRV_SGX_HWPERF_STATUS_MK_EXECUTION_ON; + SGXMKIF_COMMAND sCommandData = {0}; + + /* Check for valid flags */ + if ((psSetHWPerfStatus->ui32NewHWPerfStatus & ~ui32ValidFlags) != 0) + { + return PVRSRV_ERROR_INVALID_PARAMS; + } + + #if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, + "SGX ukernel HWPerf status %u\n", + psSetHWPerfStatus->ui32NewHWPerfStatus); + #endif /* PDUMP */ + + /* Copy the new group selector(s) to the host ctl for the ukernel */ + #if defined(SGX_FEATURE_EXTENDED_PERF_COUNTERS) + OSMemCopy(&psDevInfo->psSGXHostCtl->aui32PerfGroup[0], + &psSetHWPerfStatus->aui32PerfGroup[0], + sizeof(psDevInfo->psSGXHostCtl->aui32PerfGroup)); + OSMemCopy(&psDevInfo->psSGXHostCtl->aui32PerfBit[0], + &psSetHWPerfStatus->aui32PerfBit[0], + sizeof(psDevInfo->psSGXHostCtl->aui32PerfBit)); + psDevInfo->psSGXHostCtl->ui32PerfCounterBitSelect = psSetHWPerfStatus->ui32PerfCounterBitSelect; + psDevInfo->psSGXHostCtl->ui32PerfSumMux = psSetHWPerfStatus->ui32PerfSumMux; + #if defined(PDUMP) + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, aui32PerfGroup), + sizeof(psDevInfo->psSGXHostCtl->aui32PerfGroup), + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, aui32PerfBit), + sizeof(psDevInfo->psSGXHostCtl->aui32PerfBit), + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32PerfCounterBitSelect), + 
sizeof(psDevInfo->psSGXHostCtl->ui32PerfCounterBitSelect), + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32PerfSumMux), + sizeof(psDevInfo->psSGXHostCtl->ui32PerfSumMux), + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + #endif /* PDUMP */ + #else + psDevInfo->psSGXHostCtl->ui32PerfGroup = psSetHWPerfStatus->ui32PerfGroup; + #if defined(PDUMP) + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32PerfGroup), + sizeof(psDevInfo->psSGXHostCtl->ui32PerfGroup), + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + #endif /* PDUMP */ + #endif /* SGX_FEATURE_EXTENDED_PERF_COUNTERS */ + + /* Kick the ukernel to update the hardware state */ + sCommandData.ui32Data[0] = psSetHWPerfStatus->ui32NewHWPerfStatus; + eError = SGXScheduleCCBCommandKM(psDeviceNode, + SGXMKIF_CMD_SETHWPERFSTATUS, + &sCommandData, + KERNEL_ID, + 0, + hDevMemContext, + IMG_FALSE); + return eError; + } +#endif /* SUPPORT_SGX_HWPERF */ + + case SGX_MISC_INFO_DUMP_DEBUG_INFO: + { + PVR_LOG(("User requested SGX debug info")); + + /* Dump SGX debug data to the kernel log. */ + SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_FALSE); + + return PVRSRV_OK; + } + + case SGX_MISC_INFO_DUMP_DEBUG_INFO_FORCE_REGS: + { + PVR_LOG(("User requested SGX debug info")); + + /* Dump SGX debug data to the kernel log. 
*/ + SGXDumpDebugInfo(psDeviceNode->pvDevice, IMG_TRUE); + + return PVRSRV_OK; + } + +#if defined(DEBUG) + /* Don't allow user-mode to reboot the device in production drivers */ + case SGX_MISC_INFO_PANIC: + { + PVR_LOG(("User requested SGX panic")); + + SGXPanic(psDeviceNode->pvDevice); + + return PVRSRV_OK; + } +#endif + + default: + { + /* switch statement fell though, so: */ + return PVRSRV_ERROR_INVALID_PARAMS; + } + } +} + + +IMG_EXPORT +PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle, + IMG_UINT32 ui32ArraySize, + PVRSRV_SGX_HWPERF_CB_ENTRY *psClientHWPerfEntry, + IMG_UINT32 *pui32DataCount, + IMG_UINT32 *pui32ClockSpeed, + IMG_UINT32 *pui32HostTimeStamp) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM; + IMG_UINT i; + + for (i = 0; + psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < ui32ArraySize; + i++) + { + SGXMKIF_HWPERF_CB_ENTRY *psMKPerfEntry = &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff]; + + psClientHWPerfEntry[i].ui32FrameNo = psMKPerfEntry->ui32FrameNo; + psClientHWPerfEntry[i].ui32PID = psMKPerfEntry->ui32PID; + psClientHWPerfEntry[i].ui32RTData = psMKPerfEntry->ui32RTData; + psClientHWPerfEntry[i].ui32Type = psMKPerfEntry->ui32Type; + psClientHWPerfEntry[i].ui32Ordinal = psMKPerfEntry->ui32Ordinal; + psClientHWPerfEntry[i].ui32Info = psMKPerfEntry->ui32Info; + psClientHWPerfEntry[i].ui32Clocksx16 = SGXConvertTimeStamp(psDevInfo, + psMKPerfEntry->ui32TimeWraps, + psMKPerfEntry->ui32Time); + OSMemCopy(&psClientHWPerfEntry[i].ui32Counters[0][0], + &psMKPerfEntry->ui32Counters[0][0], + sizeof(psMKPerfEntry->ui32Counters)); + + OSMemCopy(&psClientHWPerfEntry[i].ui32MiscCounters[0][0], + &psMKPerfEntry->ui32MiscCounters[0][0], + sizeof(psMKPerfEntry->ui32MiscCounters)); + + psHWPerfCB->ui32Roff = (psHWPerfCB->ui32Roff + 1) & (SGXMKIF_HWPERF_CB_SIZE - 1); + } + + 
*pui32DataCount = i; + *pui32ClockSpeed = psDevInfo->ui32CoreClockSpeed; + *pui32HostTimeStamp = OSClockus(); + + return eError; +} + + +/****************************************************************************** + End of file (sgxinit.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxkick.c b/pvr-source/services4/srvkm/devices/sgx/sgxkick.c new file mode 100644 index 0000000..584f538 --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/sgxkick.c @@ -0,0 +1,899 @@ +/*************************************************************************/ /*! +@Title Device specific kickTA routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include <stddef.h> /* For the macro offsetof() */ +#include "services_headers.h" +#include "sgxinfo.h" +#include "sgxinfokm.h" +#if defined (PDUMP) +#include "sgxapi_km.h" +#include "pdump_km.h" +#endif +#include "sgx_bridge_km.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "sgxutils.h" +#include "ttrace.h" + +/*! 
+****************************************************************************** + + @Function SGXDoKickKM + + @Description + + Really kicks the TA + + @Input hDevHandle - Device handle + + @Return ui32Error - success or failure + +******************************************************************************/ +IMG_EXPORT +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, SGX_CCB_KICK_KM *psCCBKick) +#else +PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, SGX_CCB_KICK *psCCBKick) +#endif +{ + PVRSRV_ERROR eError; + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo; + SGXMKIF_CMDTA_SHARED *psTACmd; + IMG_UINT32 i; + IMG_HANDLE hDevMemContext = IMG_NULL; +#if defined(FIX_HW_BRN_31620) + hDevMemContext = psCCBKick->hDevMemContext; +#endif + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_ENTER, KICK_TOKEN_DOKICK); + + if (!CCB_OFFSET_IS_VALID(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset)) + { + PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: Invalid CCB offset")); + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, KICK_TOKEN_DOKICK); + return PVRSRV_ERROR_INVALID_PARAMS; + } + /* override QAC warning about stricter alignment */ + /* PRQA S 3305 1 */ + psTACmd = CCB_DATA_FROM_OFFSET(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset); + + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_CMD_START, KICK_TOKEN_DOKICK); + +#if defined(TTRACE) + if (psCCBKick->bFirstKickOrResume) + { + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, + PVRSRV_TRACE_CLASS_FLAGS, + KICK_TOKEN_FIRST_KICK); + } + + if (psCCBKick->bLastInScene) + { + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, + PVRSRV_TRACE_CLASS_FLAGS, + KICK_TOKEN_LAST_KICK); + } +#endif + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_CCB, + KICK_TOKEN_CCB_OFFSET, psCCBKick->ui32CCBOffset); + + /* TA/3D dependency */ + if (psCCBKick->hTA3DSyncInfo) + { + 
psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_TA3D_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psTACmd->sTA3DDependency.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + + psTACmd->sTA3DDependency.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + + if (psCCBKick->bTADependency) + { + psSyncInfo->psSyncData->ui32WriteOpsPending++; + } + } + + if (psCCBKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_TA_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psTACmd->sTATQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + psTACmd->sTATQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + + psTACmd->ui32TATQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++; + psTACmd->ui32TATQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + + if (psCCBKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_3D_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psTACmd->s3DTQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + psTACmd->s3DTQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + + psTACmd->ui323DTQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++; + psTACmd->ui323DTQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + + psTACmd->ui32NumTAStatusVals = psCCBKick->ui32NumTAStatusVals; + if (psCCBKick->ui32NumTAStatusVals != 0) + { + /* Copy status vals over */ + for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) + { +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + psTACmd->sCtlTAStatusInfo[i] = psCCBKick->asTAStatusUpdate[i].sCtlStatus; +#else + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO 
*)psCCBKick->ahTAStatusSyncInfo[i]; + psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + psTACmd->sCtlTAStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending; +#endif + } + } + + psTACmd->ui32Num3DStatusVals = psCCBKick->ui32Num3DStatusVals; + if (psCCBKick->ui32Num3DStatusVals != 0) + { + /* Copy status vals over */ + for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) + { +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + psTACmd->sCtl3DStatusInfo[i] = psCCBKick->as3DStatusUpdate[i].sCtlStatus; +#else + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i]; + psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + psTACmd->sCtl3DStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending; +#endif + } + } + + +#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS) + /* SRC and DST sync dependencies */ + psTACmd->ui32NumTASrcSyncs = psCCBKick->ui32NumTASrcSyncs; + for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i]; + + psTACmd->asTASrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psTACmd->asTASrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + + /* Get ui32ReadOpsPending snapshot and copy into the CCB - before incrementing. */ + psTACmd->asTASrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++; + /* Copy ui32WriteOpsPending snapshot into the CCB. 
*/ + psTACmd->asTASrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + + psTACmd->ui32NumTADstSyncs = psCCBKick->ui32NumTADstSyncs; + for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i]; + + psTACmd->asTADstSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psTACmd->asTADstSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + + /* Get ui32ReadOpsPending snapshot and copy into the CCB */ + psTACmd->asTADstSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + /* Copy ui32WriteOpsPending snapshot into the CCB - before incrementing */ + psTACmd->asTADstSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++; + } + + psTACmd->ui32Num3DSrcSyncs = psCCBKick->ui32Num3DSrcSyncs; + for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i]; + + psTACmd->as3DSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psTACmd->as3DSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + + /* Get ui32ReadOpsPending snapshot and copy into the CCB - before incrementing. */ + psTACmd->as3DSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++; + /* Copy ui32WriteOpsPending snapshot into the CCB. 
*/ + psTACmd->as3DSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + } +#else /* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */ + /* texture dependencies */ + psTACmd->ui32NumSrcSyncs = psCCBKick->ui32NumSrcSyncs; + for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i]; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_SRC_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psTACmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psTACmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + + /* Get ui32ReadOpsPending snapshot and copy into the CCB - before incrementing. */ + psTACmd->asSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++; + /* Copy ui32WriteOpsPending snapshot into the CCB. */ + psTACmd->asSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + } +#endif/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */ + + if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0) + { + PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo = + (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo; + SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM; + IMG_UINT32 ui32NumDstSyncs = psCCBKick->ui32NumDstSyncObjects; + + PVR_ASSERT(((PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo)->uAllocSize >= (sizeof(SGXMKIF_HWDEVICE_SYNC_LIST) + + (sizeof(PVRSRV_DEVICE_SYNC_OBJECT) * ui32NumDstSyncs))); + + psHWDeviceSyncList->ui32NumSyncObjects = ui32NumDstSyncs; +#if defined(PDUMP) + if (PDumpIsCaptureFrameKM()) + { + PDUMPCOMMENT("HWDeviceSyncList for TACmd\r\n"); + PDUMPMEM(IMG_NULL, + psHWDstSyncListMemInfo, + 0, + sizeof(SGXMKIF_HWDEVICE_SYNC_LIST), + 0, + MAKEUNIQUETAG(psHWDstSyncListMemInfo)); + } +#endif + + for (i=0; i<ui32NumDstSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO 
*)psCCBKick->pahDstSyncHandles[i]; + + if (psSyncInfo) + { + psSyncInfo->psSyncData->ui64LastWrite = ui64KickCount; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_KICK, KICK_TOKEN_DST_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + psHWDeviceSyncList->asSyncData[i].sReadOps2CompleteDevVAddr = psSyncInfo->sReadOps2CompleteDevVAddr; + + psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++; + psHWDeviceSyncList->asSyncData[i].ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending; + + #if defined(PDUMP) + if (PDumpIsCaptureFrameKM()) + { + IMG_UINT32 ui32ModifiedValue; + IMG_UINT32 ui32SyncOffset = offsetof(SGXMKIF_HWDEVICE_SYNC_LIST, asSyncData) + + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)); + IMG_UINT32 ui32WOpsOffset = ui32SyncOffset + + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal); + IMG_UINT32 ui32ROpsOffset = ui32SyncOffset + + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal); + IMG_UINT32 ui32ROps2Offset = ui32SyncOffset + + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOps2PendingVal); + + PDUMPCOMMENT("HWDeviceSyncObject for RT: %i\r\n", i); + + PDUMPMEM(IMG_NULL, + psHWDstSyncListMemInfo, + ui32SyncOffset, + sizeof(PVRSRV_DEVICE_SYNC_OBJECT), + 0, + MAKEUNIQUETAG(psHWDstSyncListMemInfo)); + + if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) && + (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0)) + { + /* + * Init the ROpsComplete value to 0. 
+ */ + PDUMPCOMMENT("Init RT ROpsComplete\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete), + sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete), + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); + /* + * Init the WOpsComplete value to 0. + */ + PDUMPCOMMENT("Init RT WOpsComplete\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete), + sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete), + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); + } + + psSyncInfo->psSyncData->ui32LastOpDumpVal++; + + ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1; + + PDUMPCOMMENT("Modify RT %d WOpPendingVal in HWDevSyncList\r\n", i); + + PDUMPMEM(&ui32ModifiedValue, + psHWDstSyncListMemInfo, + ui32WOpsOffset, + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psHWDstSyncListMemInfo)); + + ui32ModifiedValue = 0; + PDUMPCOMMENT("Modify RT %d ROpsPendingVal in HWDevSyncList\r\n", i); + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psHWDstSyncListMemInfo, + ui32ROpsOffset, + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psHWDstSyncListMemInfo)); + + /* + * Force the ROps2Complete value to 0. 
+ */ + PDUMPCOMMENT("Modify RT %d ROps2PendingVal in HWDevSyncList\r\n", i); + PDUMPMEM(&ui32ModifiedValue, + psHWDstSyncListMemInfo, + ui32ROps2Offset, + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psHWDstSyncListMemInfo)); + } + #endif /* defined(PDUMP) */ + } + else + { + psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr.uiAddr = 0; + psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr.uiAddr = 0; + psHWDeviceSyncList->asSyncData[i].sReadOps2CompleteDevVAddr.uiAddr = 0; + + psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = 0; + psHWDeviceSyncList->asSyncData[i].ui32ReadOps2PendingVal = 0; + psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = 0; + } + } + } + + /* + NOTE: THIS MUST BE THE LAST THING WRITTEN TO THE TA COMMAND! + Set the ready for so the uKernel will process the command. + */ + psTACmd->ui32CtrlFlags |= SGXMKIF_CMDTA_CTRLFLAGS_READY; + +#if defined(PDUMP) + if (PDumpIsCaptureFrameKM()) + { + PDUMPCOMMENT("Shared part of TA command\r\n"); + + PDUMPMEM(psTACmd, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff, + sizeof(SGXMKIF_CMDTA_SHARED), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + +#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS) + for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++) + { + IMG_UINT32 ui32ModifiedValue; + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i]; + + if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) && + (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0)) + { + /* + * Init the ROpsComplete value to 0. + */ + PDUMPCOMMENT("Init RT TA-SRC ROpsComplete\r\n", i); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete), + sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete), + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); + /* + * Init the WOpsComplete value to 0. 
+ */ + PDUMPCOMMENT("Init RT TA-SRC WOpsComplete\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete), + sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete), + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); + } + + psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; + + ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1; + + PDUMPCOMMENT("Modify TA SrcSync %d ROpsPendingVal\r\n", i); + + PDUMPMEM(&ui32ModifiedValue, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTASrcSyncs) + + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Modify TA SrcSync %d WOpPendingVal\r\n", i); + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTASrcSyncs) + + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + } + + for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++) + { + IMG_UINT32 ui32ModifiedValue; + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i]; + + if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) && + (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0)) + { + /* + * Init the ROpsComplete value to 0. + */ + PDUMPCOMMENT("Init RT TA-DST ROpsComplete\r\n", i); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete), + sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete), + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); + /* + * Init the WOpsComplete value to 0. 
+ */ + PDUMPCOMMENT("Init RT TA-DST WOpsComplete\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete), + sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete), + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); + } + + psSyncInfo->psSyncData->ui32LastOpDumpVal++; + + ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1; + + PDUMPCOMMENT("Modify TA DstSync %d WOpPendingVal\r\n", i); + + PDUMPMEM(&ui32ModifiedValue, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTADstSyncs) + + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Modify TA DstSync %d ROpsPendingVal\r\n", i); + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTADstSyncs) + + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + } + + for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++) + { + IMG_UINT32 ui32ModifiedValue; + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i]; + + if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) && + (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0)) + { + /* + * Init the ROpsComplete value to 0. + */ + PDUMPCOMMENT("Init RT 3D-SRC ROpsComplete\r\n", i); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete), + sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete), + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); + /* + * Init the WOpsComplete value to 0. 
+ */ + PDUMPCOMMENT("Init RT 3D-SRC WOpsComplete\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete), + sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete), + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); + } + + psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; + + ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1; + + PDUMPCOMMENT("Modify 3D SrcSync %d ROpsPendingVal\r\n", i); + + PDUMPMEM(&ui32ModifiedValue, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, as3DSrcSyncs) + + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Modify 3D SrcSync %d WOpPendingVal\r\n", i); + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, as3DSrcSyncs) + + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + } +#else/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */ + for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++) + { + IMG_UINT32 ui32ModifiedValue; + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i]; + + if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) && + (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0)) + { + /* + * Init the ROpsComplete value to 0. + */ + PDUMPCOMMENT("Init RT ROpsComplete\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete), + sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete), + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); + /* + * Init the WOpsComplete value to 0. 
+ */ + PDUMPCOMMENT("Init RT WOpsComplete\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete), + sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete), + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); + /* + * Init the ROps2Complete value to 0. + */ + PDUMPCOMMENT("Init RT WOpsComplete\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psSyncInfo->psSyncDataMemInfoKM, + offsetof(PVRSRV_SYNC_DATA, ui32ReadOps2Complete), + sizeof(psSyncInfo->psSyncData->ui32ReadOps2Complete), + 0, + MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM)); + } + + psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; + + ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1; + + PDUMPCOMMENT("Modify SrcSync %d ROpsPendingVal\r\n", i); + + PDUMPMEM(&ui32ModifiedValue, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) + + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Modify SrcSync %d WOpPendingVal\r\n", i); + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) + + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + } + + if (psCCBKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo; + + PDUMPCOMMENT("Modify TA/TQ ROpPendingVal\r\n"); + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, ui32TATQSyncReadOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; + } + + if (psCCBKick->h3DSyncInfo != 
IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo; + + PDUMPCOMMENT("Modify 3D/TQ ROpPendingVal\r\n"); + + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, ui323DTQSyncReadOpsPendingVal), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); + psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; + } + +#endif/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */ + + for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) + { +#if !defined(SUPPORT_SGX_NEW_STATUS_VALS) + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i]; + PDUMPCOMMENT("Modify TA status value in TA cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_CMDTA_SHARED, sCtlTAStatusInfo[i].ui32StatusValue), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); +#endif + } + + for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) + { +#if !defined(SUPPORT_SGX_NEW_STATUS_VALS) + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i]; + PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psCCBKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_CMDTA_SHARED, sCtl3DStatusInfo[i].ui32StatusValue), + sizeof(IMG_UINT32), + 0, + MAKEUNIQUETAG(psCCBMemInfo)); +#endif + } + } +#endif /* defined(PDUMP) */ + + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_CMD_END, + KICK_TOKEN_DOKICK); + + eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TA, &psCCBKick->sCommand, KERNEL_ID, 0, hDevMemContext, psCCBKick->bLastInScene); + if (eError == PVRSRV_ERROR_RETRY) + { + if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0) + { + for (i=0; i < psCCBKick->ui32NumDstSyncObjects; i++) + { + /* Client will retry, so undo the write ops pending increment done above. 
*/ + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i]; + + if (psSyncInfo) + { + psSyncInfo->psSyncData->ui32WriteOpsPending--; +#if defined(PDUMP) + if (PDumpIsCaptureFrameKM()) + { + psSyncInfo->psSyncData->ui32LastOpDumpVal--; + } +#endif + } + } + } + +#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS) + for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + } + for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i]; + psSyncInfo->psSyncData->ui32WriteOpsPending--; + } + for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + } +#else/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */ + for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + } +#endif/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */ + + if (psCCBKick->hTA3DSyncInfo) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + } + + if (psCCBKick->hTASyncInfo) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + } + + if (psCCBKick->h3DSyncInfo) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + } + + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + KICK_TOKEN_DOKICK); + return eError; + } + else if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: SGXScheduleCCBCommandKM failed.")); + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + KICK_TOKEN_DOKICK); + return eError; + } + + +#if 
defined(NO_HARDWARE) + + + /* TA/3D dependency */ + if (psCCBKick->hTA3DSyncInfo) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo; + + if (psCCBKick->bTADependency) + { + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + } + + if (psCCBKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo; + + psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending; + } + + if (psCCBKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo; + + psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending; + } + + /* Copy status vals over */ + for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++) + { +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->asTAStatusUpdate[i].hKernelMemInfo; + /* derive offset into meminfo and write the status value */ + *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM + + (psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr.uiAddr + - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue; +#else + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue; +#endif + } + +#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS) + /* SRC and DST dependencies */ + for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending; + } + for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i]; + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + for (i=0; 
i<psCCBKick->ui32Num3DSrcSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending; + } +#else/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */ + /* texture dependencies */ + for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending; + } +#endif/* SUPPORT_SGX_GENERALISED_SYNCOBJECTS */ + + if (psCCBKick->bTerminateOrAbort) + { + if (psCCBKick->ui32NumDstSyncObjects > 0) + { + PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo = + (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo; + SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM; + + for (i=0; i<psCCBKick->ui32NumDstSyncObjects; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i]; + if (psSyncInfo) + psSyncInfo->psSyncData->ui32WriteOpsComplete = psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal+1; + } + } + + /* Copy status vals over */ + for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++) + { +#if defined(SUPPORT_SGX_NEW_STATUS_VALS) + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->as3DStatusUpdate[i].hKernelMemInfo; + /* derive offset into meminfo and write the status value */ + *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM + + (psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr.uiAddr + - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue; +#else + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue; +#endif + } + } +#endif + PVR_TTRACE(PVRSRV_TRACE_GROUP_KICK, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + KICK_TOKEN_DOKICK); + return eError; +} + 
+/****************************************************************************** + End of file (sgxkick.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxpower.c b/pvr-source/services4/srvkm/devices/sgx/sgxpower.c new file mode 100644 index 0000000..2acd28d --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/sgxpower.c @@ -0,0 +1,630 @@ +/*************************************************************************/ /*! +@Title Device specific power routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include <stddef.h> + +#include "sgxdefs.h" +#include "services_headers.h" +#include "sgxapi_km.h" +#include "sgx_mkif_km.h" +#include "sgxutils.h" +#include "pdump_km.h" + +int powering_down = 0; + + +#if defined(SUPPORT_HW_RECOVERY) +static PVRSRV_ERROR SGXAddTimer(PVRSRV_DEVICE_NODE *psDeviceNode, + SGX_TIMING_INFORMATION *psSGXTimingInfo, + IMG_HANDLE *phTimer) +{ + /* + Install timer callback for HW recovery at 50 times lower + frequency than the microkernel timer. + */ + *phTimer = OSAddTimer(SGXOSTimer, psDeviceNode, + 1000 * 50 / psSGXTimingInfo->ui32uKernelFreq); + if(*phTimer == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"SGXAddTimer : Failed to register timer callback function")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + return PVRSRV_OK; +} +#endif /* SUPPORT_HW_RECOVERY*/ + + +/*! 
+****************************************************************************** + + @Function SGXUpdateTimingInfo + + @Description + + Derives the microkernel timing info from the system-supplied values + + @Input psDeviceNode : SGX Device node + + @Return PVRSRV_ERROR : + +******************************************************************************/ +static PVRSRV_ERROR SGXUpdateTimingInfo(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; +#if defined(SGX_DYNAMIC_TIMING_INFO) + SGX_TIMING_INFORMATION sSGXTimingInfo = {0}; +#else + SGX_DEVICE_MAP *psSGXDeviceMap; +#endif + IMG_UINT32 ui32ActivePowManSampleRate; + SGX_TIMING_INFORMATION *psSGXTimingInfo; + + +#if defined(SGX_DYNAMIC_TIMING_INFO) + psSGXTimingInfo = &sSGXTimingInfo; + SysGetSGXTimingInformation(psSGXTimingInfo); +#else + SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX, + (IMG_VOID**)&psSGXDeviceMap); + psSGXTimingInfo = &psSGXDeviceMap->sTimingInfo; +#endif + +#if defined(SUPPORT_HW_RECOVERY) + { + PVRSRV_ERROR eError; + IMG_UINT32 ui32OlduKernelFreq; + + if (psDevInfo->hTimer != IMG_NULL) + { + ui32OlduKernelFreq = psDevInfo->ui32CoreClockSpeed / psDevInfo->ui32uKernelTimerClock; + if (ui32OlduKernelFreq != psSGXTimingInfo->ui32uKernelFreq) + { + /* + The ukernel timer frequency has changed. + */ + IMG_HANDLE hNewTimer; + + eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &hNewTimer); + if (eError == PVRSRV_OK) + { + eError = OSRemoveTimer(psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXUpdateTimingInfo: Failed to remove timer")); + } + psDevInfo->hTimer = hNewTimer; + } + else + { + /* Failed to allocate the new timer, leave the old one. 
*/ + } + } + } + else + { + eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + return eError; + } + } + + psDevInfo->psSGXHostCtl->ui32HWRecoverySampleRate = + psSGXTimingInfo->ui32uKernelFreq / psSGXTimingInfo->ui32HWRecoveryFreq; + } +#endif /* SUPPORT_HW_RECOVERY*/ + + /* Copy the SGX clock speed for use in the kernel */ + psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed; + psDevInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq; + + /* FIXME: no need to duplicate - remove it from psDevInfo */ + psDevInfo->psSGXHostCtl->ui32uKernelTimerClock = psDevInfo->ui32uKernelTimerClock; +#if defined(PDUMP) + PDUMPCOMMENT("Host Control - Microkernel clock"); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32uKernelTimerClock), + sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); +#endif /* PDUMP */ + + if (psSGXTimingInfo->bEnableActivePM) + { + ui32ActivePowManSampleRate = + psSGXTimingInfo->ui32uKernelFreq * psSGXTimingInfo->ui32ActivePowManLatencyms / 1000; + /* + ui32ActivePowerCounter has the value 0 when SGX is not idle. + When SGX becomes idle, the value of ui32ActivePowerCounter is changed from 0 to ui32ActivePowManSampleRate. + The ukernel timer routine decrements the value of ui32ActivePowerCounter if it is not 0. + When the ukernel timer decrements ui32ActivePowerCounter from 1 to 0, the ukernel timer will + request power down. + Therefore the minimum value of ui32ActivePowManSampleRate is 1. 
+ */ + ui32ActivePowManSampleRate += 1; + } + else + { + ui32ActivePowManSampleRate = 0; + } + + psDevInfo->psSGXHostCtl->ui32ActivePowManSampleRate = ui32ActivePowManSampleRate; +#if defined(PDUMP) + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32ActivePowManSampleRate), + sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); +#endif /* PDUMP */ + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function SGXStartTimer + + @Description + + Start the microkernel timer + + @Input psDevInfo : SGX Device Info + + @Return IMG_VOID : + +******************************************************************************/ +static IMG_VOID SGXStartTimer(PVRSRV_SGXDEV_INFO *psDevInfo) +{ + #if defined(SUPPORT_HW_RECOVERY) + PVRSRV_ERROR eError; + + eError = OSEnableTimer(psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXStartTimer : Failed to enable host timer")); + } + #else + PVR_UNREFERENCED_PARAMETER(psDevInfo); + #endif /* SUPPORT_HW_RECOVERY */ +} + + +/*! +****************************************************************************** + + @Function SGXPollForClockGating + + @Description + + Wait until the SGX core clocks have gated. 
+
+ @Input psDevInfo : SGX Device Info
+ @Input ui32Register : Offset of register to poll
+ @Input ui32RegisterValue : Value of register to poll for
+ @Input pszComment : Description of poll
+
+ @Return IMG_VOID :
+
+******************************************************************************/
+static IMG_VOID SGXPollForClockGating (PVRSRV_SGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Register,
+ IMG_UINT32 ui32RegisterValue,
+ IMG_CHAR *pszComment)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ PVR_UNREFERENCED_PARAMETER(ui32Register);
+ PVR_UNREFERENCED_PARAMETER(ui32RegisterValue);
+ PVR_UNREFERENCED_PARAMETER(pszComment);
+
+ #if !defined(NO_HARDWARE)
+ PVR_ASSERT(psDevInfo != IMG_NULL);
+
+ /* PRQA S 0505 1 */ /* QAC does not like assert() */
+ if (PollForValueKM((IMG_UINT32 *)psDevInfo->pvRegsBaseKM + (ui32Register >> 2),
+ 0,
+ ui32RegisterValue,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"SGXPollForClockGating: %s failed.", pszComment));
+ SGXDumpDebugInfo(psDevInfo, IMG_FALSE);
+ PVR_DBG_BREAK;
+ }
+ #endif /* NO_HARDWARE */
+
+ PDUMPCOMMENT("%s", pszComment);
+ PDUMPREGPOL(SGX_PDUMPREG_NAME, ui32Register, 0, ui32RegisterValue, PDUMP_POLL_OPERATOR_EQUAL);
+}
+
+
+/*! 
+****************************************************************************** + + @Function SGXPrePowerState + + @Description + + does necessary preparation before power state transition + + @Input hDevHandle : SGX Device Node + @Input eNewPowerState : New power state + @Input eCurrentPowerState : Current power state + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR SGXPrePowerState (IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + if ((eNewPowerState != eCurrentPowerState) && + (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) + { + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32PowerCmd, ui32CompleteStatus; + SGXMKIF_COMMAND sCommand = {0}; + IMG_UINT32 ui32Core; + IMG_UINT32 ui32CoresEnabled; + + #if defined(SUPPORT_HW_RECOVERY) + /* Disable timer callback for HW recovery */ + eError = OSDisableTimer(psDevInfo->hTimer); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to disable timer")); + return eError; + } + #endif /* SUPPORT_HW_RECOVERY */ + + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + /* Request the ukernel to idle SGX and save its state. */ + ui32PowerCmd = PVRSRV_POWERCMD_POWEROFF; + ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE; + PDUMPCOMMENT("SGX power off request"); + } + else + { + /* Request the ukernel to idle SGX. 
*/ + ui32PowerCmd = PVRSRV_POWERCMD_IDLE; + ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE; + PDUMPCOMMENT("SGX idle request"); + } + + powering_down = 1; + + sCommand.ui32Data[1] = ui32PowerCmd; + + eError = SGXScheduleCCBCommand(psDeviceNode, SGXMKIF_CMD_POWER, &sCommand, KERNEL_ID, 0, IMG_NULL, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to submit power down command")); + return eError; + } + + /* Wait for the ukernel to complete processing. */ + #if !defined(NO_HARDWARE) + if (PollForValueKM(&psDevInfo->psSGXHostCtl->ui32PowerStatus, + ui32CompleteStatus, + ui32CompleteStatus, + MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + IMG_FALSE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for SGX ukernel power transition failed.")); + SGXDumpDebugInfo(psDevInfo, IMG_FALSE); + PVR_DBG_BREAK; + } + #endif /* NO_HARDWARE */ + + psDevInfo->bSGXIdle = IMG_TRUE; + + #if defined(PDUMP) + PDUMPCOMMENT("TA/3D CCB Control - Wait for power event on uKernel."); + PDUMPMEMPOL(psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus), + ui32CompleteStatus, + ui32CompleteStatus, + PDUMP_POLL_OPERATOR_EQUAL, + 0, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + #endif /* PDUMP */ + +#if defined(SGX_FEATURE_MP) + ui32CoresEnabled = ((OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE) & EUR_CR_MASTER_CORE_ENABLE_MASK) >> EUR_CR_MASTER_CORE_ENABLE_SHIFT) + 1; +#else + ui32CoresEnabled = 1; +#endif + + for (ui32Core = 0; ui32Core < ui32CoresEnabled; ui32Core++) + { + /* Wait for SGX clock gating. */ + SGXPollForClockGating(psDevInfo, + SGX_MP_CORE_SELECT(psDevInfo->ui32ClkGateStatusReg, ui32Core), + psDevInfo->ui32ClkGateStatusMask, + "Wait for SGX clock gating"); + } + + #if defined(SGX_FEATURE_MP) + /* Wait for SGX master clock gating. 
*/ + SGXPollForClockGating(psDevInfo, + psDevInfo->ui32MasterClkGateStatusReg, + psDevInfo->ui32MasterClkGateStatusMask, + "Wait for SGX master clock gating"); + + SGXPollForClockGating(psDevInfo, + psDevInfo->ui32MasterClkGateStatus2Reg, + psDevInfo->ui32MasterClkGateStatus2Mask, + "Wait for SGX master clock gating (2)"); + #endif /* SGX_FEATURE_MP */ + + if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + /* Finally, de-initialise some registers. */ + eError = SGXDeinitialise(psDevInfo); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: SGXDeinitialise failed: %u", eError)); + return eError; + } + } + } + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function SGXPostPowerState + + @Description + + does necessary preparation after power state transition + + @Input hDevHandle : SGX Device Node + @Input eNewPowerState : New power state + @Input eCurrentPowerState : Current power state + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR SGXPostPowerState (IMG_HANDLE hDevHandle, + PVRSRV_DEV_POWER_STATE eNewPowerState, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + if ((eNewPowerState != eCurrentPowerState) && + (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) + { + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; + + /* Reset the power manager flags. 
*/ + psSGXHostCtl->ui32PowerStatus = 0; + #if defined(PDUMP) + PDUMPCOMMENT("Host Control - Reset power status"); + PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus), + sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo)); + #endif /* PDUMP */ + + if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) + { + /* + Coming up from off, re-initialise SGX. + */ + + /* + Re-generate the timing data required by SGX. + */ + eError = SGXUpdateTimingInfo(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXUpdateTimingInfo failed")); + return eError; + } + + /* + Run the SGX init script. + */ + eError = SGXInitialise(psDevInfo, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXInitialise failed")); + return eError; + } + powering_down = 0; + } + else + { + /* + Coming up from idle, restart the ukernel. + */ + SGXMKIF_COMMAND sCommand = {0}; + + sCommand.ui32Data[1] = PVRSRV_POWERCMD_RESUME; + eError = SGXScheduleCCBCommand(psDeviceNode, SGXMKIF_CMD_POWER, &sCommand, ISR_ID, 0, IMG_NULL, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState failed to schedule CCB command: %u", eError)); + return eError; + } + } + + SGXStartTimer(psDevInfo); + } + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function SGXPreClockSpeedChange + + @Description + + Does processing required before an SGX clock speed change. 
+ + @Input hDevHandle : SGX Device Node + @Input bIdleDevice : Whether the microkernel needs to be idled + @Input eCurrentPowerState : Power state of the device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR SGXPreClockSpeedChange (IMG_HANDLE hDevHandle, + IMG_BOOL bIdleDevice, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + PVRSRV_ERROR eError; + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + PVR_UNREFERENCED_PARAMETER(psDevInfo); + + if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) + { + if (bIdleDevice) + { + /* + * Idle SGX. + */ + PDUMPSUSPEND(); + + eError = SGXPrePowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_IDLE, + PVRSRV_DEV_POWER_STATE_ON); + + if (eError != PVRSRV_OK) + { + PDUMPRESUME(); + return eError; + } + } + } + + PVR_DPF((PVR_DBG_MESSAGE,"SGXPreClockSpeedChange: SGX clock speed was %uHz", + psDevInfo->ui32CoreClockSpeed)); + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function SGXPostClockSpeedChange + + @Description + + Does processing required after an SGX clock speed change. 
+ + @Input hDevHandle : SGX Device Node + @Input bIdleDevice : Whether the microkernel had been idled previously + @Input eCurrentPowerState : Power state of the device + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR SGXPostClockSpeedChange (IMG_HANDLE hDevHandle, + IMG_BOOL bIdleDevice, + PVRSRV_DEV_POWER_STATE eCurrentPowerState) +{ + PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + IMG_UINT32 ui32OldClockSpeed = psDevInfo->ui32CoreClockSpeed; + + PVR_UNREFERENCED_PARAMETER(ui32OldClockSpeed); + + if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) + { + PVRSRV_ERROR eError; + + /* + Re-generate the timing data required by SGX. + */ + eError = SGXUpdateTimingInfo(psDeviceNode); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXUpdateTimingInfo failed")); + return eError; + } + + if (bIdleDevice) + { + /* + * Resume SGX. + */ + eError = SGXPostPowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_ON, + PVRSRV_DEV_POWER_STATE_IDLE); + + PDUMPRESUME(); + + if (eError != PVRSRV_OK) + { + return eError; + } + } + else + { + SGXStartTimer(psDevInfo); + } + } + + PVR_DPF((PVR_DBG_MESSAGE,"SGXPostClockSpeedChange: SGX clock speed changed from %uHz to %uHz", + ui32OldClockSpeed, psDevInfo->ui32CoreClockSpeed)); + + return PVRSRV_OK; +} + + +/****************************************************************************** + End of file (sgxpower.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxreset.c b/pvr-source/services4/srvkm/devices/sgx/sgxreset.c new file mode 100644 index 0000000..dcdefae --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/sgxreset.c @@ -0,0 +1,808 @@ +/*************************************************************************/ /*! 
+@Title Device specific reset routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "sgxdefs.h" +#include "sgxmmu.h" +#include "services_headers.h" +#include "sgxinfokm.h" +#include "sgxconfig.h" +#include "sgxutils.h" + +#include "pdump_km.h" + + +/*! +******************************************************************************* + + @Function SGXInitClocks + + @Description + Initialise the SGX clocks + + @Input psDevInfo - device info. structure + @Input ui32PDUMPFlags - flags to control PDUMP output + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID SGXInitClocks(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PDUMPFlags) +{ + IMG_UINT32 ui32RegVal; + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + ui32RegVal = psDevInfo->ui32ClkGateCtl; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_CLKGATECTL, ui32RegVal, ui32PDUMPFlags); + +#if defined(EUR_CR_CLKGATECTL2) + ui32RegVal = psDevInfo->ui32ClkGateCtl2; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL2, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_CLKGATECTL2, ui32RegVal, ui32PDUMPFlags); +#endif +} + + +/*! 
+******************************************************************************* + + @Function SGXResetInitBIFContexts + + @Description + Initialise the BIF memory contexts + + @Input psDevInfo - SGX Device Info + + @Return IMG_VOID + +******************************************************************************/ +static IMG_VOID SGXResetInitBIFContexts(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PDUMPFlags) +{ + IMG_UINT32 ui32RegVal; + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + ui32RegVal = 0; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + +#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the BIF bank settings\r\n"); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags); +#endif /* SGX_FEATURE_MULTIPLE_MEM_CONTEXTS */ + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the BIF directory list\r\n"); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags); + +#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + { + IMG_UINT32 ui32DirList, ui32DirListReg; + + for (ui32DirList = 1; + ui32DirList < SGX_FEATURE_BIF_NUM_DIRLISTS; + ui32DirList++) + { + ui32DirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (ui32DirList - 1); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32DirListReg, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, ui32DirListReg, ui32RegVal, ui32PDUMPFlags); + } + } +#endif /* SGX_FEATURE_MULTIPLE_MEM_CONTEXTS */ +} + + +/*! 
+******************************************************************************* + + @Function SGXResetSetupBIFContexts + + @Description + Configure the BIF for the EDM context + + @Input psDevInfo - SGX Device Info + + @Return IMG_VOID + +******************************************************************************/ +static IMG_VOID SGXResetSetupBIFContexts(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PDUMPFlags) +{ + IMG_UINT32 ui32RegVal; + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + /* Set up EDM for bank 0 to point at kernel context */ + ui32RegVal = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT); + + #if defined(SGX_FEATURE_2D_HARDWARE) && !defined(SGX_FEATURE_PTLA) + /* Set up 2D core for bank 0 to point at kernel context */ + ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT); + #endif /* SGX_FEATURE_2D_HARDWARE */ + + #if defined(FIX_HW_BRN_23410) + /* Set up TA core for bank 0 to point at kernel context to guarantee it is a valid context */ + ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT); + #endif /* FIX_HW_BRN_23410 */ + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Set up EDM requestor page table in BIF\r\n"); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags); + #endif /* defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) */ + + { + IMG_UINT32 ui32EDMDirListReg; + + /* Set up EDM context with kernel page directory */ + #if (SGX_BIF_DIR_LIST_INDEX_EDM == 0) + ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE0; + #else + /* Bases 0 and 1 are not necessarily contiguous */ + ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (SGX_BIF_DIR_LIST_INDEX_EDM - 1); + #endif /* SGX_BIF_DIR_LIST_INDEX_EDM */ + + ui32RegVal = psDevInfo->sKernelPDDevPAddr.uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT; + +#if 
defined(FIX_HW_BRN_28011) + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal); + PDUMPPDREGWITHFLAGS(&psDevInfo->sMMUAttrib, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG); +#endif + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32EDMDirListReg, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the EDM's directory list base\r\n"); + PDUMPPDREGWITHFLAGS(&psDevInfo->sMMUAttrib, ui32EDMDirListReg, ui32RegVal, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG); + } +} + + +/*! +******************************************************************************* + + @Function SGXResetSleep + + @Description + + Sleep for a short time to allow reset register writes to complete. + Required because no status registers are available to poll on. + + @Input psDevInfo - SGX Device Info + @Input ui32PDUMPFlags - flags to control PDUMP output + @Input bPDump - Pdump the sleep + + @Return Nothing + +******************************************************************************/ +static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PDUMPFlags, + IMG_BOOL bPDump) +{ +#if defined(PDUMP) || defined(EMULATOR) + IMG_UINT32 ui32ReadRegister; + + #if defined(SGX_FEATURE_MP) + ui32ReadRegister = EUR_CR_MASTER_SOFT_RESET; + #else + ui32ReadRegister = EUR_CR_SOFT_RESET; + #endif /* SGX_FEATURE_MP */ +#endif + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + /* Sleep for 100 SGX clocks */ + SGXWaitClocks(psDevInfo, 100); + if (bPDump) + { + PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags); +#if defined(PDUMP) + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Read back to flush the register writes\r\n"); + PDumpRegRead(SGX_PDUMPREG_NAME, ui32ReadRegister, ui32PDUMPFlags); +#endif + } + +#if defined(EMULATOR) + /* + Read a register to make sure we wait long enough on the emulator... + */ + OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32ReadRegister); +#endif +} + + +#if !defined(SGX_FEATURE_MP) +/*! 
+******************************************************************************* + + @Function SGXResetSoftReset + + @Description + + Write to the SGX soft reset register. + + @Input psDevInfo - SGX Device Info + @Input bResetBIF - Include the BIF in the soft reset + @Input ui32PDUMPFlags - flags to control PDUMP output + @Input bPDump - Pdump the sleep + + @Return Nothing + +******************************************************************************/ +static IMG_VOID SGXResetSoftReset(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bResetBIF, + IMG_UINT32 ui32PDUMPFlags, + IMG_BOOL bPDump) +{ + IMG_UINT32 ui32SoftResetRegVal; + + ui32SoftResetRegVal = + /* add common reset bits: */ + EUR_CR_SOFT_RESET_DPM_RESET_MASK | + EUR_CR_SOFT_RESET_TA_RESET_MASK | + EUR_CR_SOFT_RESET_USE_RESET_MASK | + EUR_CR_SOFT_RESET_ISP_RESET_MASK | + EUR_CR_SOFT_RESET_TSP_RESET_MASK; + +/* add conditional reset bits: */ +#ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TWOD_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_TE_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TE_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_MTE_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MTE_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_ISP2_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ISP2_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_PDS_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PDS_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_PBE_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PBE_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK; +#endif +#if 
defined(EUR_CR_SOFT_RESET_MADD_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MADD_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_ITR_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ITR_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_TEX_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TEX_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_VDM_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_VDM_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK; +#endif +#if defined(EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK) + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK; +#endif + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + if (bResetBIF) + { + ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_BIF_RESET_MASK; + } + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32SoftResetRegVal); + if (bPDump) + { + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_SOFT_RESET, ui32SoftResetRegVal, ui32PDUMPFlags); + } +} + + +/*! +******************************************************************************* + + @Function SGXResetInvalDC + + @Description + + Invalidate the BIF Directory Cache and wait for the operation to complete. + + @Input psDevInfo - SGX Device Info + @Input ui32PDUMPFlags - flags to control PDUMP output + + @Return Nothing + +******************************************************************************/ +static IMG_VOID SGXResetInvalDC(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32PDUMPFlags, + IMG_BOOL bPDump) +{ + IMG_UINT32 ui32RegVal; + + /* Invalidate BIF Directory cache. 
*/ +#if defined(EUR_CR_BIF_CTRL_INVAL) + ui32RegVal = EUR_CR_BIF_CTRL_INVAL_ALL_MASK; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL, ui32RegVal); + if (bPDump) + { + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL_INVAL, ui32RegVal, ui32PDUMPFlags); + } +#else + ui32RegVal = EUR_CR_BIF_CTRL_INVALDC_MASK; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); + if (bPDump) + { + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + } + + ui32RegVal = 0; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); + if (bPDump) + { + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + } +#endif + SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump); + +#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) + { + /* + Wait for the DC invalidate to complete - indicated by + outstanding reads reaching zero. + */ + if (PollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + EUR_CR_BIF_MEM_REQ_STAT), + 0, + EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, + MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + IMG_FALSE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"Wait for DC invalidate failed.")); + PVR_DBG_BREAK; + } + + if (bPDump) + { + PDUMPREGPOLWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_MEM_REQ_STAT, 0, EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, ui32PDUMPFlags, PDUMP_POLL_OPERATOR_EQUAL); + } + } +#endif /* SGX_FEATURE_MULTIPLE_MEM_CONTEXTS */ +} +#endif /* SGX_FEATURE_MP */ + + +/*! +******************************************************************************* + + @Function SGXReset + + @Description + + Reset chip + + @Input psDevInfo - device info. 
structure + @Input bHardwareRecovery - true if recovering powered hardware, + false if powering up + @Input ui32PDUMPFlags - flags to control PDUMP output + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_BOOL bHardwareRecovery, + IMG_UINT32 ui32PDUMPFlags) +#if !defined(SGX_FEATURE_MP) +{ + IMG_UINT32 ui32RegVal; +#if defined(EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK) + const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK; +#else + const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK; +#endif + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n"); + +#if defined(FIX_HW_BRN_23944) + /* Pause the BIF. */ + ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); + if (ui32RegVal & ui32BifFaultMask) + { + /* Page fault needs to be cleared before resetting the BIF. 
*/ + ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + } +#endif /* defined(FIX_HW_BRN_23944) */ + + /* Reset all including BIF */ + SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + /* + Initialise the BIF state. + */ +#if defined(SGX_FEATURE_36BIT_MMU) + /* enable 36bit addressing mode if the MMU supports it*/ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK, ui32PDUMPFlags); +#endif + + SGXResetInitBIFContexts(psDevInfo, ui32PDUMPFlags); + +#if defined(EUR_CR_BIF_MEM_ARB_CONFIG) + /* + Initialise the memory arbiter to its default state + */ + ui32RegVal = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) | + (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) | + (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal, ui32PDUMPFlags); +#endif /* EUR_CR_BIF_MEM_ARB_CONFIG */ + +#if defined(SGX_FEATURE_SYSTEM_CACHE) + #if defined(SGX_BYPASS_SYSTEM_CACHE) + /* set the SLC to bypass all accesses */ + ui32RegVal = MNE_CR_CTRL_BYPASS_ALL_MASK; + #else + #if defined(FIX_HW_BRN_26620) + ui32RegVal = 0; + #else + /* set the SLC to bypass cache-coherent accesses */ + ui32RegVal = 
MNE_CR_CTRL_BYP_CC_MASK; + #endif + #if defined(FIX_HW_BRN_34028) + /* Bypass the MNE for the USEC requester */ + ui32RegVal |= (8 << MNE_CR_CTRL_BYPASS_SHIFT); + #endif + #endif /* SGX_BYPASS_SYSTEM_CACHE */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, MNE_CR_CTRL, ui32RegVal); + PDUMPREG(SGX_PDUMPREG_NAME, MNE_CR_CTRL, ui32RegVal); +#endif /* SGX_FEATURE_SYSTEM_CACHE */ + + if (bHardwareRecovery) + { + /* + Set all requestors to the dummy PD which forces all memory + accesses to page fault. + This enables us to flush out BIF requests from parts of SGX + which do not have their own soft reset. + Note: sBIFResetPDDevPAddr.uiAddr is a relative address (2GB max) + MSB is the bus master flag; 1 == enabled + */ + ui32RegVal = (IMG_UINT32)psDevInfo->sBIFResetPDDevPAddr.uiAddr; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + + /* Bring BIF out of reset. */ + SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE); + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + + SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + + /* + Check for a page fault from parts of SGX which do not have a reset. + */ + for (;;) + { + IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); + IMG_DEV_VIRTADDR sBifFault; + IMG_UINT32 ui32PDIndex, ui32PTIndex; + + if ((ui32BifIntStat & ui32BifFaultMask) == 0) + { + break; + } + + /* + There is a page fault, so reset the BIF again, map in the dummy page, + bring the BIF up and invalidate the Directory Cache. + */ + sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT); + PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr)); + ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); + ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; + + /* Put the BIF into reset. 
*/ + SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE); + + /* Map in the dummy page. */ + psDevInfo->pui32BIFResetPD[ui32PDIndex] = (psDevInfo->sBIFResetPTDevPAddr.uiAddr + >>SGX_MMU_PDE_ADDR_ALIGNSHIFT) + | SGX_MMU_PDE_PAGE_SIZE_4K + | SGX_MMU_PDE_VALID; + psDevInfo->pui32BIFResetPT[ui32PTIndex] = (psDevInfo->sBIFResetPageDevPAddr.uiAddr + >>SGX_MMU_PTE_ADDR_ALIGNSHIFT) + | SGX_MMU_PTE_VALID; + + /* Clear outstanding events. */ + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal); + ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + + /* Bring the BIF out of reset. */ + SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE); + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + + /* Invalidate Directory Cache. */ + SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + + /* Unmap the dummy page and try again. */ + psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0; + psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0; + } + } + else + { + /* Bring BIF out of reset. */ + SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE); + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); + } + + /* + Initialise the BIF memory contexts before bringing the rest of SGX out of reset. 
+ */ + SGXResetSetupBIFContexts(psDevInfo, ui32PDUMPFlags); + +#if defined(SGX_FEATURE_2D_HARDWARE) && !defined(SGX_FEATURE_PTLA) + /* check that the heap base has the right alignment (1Mb) */ + #if ((SGX_2D_HEAP_BASE & ~EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK) != 0) + #error "SGXReset: SGX_2D_HEAP_BASE doesn't match EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK alignment" + #endif + /* Set up 2D requestor base */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags); +#endif + + /* Invalidate BIF Directory cache. */ + SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + PVR_DPF((PVR_DBG_MESSAGE,"Soft Reset of SGX")); + + /* Take chip out of reset */ + ui32RegVal = 0; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags); + + /* wait a bit */ + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n"); +} + +#else + +{ + IMG_UINT32 ui32RegVal; + + PVR_UNREFERENCED_PARAMETER(bHardwareRecovery); + +#if !defined(PDUMP) + PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); +#endif /* PDUMP */ + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX MP reset sequence\r\n"); + + /* Put hydra into soft reset */ + ui32RegVal = EUR_CR_MASTER_SOFT_RESET_BIF_RESET_MASK | + EUR_CR_MASTER_SOFT_RESET_IPF_RESET_MASK | + EUR_CR_MASTER_SOFT_RESET_DPM_RESET_MASK | + EUR_CR_MASTER_SOFT_RESET_VDM_RESET_MASK; + + if (bHardwareRecovery) + { + ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_MCI_RESET_MASK; + } + +#if defined(SGX_FEATURE_PTLA) + ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_PTLA_RESET_MASK; +#endif +#if defined(SGX_FEATURE_SYSTEM_CACHE) + ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_SLC_RESET_MASK; +#endif + + /* Hard reset the slave cores */ + ui32RegVal |= EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(0) | + 
EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(1) | + EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(2) | + EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(3); + + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Soft reset hydra partition, hard reset the cores\r\n"); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + ui32RegVal = 0; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_CTRL, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra BIF control\r\n"); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); + +#if defined(SGX_FEATURE_SYSTEM_CACHE) + #if defined(SGX_BYPASS_SYSTEM_CACHE) + ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_ALL_MASK; + #else + ui32RegVal = EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_MASK | + #if defined(FIX_HW_BRN_30954) + EUR_CR_MASTER_SLC_CTRL_DISABLE_REORDERING_MASK | + #endif + #if defined(PVR_SLC_8KB_ADDRESS_MODE) + (4 << EUR_CR_MASTER_SLC_CTRL_ADDR_DECODE_MODE_SHIFT) | + #endif + #if defined(FIX_HW_BRN_33809) + (2 << EUR_CR_MASTER_SLC_CTRL_ADDR_DECODE_MODE_SHIFT) | + #endif + (0xC << EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_SHIFT); + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra SLC control\r\n"); + PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SLC_CTRL, ui32RegVal); + + ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_MASK; + #if defined(FIX_HW_BRN_31620) + ui32RegVal |= EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_MMU_MASK; + #endif + #if defined(FIX_HW_BRN_31195) + ui32RegVal |= EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE0_MASK | + EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE1_MASK | + EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE2_MASK | + EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE3_MASK | + EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_TA_MASK; + #endif + #endif /* 
SGX_BYPASS_SYSTEM_CACHE */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the hydra SLC bypass control\r\n"); + PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal); +#endif /* SGX_FEATURE_SYSTEM_CACHE */ + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + /* Remove the resets */ + ui32RegVal = 0; + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32RegVal); + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Remove the resets from all of SGX\r\n"); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Turn on the slave cores' clock gating\r\n"); + SGXInitClocks(psDevInfo, ui32PDUMPFlags); + + SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the slave BIFs\r\n"); + +#if defined(FIX_HW_BRN_31278) || defined(FIX_HW_BRN_31620) || defined(FIX_HW_BRN_31671) || defined(FIX_HW_BRN_32085) + #if defined(FIX_HW_BRN_31278) || defined(FIX_HW_BRN_32085) + /* disable prefetch */ + ui32RegVal = (1<<EUR_CR_MASTER_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT); + #else + ui32RegVal = (1<<EUR_CR_MASTER_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT) | EUR_CR_MASTER_BIF_MMU_CTRL_PREFETCHING_ON_MASK; + #endif + #if !defined(FIX_HW_BRN_31620) && !defined(FIX_HW_BRN_31671) + /* enable the DC TLB */ + ui32RegVal |= EUR_CR_MASTER_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK; + #endif + + /* Master bank */ + OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_MMU_CTRL, ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_BIF_MMU_CTRL, ui32RegVal, ui32PDUMPFlags); + + #if defined(FIX_HW_BRN_31278) || defined(FIX_HW_BRN_32085) + /* disable prefetch */ + ui32RegVal = (1<<EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT); + #else + ui32RegVal = (1<<EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT) | 
EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_MASK; + #endif + #if !defined(FIX_HW_BRN_31620) && !defined(FIX_HW_BRN_31671) + /* enable the DC TLB */ + ui32RegVal |= EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK; + #endif + + /* Per-core */ + { + IMG_UINT32 ui32Core; + + for (ui32Core=0;ui32Core<SGX_FEATURE_MP_CORE_COUNT;ui32Core++) + { + OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_MP_CORE_SELECT(EUR_CR_BIF_MMU_CTRL, ui32Core), ui32RegVal); + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_BIF_MMU_CTRL, ui32Core), ui32RegVal, ui32PDUMPFlags); + } + } +#endif + + SGXResetInitBIFContexts(psDevInfo, ui32PDUMPFlags); + SGXResetSetupBIFContexts(psDevInfo, ui32PDUMPFlags); + + PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX MP reset sequence\r\n"); +} +#endif /* SGX_FEATURE_MP */ + + +/****************************************************************************** + End of file (sgxreset.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxtransfer.c b/pvr-source/services4/srvkm/devices/sgx/sgxtransfer.c new file mode 100644 index 0000000..81f3b07 --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/sgxtransfer.c @@ -0,0 +1,814 @@ +/*************************************************************************/ /*! +@Title Device specific transfer queue routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if defined(TRANSFER_QUEUE) + +#include <stddef.h> + +#include "sgxdefs.h" +#include "services_headers.h" +#include "buffer_manager.h" +#include "sgxinfo.h" +#include "sysconfig.h" +#include "pdump_km.h" +#include "mmu.h" +#include "pvr_bridge.h" +#include "sgx_bridge_km.h" +#include "sgxinfokm.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "sgxutils.h" +#include "ttrace.h" + +#if defined (SUPPORT_SID_INTERFACE) +IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK_KM *psKick) +#else +IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick) +#endif +{ + PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo; + SGXMKIF_COMMAND sCommand = {0}; + SGXMKIF_TRANSFERCMD_SHARED *psSharedTransferCmd; + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + PVRSRV_ERROR eError; + IMG_UINT32 loop; + IMG_HANDLE hDevMemContext = IMG_NULL; + IMG_BOOL abSrcSyncEnable[SGX_MAX_TRANSFER_SYNC_OPS]; + IMG_UINT32 ui32RealSrcSyncNum = 0; + IMG_BOOL abDstSyncEnable[SGX_MAX_TRANSFER_SYNC_OPS]; + IMG_UINT32 ui32RealDstSyncNum = 0; + + +#if defined(PDUMP) + IMG_BOOL bPersistentProcess = IMG_FALSE; + /* + * For persistent processes, the HW kicks should not go into the + * extended init phase; only keep memory transactions from the + * window system which are necessary to run the client app. 
+ */ + { + PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData(); + if(psPerProc != IMG_NULL) + { + bPersistentProcess = psPerProc->bPDumpPersistent; + } + } +#endif /* PDUMP */ +#if defined(FIX_HW_BRN_31620) + hDevMemContext = psKick->hDevMemContext; +#endif + PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_ENTER, TRANSFER_TOKEN_SUBMIT); + + for (loop = 0; loop < SGX_MAX_TRANSFER_SYNC_OPS; loop++) + { + abSrcSyncEnable[loop] = IMG_TRUE; + abDstSyncEnable[loop] = IMG_TRUE; + } + + if (!CCB_OFFSET_IS_VALID(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset)) + { + PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: Invalid CCB offset")); + PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + TRANSFER_TOKEN_SUBMIT); + return PVRSRV_ERROR_INVALID_PARAMS; + } + /* override QAC warning about stricter alignment */ + /* PRQA S 3305 1 */ + psSharedTransferCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset); + + PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_CMD_START, TRANSFER_TOKEN_SUBMIT); + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_CCB, + TRANSFER_TOKEN_CCB_OFFSET, psKick->ui32SharedCmdCCBOffset); + + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_TA_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psSharedTransferCmd->ui32TASyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++; + psSharedTransferCmd->ui32TASyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + + psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + } + else + { + psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr.uiAddr = 0; + 
psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr.uiAddr = 0; + } + + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_3D_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psSharedTransferCmd->ui323DSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++; + psSharedTransferCmd->ui323DSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + + psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + } + else + { + psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr.uiAddr = 0; + psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr.uiAddr = 0; + } + + /* filter out multiple occurrences of the same sync object from srcs or dests + * note : the same sync can still be used to synchronize both src and dst. + */ + for (loop = 0; loop < MIN(SGX_MAX_TRANSFER_SYNC_OPS, psKick->ui32NumSrcSync); loop++) + { + IMG_UINT32 i; + + PVRSRV_KERNEL_SYNC_INFO * psMySyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + + for (i = 0; i < loop; i++) + { + if (abSrcSyncEnable[i]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i]; + + if (psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr == psMySyncInfo->sWriteOpsCompleteDevVAddr.uiAddr) + { + PVR_DPF((PVR_DBG_WARNING, "SGXSubmitTransferKM : Same src synchronized multiple times!")); + abSrcSyncEnable[loop] = IMG_FALSE; + break; + } + } + } + if (abSrcSyncEnable[loop]) + { + ui32RealSrcSyncNum++; + } + } + for (loop = 0; loop < MIN(SGX_MAX_TRANSFER_SYNC_OPS, psKick->ui32NumDstSync); loop++) + { + IMG_UINT32 i; + + PVRSRV_KERNEL_SYNC_INFO * psMySyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop]; + + for (i = 0; i < loop; i++) + { + if (abDstSyncEnable[i]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO 
*)psKick->ahDstSyncInfo[i]; + + if (psSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr == psMySyncInfo->sWriteOpsCompleteDevVAddr.uiAddr) + { + PVR_DPF((PVR_DBG_WARNING, "SGXSubmitTransferKM : Same dst synchronized multiple times!")); + abDstSyncEnable[loop] = IMG_FALSE; + break; + } + } + } + if (abDstSyncEnable[loop]) + { + ui32RealDstSyncNum++; + } + } + + psSharedTransferCmd->ui32NumSrcSyncs = ui32RealSrcSyncNum; + psSharedTransferCmd->ui32NumDstSyncs = ui32RealDstSyncNum; + + if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL) + { + IMG_UINT32 i = 0; + + for (loop = 0; loop < psKick->ui32NumSrcSync; loop++) + { + if (abSrcSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_SRC_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psSharedTransferCmd->asSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + psSharedTransferCmd->asSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + + psSharedTransferCmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psSharedTransferCmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + i++; + } + } + PVR_ASSERT(i == ui32RealSrcSyncNum); + + i = 0; + for (loop = 0; loop < psKick->ui32NumDstSync; loop++) + { + if (abDstSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop]; + + psSyncInfo->psSyncData->ui64LastWrite = ui64KickCount; + + PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_TRANSFER, TRANSFER_TOKEN_DST_SYNC, + psSyncInfo, PVRSRV_SYNCOP_SAMPLE); + + psSharedTransferCmd->asDstSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + psSharedTransferCmd->asDstSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + psSharedTransferCmd->asDstSyncs[i].ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending; 
+ + psSharedTransferCmd->asDstSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + psSharedTransferCmd->asDstSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + psSharedTransferCmd->asDstSyncs[i].sReadOps2CompleteDevVAddr = psSyncInfo->sReadOps2CompleteDevVAddr; + i++; + } + } + PVR_ASSERT(i == ui32RealDstSyncNum); + + /* + * We allow source and destination sync objects to be the + * same, which is why the read/write pending updates are delayed + * until the transfer command has been updated with the current + * values from the objects. + */ + for (loop = 0; loop < psKick->ui32NumSrcSync; loop++) + { + if (abSrcSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + psSyncInfo->psSyncData->ui32ReadOpsPending++; + } + } + for (loop = 0; loop < psKick->ui32NumDstSync; loop++) + { + if (abDstSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop]; + psSyncInfo->psSyncData->ui32WriteOpsPending++; + } + } + } + +#if defined(PDUMP) + if ((PDumpIsCaptureFrameKM() + || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) + && (bPersistentProcess == IMG_FALSE) ) + { + PDUMPCOMMENT("Shared part of transfer command\r\n"); + PDUMPMEM(psSharedTransferCmd, + psCCBMemInfo, + psKick->ui32CCBDumpWOff, + sizeof(SGXMKIF_TRANSFERCMD_SHARED), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL) + { + IMG_UINT32 i = 0; + + for (loop = 0; loop < psKick->ui32NumSrcSync; loop++) + { + if (abSrcSyncEnable[loop]) + { + psSyncInfo = psKick->ahSrcSyncInfo[loop]; + + PDUMPCOMMENT("Tweak src surface write op in transfer cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asSrcSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal)), + 
sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak src surface read op in transfer cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asSrcSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + i++; + } + } + + i = 0; + for (loop = 0; loop < psKick->ui32NumDstSync; loop++) + { + if (abDstSyncEnable[i]) + { + IMG_UINT32 ui32PDumpReadOp2 = 0; + psSyncInfo = psKick->ahDstSyncInfo[loop]; + + PDUMPCOMMENT("Tweak dest surface write op in transfer cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asDstSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak dest surface read op in transfer cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asDstSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak dest surface read op2 in transfer cmd\r\n"); + PDUMPMEM(&ui32PDumpReadOp2, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, asDstSyncs) + i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOps2PendingVal)), + sizeof(ui32PDumpReadOp2), + 
psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + i++; + } + } + + /* + * We allow the first source and destination sync objects to be the + * same, which is why the read/write pending updates are delayed + * until the transfer command has been updated with the current + * values from the objects. + */ + for (loop = 0; loop < (psKick->ui32NumSrcSync); loop++) + { + if (abSrcSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; + } + } + + for (loop = 0; loop < (psKick->ui32NumDstSync); loop++) + { + if (abDstSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0]; + psSyncInfo->psSyncData->ui32LastOpDumpVal++; + } + } + } + + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = psKick->hTASyncInfo; + + PDUMPCOMMENT("Tweak TA/TQ surface write op in transfer cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32TASyncWriteOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + psSyncInfo->psSyncData->ui32LastOpDumpVal++; + } + + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = psKick->h3DSyncInfo; + + PDUMPCOMMENT("Tweak 3D/TQ surface write op in transfer cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)(offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui323DSyncWriteOpsPendingVal)), + sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + psSyncInfo->psSyncData->ui32LastOpDumpVal++; + } + } +#endif + + sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr; + + PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_CMD_END, + TRANSFER_TOKEN_SUBMIT); + + eError = SGXScheduleCCBCommandKM(hDevHandle, 
SGXMKIF_CMD_TRANSFER, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags, hDevMemContext, IMG_FALSE); + + if (eError == PVRSRV_ERROR_RETRY) + { + /* Client will retry, so undo the sync ops pending increment(s) done above. */ + if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL) + { + for (loop = 0; loop < psKick->ui32NumSrcSync; loop++) + { + if (abSrcSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + psSyncInfo->psSyncData->ui32ReadOpsPending--; +#if defined(PDUMP) + if (PDumpIsCaptureFrameKM() + || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) + { + psSyncInfo->psSyncData->ui32LastReadOpDumpVal--; + } +#endif + } + } + for (loop = 0; loop < psKick->ui32NumDstSync; loop++) + { + if (abDstSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop]; + psSyncInfo->psSyncData->ui32WriteOpsPending--; +#if defined(PDUMP) + if (PDumpIsCaptureFrameKM() + || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) + { + psSyncInfo->psSyncData->ui32LastOpDumpVal--; + } +#endif + } + } + } + + /* Command needed to be synchronised with the TA? */ + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + psSyncInfo->psSyncData->ui32WriteOpsPending--; + } + + /* Command needed to be synchronised with the 3D? 
*/ + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + psSyncInfo->psSyncData->ui32WriteOpsPending--; + } + } + + else if (PVRSRV_OK != eError) + { + PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: SGXScheduleCCBCommandKM failed.")); + PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + TRANSFER_TOKEN_SUBMIT); + return eError; + } + + +#if defined(NO_HARDWARE) + if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_NOSYNCUPDATE) == 0) + { + /* Update sync objects pretending that we have done the job*/ + for (loop = 0; loop < psKick->ui32NumSrcSync; loop++) + { + if (abSrcSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[loop]; + psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending; + } + } + + for (loop = 0; loop < psKick->ui32NumDstSync; loop++) + { + if (abDstSyncEnable[loop]) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[loop]; + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + } + + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + } +#endif + PVR_TTRACE(PVRSRV_TRACE_GROUP_TRANSFER, PVRSRV_TRACE_CLASS_FUNCTION_EXIT, + TRANSFER_TOKEN_SUBMIT); + return eError; +} + +#if defined(SGX_FEATURE_2D_HARDWARE) +#if defined (SUPPORT_SID_INTERFACE) +IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK_KM *psKick) +#else +IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick) +#endif + +{ + PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO 
*)psKick->hCCBMemInfo; + SGXMKIF_COMMAND sCommand = {0}; + SGXMKIF_2DCMD_SHARED *ps2DCmd; + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo; + PVRSRV_ERROR eError; + IMG_UINT32 i; + IMG_HANDLE hDevMemContext = IMG_NULL; +#if defined(PDUMP) + IMG_BOOL bPersistentProcess = IMG_FALSE; + /* + * For persistent processes, the HW kicks should not go into the + * extended init phase; only keep memory transactions from the + * window system which are necessary to run the client app. + */ + { + PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData(); + if(psPerProc != IMG_NULL) + { + bPersistentProcess = psPerProc->bPDumpPersistent; + } + } +#endif /* PDUMP */ +#if defined(FIX_HW_BRN_31620) + hDevMemContext = psKick->hDevMemContext; +#endif + + if (!CCB_OFFSET_IS_VALID(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset)) + { + PVR_DPF((PVR_DBG_ERROR, "SGXSubmit2DKM: Invalid CCB offset")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + /* override QAC warning about stricter alignment */ + /* PRQA S 3305 1 */ + ps2DCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset); + + OSMemSet(ps2DCmd, 0, sizeof(*ps2DCmd)); + + /* Command needs to be synchronised with the TA? */ + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + + ps2DCmd->sTASyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++; + ps2DCmd->sTASyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + + ps2DCmd->sTASyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + ps2DCmd->sTASyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + } + + /* Command needs to be synchronised with the 3D? 
*/ + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + + ps2DCmd->s3DSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++; + ps2DCmd->s3DSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + + ps2DCmd->s3DSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + ps2DCmd->s3DSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + } + + /* + * We allow the first source and destination sync objects to be the + * same, which is why the read/write pending updates are delayed + * until the transfer command has been updated with the current + * values from the objects. + */ + ps2DCmd->ui32NumSrcSync = psKick->ui32NumSrcSync; + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = psKick->ahSrcSyncInfo[i]; + + ps2DCmd->sSrcSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + ps2DCmd->sSrcSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + + ps2DCmd->sSrcSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + ps2DCmd->sSrcSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + psSyncInfo = psKick->hDstSyncInfo; + + ps2DCmd->sDstSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending; + ps2DCmd->sDstSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending; + ps2DCmd->sDstSyncData.ui32ReadOps2PendingVal = psSyncInfo->psSyncData->ui32ReadOps2Pending; + + ps2DCmd->sDstSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; + ps2DCmd->sDstSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr; + ps2DCmd->sDstSyncData.sReadOps2CompleteDevVAddr = psSyncInfo->sReadOps2CompleteDevVAddr; + } + + /* Read/Write ops pending updates, delayed from above */ + for (i = 0; i < psKick->ui32NumSrcSync; 
i++) + { + psSyncInfo = psKick->ahSrcSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsPending++; + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + psSyncInfo = psKick->hDstSyncInfo; + psSyncInfo->psSyncData->ui32WriteOpsPending++; + } + +#if defined(PDUMP) + if ((PDumpIsCaptureFrameKM() + || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) + && (bPersistentProcess == IMG_FALSE) ) + { + /* Pdump the command from the per context CCB */ + PDUMPCOMMENT("Shared part of 2D command\r\n"); + PDUMPMEM(ps2DCmd, + psCCBMemInfo, + psKick->ui32CCBDumpWOff, + sizeof(SGXMKIF_2DCMD_SHARED), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = psKick->ahSrcSyncInfo[i]; + + PDUMPCOMMENT("Tweak src surface write op in 2D cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32WriteOpsPendingVal), + sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak src surface read op in 2D cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32ReadOpsPendingVal), + sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + IMG_UINT32 ui32PDumpReadOp2 = 0; + psSyncInfo = psKick->hDstSyncInfo; + + PDUMPCOMMENT("Tweak dest surface write op in 2D cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32WriteOpsPendingVal), + sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + + PDUMPCOMMENT("Tweak dest surface read op in 2D 
cmd\r\n"); + PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32ReadOpsPendingVal), + sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + PDUMPCOMMENT("Tweak dest surface read op2 in 2D cmd\r\n"); + PDUMPMEM(&ui32PDumpReadOp2, + psCCBMemInfo, + psKick->ui32CCBDumpWOff + (IMG_UINT32)offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32ReadOps2PendingVal), + sizeof(ui32PDumpReadOp2), + psKick->ui32PDumpFlags, + MAKEUNIQUETAG(psCCBMemInfo)); + } + + /* Read/Write ops pending updates, delayed from above */ + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = psKick->ahSrcSyncInfo[i]; + psSyncInfo->psSyncData->ui32LastReadOpDumpVal++; + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + psSyncInfo = psKick->hDstSyncInfo; + psSyncInfo->psSyncData->ui32LastOpDumpVal++; + } + } +#endif + + sCommand.ui32Data[1] = psKick->sHW2DContextDevVAddr.uiAddr; + + eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_2D, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags, hDevMemContext, IMG_FALSE); + + if (eError == PVRSRV_ERROR_RETRY) + { + /* Client will retry, so undo the write ops pending increment + done above. + */ +#if defined(PDUMP) + if (PDumpIsCaptureFrameKM()) + { + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = psKick->ahSrcSyncInfo[i]; + psSyncInfo->psSyncData->ui32LastReadOpDumpVal--; + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + psSyncInfo = psKick->hDstSyncInfo; + psSyncInfo->psSyncData->ui32LastOpDumpVal--; + } + } +#endif + + for (i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = psKick->ahSrcSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsPending--; + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + psSyncInfo = psKick->hDstSyncInfo; + psSyncInfo->psSyncData->ui32WriteOpsPending--; + } + + /* Command needed to be synchronised with the TA? 
*/ + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsPending--; + } + + /* Command needed to be synchronised with the 3D? */ + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsPending--; + } + } + + + + +#if defined(NO_HARDWARE) + /* Update sync objects pretending that we have done the job*/ + for(i = 0; i < psKick->ui32NumSrcSync; i++) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i]; + psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending; + } + + if (psKick->hDstSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + + if (psKick->hTASyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } + + if (psKick->h3DSyncInfo != IMG_NULL) + { + psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo; + + psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending; + } +#endif + + return eError; +} +#endif /* SGX_FEATURE_2D_HARDWARE */ +#endif /* TRANSFER_QUEUE */ diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxutils.c b/pvr-source/services4/srvkm/devices/sgx/sgxutils.c new file mode 100644 index 0000000..227675d --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/sgxutils.c @@ -0,0 +1,1912 @@ +/*************************************************************************/ /*! +@Title Device specific utility routines +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Device specific functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include <stddef.h> + +#include "sgxdefs.h" +#include "services_headers.h" +#include "buffer_manager.h" +#include "sgx_bridge_km.h" +#include "sgxapi_km.h" +#include "sgxinfo.h" +#include "sgx_mkif_km.h" +#include "sysconfig.h" +#include "pdump_km.h" +#include "mmu.h" +#include "pvr_bridge_km.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "sgxutils.h" +#include "ttrace.h" + +#ifdef __linux__ +#include <linux/kernel.h> // sprintf +#include <linux/string.h> // strncpy, strlen +#else +#include <stdio.h> +#endif + +IMG_UINT64 ui64KickCount; + + +#if defined(SYS_CUSTOM_POWERDOWN) +PVRSRV_ERROR SysPowerDownMISR(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32CallerID); +#endif + + + +/*! +****************************************************************************** + + @Function SGXPostActivePowerEvent + + @Description + + post power event functionality (e.g. restart) + + @Input psDeviceNode : SGX Device Node + @Input ui32CallerID - KERNEL_ID or ISR_ID + + @Return IMG_VOID : + +******************************************************************************/ +static IMG_VOID SGXPostActivePowerEvent(PVRSRV_DEVICE_NODE * psDeviceNode, + IMG_UINT32 ui32CallerID) +{ + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; + + /* Update the counter for stats. 
*/ + psSGXHostCtl->ui32NumActivePowerEvents++; + + if ((psSGXHostCtl->ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0) + { + PVR_DPF((PVR_DBG_MESSAGE, "SGXPostActivePowerEvent: SGX requests immediate restart")); + + /* + Events were queued during the active power + request, so SGX will need to be restarted. + */ + if (ui32CallerID == ISR_ID) + { + psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE; + } + else + { + SGXScheduleProcessQueuesKM(psDeviceNode); + } + } +} + + +/*! +****************************************************************************** + + @Function SGXTestActivePowerEvent + + @Description + + Checks whether the microkernel has generated an active power event. If so, + perform the power transition. + + @Input psDeviceNode : SGX Device Node + @Input ui32CallerID - KERNEL_ID or ISR_ID + + @Return IMG_VOID : + +******************************************************************************/ +IMG_VOID SGXTestActivePowerEvent (PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32CallerID) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; + + /* + * Quickly check (without lock) if there is an IDLE or APM event we should handle. + * This check fails most of the time so we don't want to incur lock overhead. + * Check the flags in the reverse order that microkernel clears them to prevent + * us from seeing an inconsistent state. 
+ */ + if ((((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) == 0) && + ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) != 0)) || + (((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) == 0) && + ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) != 0))) + { + eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE); + if (eError == PVRSRV_ERROR_RETRY) + { + return; + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXTestActivePowerEvent failed to acquire lock - " + "ui32CallerID:%d eError:%u", ui32CallerID, eError)); + return; + } + + /* + * Check again (with lock) if IDLE event has been cleared or handled. A race + * condition may allow multiple threads to pass the quick check. + */ + if(((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) == 0) && + ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_IDLE) != 0)) + { + psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_IDLE; + psDevInfo->bSGXIdle = IMG_TRUE; + + SysSGXIdleEntered(); + } + + /* + * Check again (with lock) if APM event has been cleared or handled. A race + * condition may allow multiple threads to pass the quick check. + */ + if (((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) == 0) && + ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) != 0)) + { + /* Microkernel is idle and is requesting to be powered down. */ + psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER; + + /* Suspend pdumping. */ + PDUMPSUSPEND(); + +#if defined(SYS_CUSTOM_POWERDOWN) + /* + Some power down code cannot be executed inside an MISR on + some platforms that use mutexes inside the power code. 
+ */ + eError = SysPowerDownMISR(psDeviceNode, ui32CallerID); +#else + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE_OFF); +#endif + if (eError == PVRSRV_OK) + { + SGXPostActivePowerEvent(psDeviceNode, ui32CallerID); + } + /* Resume pdumping */ + PDUMPRESUME(); + } + + PVRSRVPowerUnlock(ui32CallerID); + } + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXTestActivePowerEvent error:%u", eError)); + } +} + + +/****************************************************************************** + FUNCTION : SGXAcquireKernelCCBSlot + + PURPOSE : Attempts to obtain a slot in the Kernel CCB + + PARAMETERS : psCCB - the CCB + + RETURNS : Address of space if available, IMG_NULL otherwise +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(SGXAcquireKernelCCBSlot) +#endif +static INLINE SGXMKIF_COMMAND * SGXAcquireKernelCCBSlot(PVRSRV_SGX_CCB_INFO *psCCB) +{ + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + if(((*psCCB->pui32WriteOffset + 1) & 255) != *psCCB->pui32ReadOffset) + { + return &psCCB->psCommands[*psCCB->pui32WriteOffset]; + } + + OSSleepms(1); + } END_LOOP_UNTIL_TIMEOUT(); + + /* Time out on waiting for CCB space */ + return IMG_NULL; +} + +/*! 
+****************************************************************************** + + @Function SGXScheduleCCBCommand + + @Description - Submits a CCB command and kicks the ukernel (without + power management) + + @Input psDevInfo - pointer to device info + @Input eCmdType - see SGXMKIF_CMD_* + @Input psCommandData - kernel CCB command + @Input ui32CallerID - KERNEL_ID or ISR_ID + @Input ui32PDumpFlags + + @Return ui32Error - success or failure + +******************************************************************************/ +PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_DEVICE_NODE *psDeviceNode, + SGXMKIF_CMD_TYPE eCmdType, + SGXMKIF_COMMAND *psCommandData, + IMG_UINT32 ui32CallerID, + IMG_UINT32 ui32PDumpFlags, + IMG_HANDLE hDevMemContext, + IMG_BOOL bLastInScene) +{ + PVRSRV_SGX_CCB_INFO *psKernelCCB; + PVRSRV_ERROR eError = PVRSRV_OK; + SGXMKIF_COMMAND *psSGXCommand; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; +#if defined(FIX_HW_BRN_31620) + IMG_UINT32 ui32CacheMasks[4]; + IMG_UINT32 i; + MMU_CONTEXT *psMMUContext; +#endif +#if defined(PDUMP) + IMG_VOID *pvDumpCommand; + IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended(); + IMG_BOOL bPersistentProcess = IMG_FALSE; +#else + PVR_UNREFERENCED_PARAMETER(ui32CallerID); + PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); +#endif + +#if defined(FIX_HW_BRN_31620) + for(i=0;i<4;i++) + { + ui32CacheMasks[i] = 0; + } + + psMMUContext = psDevInfo->hKernelMMUContext; + psDeviceNode->pfnMMUGetCacheFlushRange(psMMUContext, &ui32CacheMasks[0]); + + /* Put the apps memory context in the bottom half */ + if (hDevMemContext) + { + BM_CONTEXT *psBMContext = (BM_CONTEXT *) hDevMemContext; + + psMMUContext = psBMContext->psMMUContext; + psDeviceNode->pfnMMUGetCacheFlushRange(psMMUContext, &ui32CacheMasks[2]); + } + + /* If we have an outstanding flush request then set the cachecontrol bit */ + if (ui32CacheMasks[0] || ui32CacheMasks[1] || ui32CacheMasks[2] || 
ui32CacheMasks[3]) + { + psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_BIF_PD; + } +#endif + +#if defined(FIX_HW_BRN_28889) + /* + If the data cache and bif cache need invalidating there has been a cleanup + request. Therefore, we need to send the invalidate seperately and wait + for it to complete. + */ + if ( (eCmdType != SGXMKIF_CMD_PROCESS_QUEUES) && + ((psDevInfo->ui32CacheControl & SGXMKIF_CC_INVAL_DATA) != 0) && + ((psDevInfo->ui32CacheControl & (SGXMKIF_CC_INVAL_BIF_PT | SGXMKIF_CC_INVAL_BIF_PD)) != 0)) + { + #if defined(PDUMP) + PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo; + #endif + SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl; + SGXMKIF_COMMAND sCacheCommand = {0}; + + eError = SGXScheduleCCBCommand(psDeviceNode, + SGXMKIF_CMD_PROCESS_QUEUES, + &sCacheCommand, + ui32CallerID, + ui32PDumpFlags, + hDevMemContext, + bLastInScene); + if (eError != PVRSRV_OK) + { + goto Exit; + } + + /* Wait for the invalidate to happen */ + #if !defined(NO_HARDWARE) + if(PollForValueKM(&psSGXHostCtl->ui32InvalStatus, + PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE, + PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE, + 2 * MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + IMG_FALSE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommand: Wait for uKernel to Invalidate BIF cache failed")); + PVR_DBG_BREAK; + } + #endif + + #if defined(PDUMP) + /* Pdump the poll as well. 
*/ + PDUMPCOMMENTWITHFLAGS(0, "Host Control - Poll for BIF cache invalidate request to complete"); + PDUMPMEMPOL(psSGXHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32InvalStatus), + PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE, + PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE, + PDUMP_POLL_OPERATOR_EQUAL, + 0, + MAKEUNIQUETAG(psSGXHostCtlMemInfo)); + #endif /* PDUMP */ + + psSGXHostCtl->ui32InvalStatus &= ~(PVRSRV_USSE_EDM_BIF_INVAL_COMPLETE); + PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus), sizeof(IMG_UINT32), 0, MAKEUNIQUETAG(psSGXHostCtlMemInfo)); + } +#else + PVR_UNREFERENCED_PARAMETER(hDevMemContext); +#endif + +#if defined(FIX_HW_BRN_31620) + if ((eCmdType != SGXMKIF_CMD_FLUSHPDCACHE) && (psDevInfo->ui32CacheControl & SGXMKIF_CC_INVAL_BIF_PD)) + { + SGXMKIF_COMMAND sPDECacheCommand = {0}; + IMG_DEV_PHYADDR sDevPAddr; + + /* Put the kernel info in the top 1/2 of the data */ + psMMUContext = psDevInfo->hKernelMMUContext; + + psDeviceNode->pfnMMUGetPDPhysAddr(psMMUContext, &sDevPAddr); + sPDECacheCommand.ui32Data[0] = sDevPAddr.uiAddr | 1; + sPDECacheCommand.ui32Data[1] = ui32CacheMasks[0]; + sPDECacheCommand.ui32Data[2] = ui32CacheMasks[1]; + + /* Put the apps memory context in the bottom half */ + if (hDevMemContext) + { + BM_CONTEXT *psBMContext = (BM_CONTEXT *) hDevMemContext; + + psMMUContext = psBMContext->psMMUContext; + + psDeviceNode->pfnMMUGetPDPhysAddr(psMMUContext, &sDevPAddr); + /* Or in 1 to the lsb to show we have a valid context */ + sPDECacheCommand.ui32Data[3] = sDevPAddr.uiAddr | 1; + sPDECacheCommand.ui32Data[4] = ui32CacheMasks[2]; + sPDECacheCommand.ui32Data[5] = ui32CacheMasks[3]; + } + + /* Only do a kick if there is any update */ + if (sPDECacheCommand.ui32Data[1] | sPDECacheCommand.ui32Data[2] | sPDECacheCommand.ui32Data[4] | + sPDECacheCommand.ui32Data[5]) + { + eError = SGXScheduleCCBCommand(psDeviceNode, + SGXMKIF_CMD_FLUSHPDCACHE, + &sPDECacheCommand, + ui32CallerID, + ui32PDumpFlags, + hDevMemContext, + 
bLastInScene); + if (eError != PVRSRV_OK) + { + goto Exit; + } + } + } +#endif +#if defined(PDUMP) + /* + * For persistent processes, the HW kicks should not go into the + * extended init phase; only keep memory transactions from the + * window system which are necessary to run the client app. + */ + { + PVRSRV_PER_PROCESS_DATA* psPerProc = PVRSRVFindPerProcessData(); + if(psPerProc != IMG_NULL) + { + bPersistentProcess = psPerProc->bPDumpPersistent; + } + } +#endif /* PDUMP */ + psKernelCCB = psDevInfo->psKernelCCBInfo; + + psSGXCommand = SGXAcquireKernelCCBSlot(psKernelCCB); + + /* Wait for CCB space timed out */ + if(!psSGXCommand) + { + PVR_DPF((PVR_DBG_ERROR, "SGXScheduleCCBCommand: Wait for CCB space timed out")) ; + eError = PVRSRV_ERROR_TIMEOUT; + goto Exit; + } + + /* embed cache control word */ + psCommandData->ui32CacheControl = psDevInfo->ui32CacheControl; + +#if defined(PDUMP) + /* Accumulate any cache invalidates that may have happened */ + psDevInfo->sPDContext.ui32CacheControl |= psDevInfo->ui32CacheControl; +#endif + + /* and clear it */ + psDevInfo->ui32CacheControl = 0; + + /* Copy command data over */ + *psSGXCommand = *psCommandData; + + if (eCmdType >= SGXMKIF_CMD_MAX) + { + PVR_DPF((PVR_DBG_ERROR, "SGXScheduleCCBCommand: Unknown command type: %d", eCmdType)) ; + eError = PVRSRV_ERROR_INVALID_CCB_COMMAND; + goto Exit; + } + + if (eCmdType == SGXMKIF_CMD_2D || + eCmdType == SGXMKIF_CMD_TRANSFER || + ((eCmdType == SGXMKIF_CMD_TA) && bLastInScene)) + { + SYS_DATA *psSysData; + + /* CPU cache clean control */ + SysAcquireData(&psSysData); + + if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH) + { + OSFlushCPUCacheKM(); + } + else if(psSysData->ePendingCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN) + { + OSCleanCPUCacheKM(); + } + + /* Clear the pending op */ + psSysData->ePendingCacheOpType = PVRSRV_MISC_INFO_CPUCACHEOP_NONE; + } + + PVR_ASSERT(eCmdType < SGXMKIF_CMD_MAX); + psSGXCommand->ui32ServiceAddress = 
psDevInfo->aui32HostKickAddr[eCmdType]; /* PRQA S 3689 */ /* misuse of enums for bounds checking */ + +#if defined(PDUMP) + if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE) && + (bPersistentProcess == IMG_FALSE) ) + { + /* Poll for space in the CCB. */ + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for space in the Kernel CCB\r\n"); + PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo, + offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset), + (psKernelCCB->ui32CCBDumpWOff + 1) & 0xff, + 0xff, + PDUMP_POLL_OPERATOR_NOTEQUAL, + ui32PDumpFlags, + MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo)); + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB command (type == %d)\r\n", eCmdType); + pvDumpCommand = (IMG_VOID *)((IMG_UINT8 *)psKernelCCB->psCCBMemInfo->pvLinAddrKM + (*psKernelCCB->pui32WriteOffset * sizeof(SGXMKIF_COMMAND))); + + PDUMPMEM(pvDumpCommand, + psKernelCCB->psCCBMemInfo, + psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND), + sizeof(SGXMKIF_COMMAND), + ui32PDumpFlags, + MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo)); + + /* Overwrite cache control with pdump shadow */ + PDUMPMEM(&psDevInfo->sPDContext.ui32CacheControl, + psKernelCCB->psCCBMemInfo, + psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND) + + offsetof(SGXMKIF_COMMAND, ui32CacheControl), + sizeof(IMG_UINT32), + ui32PDumpFlags, + MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo)); + + if (PDumpIsCaptureFrameKM() + || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) + { + /* Clear cache invalidate shadow */ + psDevInfo->sPDContext.ui32CacheControl = 0; + } + } +#endif + +#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE) + /* Make sure the previous command has been read before send the next one */ + eError = PollForValueKM (psKernelCCB->pui32ReadOffset, + *psKernelCCB->pui32WriteOffset, + 0xFF, + MAX_HW_TIME_US, + MAX_HW_TIME_US/WAIT_TRY_COUNT, + IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXScheduleCCBCommand: Timeout waiting 
for previous command to be read")) ; + eError = PVRSRV_ERROR_TIMEOUT; + goto Exit; + } +#endif + + /* + Increment the write offset + */ + *psKernelCCB->pui32WriteOffset = (*psKernelCCB->pui32WriteOffset + 1) & 255; + +#if defined(PDUMP) + if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE) && + (bPersistentProcess == IMG_FALSE) ) + { + #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE) + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for previous Kernel CCB CMD to be read\r\n"); + PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo, + offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset), + (psKernelCCB->ui32CCBDumpWOff), + 0xFF, + PDUMP_POLL_OPERATOR_EQUAL, + ui32PDumpFlags, + MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo)); + #endif + + if (PDumpIsCaptureFrameKM() + || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0)) + { + psKernelCCB->ui32CCBDumpWOff = (psKernelCCB->ui32CCBDumpWOff + 1) & 0xFF; + psDevInfo->ui32KernelCCBEventKickerDumpVal = (psDevInfo->ui32KernelCCBEventKickerDumpVal + 1) & 0xFF; + } + + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB write offset\r\n"); + PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff, + psKernelCCB->psCCBCtlMemInfo, + offsetof(PVRSRV_SGX_CCB_CTL, ui32WriteOffset), + sizeof(IMG_UINT32), + ui32PDumpFlags, + MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo)); + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB event kicker\r\n"); + PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal, + psDevInfo->psKernelCCBEventKickerMemInfo, + 0, + sizeof(IMG_UINT32), + ui32PDumpFlags, + MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo)); + PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kick the SGX microkernel\r\n"); + #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE) + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), EUR_CR_EVENT_KICK2_NOW_MASK, ui32PDumpFlags); + #else + PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, 
SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK, ui32PDumpFlags); + #endif + } +#endif + + *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF; + + /* + * New command submission is considered a proper handling of any pending + * IDLE or APM event, so mark them as handled to prevent other host threads + * from taking action. + */ + psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_IDLE; + psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER; + + OSWriteMemoryBarrier(); + + /* Order is importent for post processor! */ + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_MKSYNC, PVRSRV_TRACE_CLASS_NONE, + MKSYNC_TOKEN_KERNEL_CCB_OFFSET, *psKernelCCB->pui32WriteOffset); + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_MKSYNC, PVRSRV_TRACE_CLASS_NONE, + MKSYNC_TOKEN_CORE_CLK, psDevInfo->ui32CoreClockSpeed); + PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_MKSYNC, PVRSRV_TRACE_CLASS_NONE, + MKSYNC_TOKEN_UKERNEL_CLK, psDevInfo->ui32uKernelTimerClock); + + +#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE) + OSWriteHWReg(psDevInfo->pvRegsBaseKM, + SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), + EUR_CR_EVENT_KICK2_NOW_MASK); +#else + OSWriteHWReg(psDevInfo->pvRegsBaseKM, + SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), + EUR_CR_EVENT_KICK_NOW_MASK); +#endif + + OSMemoryBarrier(); + +#if defined(NO_HARDWARE) + /* Increment read offset */ + *psKernelCCB->pui32ReadOffset = (*psKernelCCB->pui32ReadOffset + 1) & 255; +#endif + + ui64KickCount++; +Exit: + return eError; +} + + +/*! 
+****************************************************************************** + + @Function SGXScheduleCCBCommandKM + + @Description - Submits a CCB command and kicks the ukernel + + @Input psDeviceNode - pointer to SGX device node + @Input eCmdType - see SGXMKIF_CMD_* + @Input psCommandData - kernel CCB command + @Input ui32CallerID - KERNEL_ID or ISR_ID + @Input ui32PDumpFlags + + @Return ui32Error - success or failure + +******************************************************************************/ +PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode, + SGXMKIF_CMD_TYPE eCmdType, + SGXMKIF_COMMAND *psCommandData, + IMG_UINT32 ui32CallerID, + IMG_UINT32 ui32PDumpFlags, + IMG_HANDLE hDevMemContext, + IMG_BOOL bLastInScene) +{ + PVRSRV_ERROR eError; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + + eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE); + if (eError == PVRSRV_ERROR_RETRY) + { + if (ui32CallerID == ISR_ID) + { + SYS_DATA *psSysData; + + /* + ISR failed to acquire lock so it must be held by a kernel thread. + Bring up and kick SGX if necessary when the lock is available. + */ + psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE; + eError = PVRSRV_OK; + + SysAcquireData(&psSysData); + OSScheduleMISR(psSysData); + } + else + { + /* + Return to srvclient for retry. + */ + } + + return eError; + } + else if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM failed to acquire lock - " + "ui32CallerID:%d eError:%u", ui32CallerID, eError)); + return eError; + } + + /* Note that a power-up has been dumped in the init phase. */ + PDUMPSUSPEND(); + + SysSGXCommandPending(psDevInfo->bSGXIdle); + psDevInfo->bSGXIdle = IMG_FALSE; + + /* Ensure that SGX is powered up before kicking the ukernel. 
*/ + eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE_ON); + + PDUMPRESUME(); + + if (eError == PVRSRV_OK) + { + psDeviceNode->bReProcessDeviceCommandComplete = IMG_FALSE; + } + else + { + PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM failed to acquire lock - " + "ui32CallerID:%d eError:%u", ui32CallerID, eError)); + return eError; + } + + eError = SGXScheduleCCBCommand(psDeviceNode, eCmdType, psCommandData, ui32CallerID, ui32PDumpFlags, hDevMemContext, bLastInScene); + + PVRSRVPowerUnlock(ui32CallerID); + return eError; +} + + +/*! +****************************************************************************** + + @Function SGXScheduleProcessQueuesKM + + @Description - Software command complete handler + + @Input psDeviceNode - SGX device node + +******************************************************************************/ +PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + PVRSRV_ERROR eError; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + SGXMKIF_HOST_CTL *psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM; + IMG_UINT32 ui32PowerStatus; + SGXMKIF_COMMAND sCommand = {0}; + + ui32PowerStatus = psHostCtl->ui32PowerStatus; + if ((ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0) + { + /* The ukernel has no work to do so don't waste power. */ + return PVRSRV_OK; + } + + eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_PROCESS_QUEUES, &sCommand, ISR_ID, 0, IMG_NULL, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueuesKM failed to schedule CCB command: %u", eError)); + return eError; + } + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function SGXIsDevicePowered + + @Description + + Whether the device is powered, for the purposes of lockup detection. 
+ + @Input psDeviceNode - pointer to device node + + @Return IMG_BOOL : Whether device is powered + +******************************************************************************/ +IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode) +{ + return PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex); +} + +/*! +******************************************************************************* + + @Function SGXGetInternalDevInfoKM + + @Description + Gets device information that is not intended to be passed + on beyond the srvclient libs. + + @Input hDevCookie + + @Output psSGXInternalDevInfo + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie, +#if defined (SUPPORT_SID_INTERFACE) + SGX_INTERNAL_DEVINFO_KM *psSGXInternalDevInfo) +#else + SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo) +#endif +{ + PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice; + + psSGXInternalDevInfo->ui32Flags = psDevInfo->ui32Flags; + psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL)psDevInfo->bForcePTOff; + + /* This should be patched up by OS bridge code */ + psSGXInternalDevInfo->hHostCtlKernelMemInfoHandle = + (IMG_HANDLE)psDevInfo->psKernelSGXHostCtlMemInfo; + + return PVRSRV_OK; +} + + +/***************************************************************************** + FUNCTION : SGXCleanupRequest + + PURPOSE : Wait for the microkernel to clean up its references to either a + render context or render target. 
+ + PARAMETERS : psDeviceNode - SGX device node + psHWDataDevVAddr - Device Address of the resource + ui32CleanupType - PVRSRV_CLEANUPCMD_* + bForceCleanup - Skips sync polling + + RETURNS : error status +*****************************************************************************/ +PVRSRV_ERROR SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR *psHWDataDevVAddr, + IMG_UINT32 ui32CleanupType, + IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; + PVRSRV_KERNEL_MEM_INFO *psHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo; + SGXMKIF_HOST_CTL *psHostCtl = psHostCtlMemInfo->pvLinAddrKM; + + SGXMKIF_COMMAND sCommand = {0}; + + + if (bForceCleanup != FORCE_CLEANUP) + { + sCommand.ui32Data[0] = ui32CleanupType; + sCommand.ui32Data[1] = (psHWDataDevVAddr == IMG_NULL) ? 0 : psHWDataDevVAddr->uiAddr; + PDUMPCOMMENTWITHFLAGS(0, "Request ukernel resource clean-up, Type %u, Data 0x%X", sCommand.ui32Data[0], sCommand.ui32Data[1]); + + eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_CLEANUP, &sCommand, KERNEL_ID, 0, IMG_NULL, IMG_FALSE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Failed to submit clean-up command")); + SGXDumpDebugInfo(psDevInfo, IMG_FALSE); + PVR_DBG_BREAK; + return eError; + } + + /* Wait for the uKernel process the cleanup request */ + #if !defined(NO_HARDWARE) + if(PollForValueKM(&psHostCtl->ui32CleanupStatus, + PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE, + PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE, + 10 * MAX_HW_TIME_US, + 1000, + IMG_TRUE) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Wait for uKernel to clean up (%u) failed", ui32CleanupType)); + eError = PVRSRV_ERROR_TIMEOUT; + SGXDumpDebugInfo(psDevInfo, IMG_FALSE); + PVR_DBG_BREAK; + } + #endif + + #if defined(PDUMP) + /* + Pdump the poll as well. 
+ Note: + We don't expect the cleanup to report busy as the client should have + ensured the the resource has been finished with before requesting + it's cleanup. This isn't true of the abnormal termination case but + we don't expect to PDump that. Unless/until PDump has flow control + there isn't anything else we can do. + */ + PDUMPCOMMENTWITHFLAGS(0, "Host Control - Poll for clean-up request to complete"); + PDUMPMEMPOL(psHostCtlMemInfo, + offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus), + PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_DONE, + PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_DONE, + PDUMP_POLL_OPERATOR_EQUAL, + 0, + MAKEUNIQUETAG(psHostCtlMemInfo)); + #endif /* PDUMP */ + + if (eError != PVRSRV_OK) + { + return eError; + } + } + + if (psHostCtl->ui32CleanupStatus & PVRSRV_USSE_EDM_CLEANUPCMD_BUSY) + { + /* Only one flag should be set */ + PVR_ASSERT((psHostCtl->ui32CleanupStatus & PVRSRV_USSE_EDM_CLEANUPCMD_DONE) == 0); + eError = PVRSRV_ERROR_RETRY; + psHostCtl->ui32CleanupStatus &= ~(PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_BUSY); + } + else + { + eError = PVRSRV_OK; + psHostCtl->ui32CleanupStatus &= ~(PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE | PVRSRV_USSE_EDM_CLEANUPCMD_DONE); + } + + PDUMPMEM(IMG_NULL, psHostCtlMemInfo, offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus), sizeof(IMG_UINT32), 0, MAKEUNIQUETAG(psHostCtlMemInfo)); + + /* Request the cache invalidate */ +#if defined(SGX_FEATURE_SYSTEM_CACHE) + psDevInfo->ui32CacheControl |= (SGXMKIF_CC_INVAL_BIF_SL | SGXMKIF_CC_INVAL_DATA); +#else + psDevInfo->ui32CacheControl |= SGXMKIF_CC_INVAL_DATA; +#endif + return eError; +} + + +typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_ +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_KERNEL_MEM_INFO *psHWRenderContextMemInfo; + IMG_HANDLE hBlockAlloc; + PRESMAN_ITEM psResItem; + IMG_BOOL bCleanupTimerRunning; + IMG_PVOID pvTimeData; +} SGX_HW_RENDER_CONTEXT_CLEANUP; + + +static PVRSRV_ERROR 
SGXCleanupHWRenderContextCallback(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup = pvParam; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + + eError = SGXCleanupRequest(psCleanup->psDeviceNode, + &psCleanup->psHWRenderContextMemInfo->sDevVAddr, + PVRSRV_CLEANUPCMD_RC, + bForceCleanup); + + if (eError == PVRSRV_ERROR_RETRY) + { + if (!psCleanup->bCleanupTimerRunning) + { + OSTimeCreateWithUSOffset(&psCleanup->pvTimeData, MAX_CLEANUP_TIME_US); + psCleanup->bCleanupTimerRunning = IMG_TRUE; + } + else + { + if (OSTimeHasTimePassed(psCleanup->pvTimeData)) + { + eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE; + psCleanup->bCleanupTimerRunning = IMG_FALSE; + OSTimeDestroy(psCleanup->pvTimeData); + } + } + } + else + { + if (psCleanup->bCleanupTimerRunning) + { + OSTimeDestroy(psCleanup->pvTimeData); + } + } + + if (eError != PVRSRV_ERROR_RETRY) + { + /* Free the Device Mem allocated */ + PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode, + psCleanup->psHWRenderContextMemInfo); + + /* Finally, free the cleanup structure itself */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP), + psCleanup, + psCleanup->hBlockAlloc); + /*not nulling pointer, copy on stack*/ + } + + return eError; +} + +typedef struct _SGX_HW_TRANSFER_CONTEXT_CLEANUP_ +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_KERNEL_MEM_INFO *psHWTransferContextMemInfo; + IMG_HANDLE hBlockAlloc; + PRESMAN_ITEM psResItem; + IMG_BOOL bCleanupTimerRunning; + IMG_PVOID pvTimeData; +} SGX_HW_TRANSFER_CONTEXT_CLEANUP; + + +static PVRSRV_ERROR SGXCleanupHWTransferContextCallback(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)pvParam; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + + eError = SGXCleanupRequest(psCleanup->psDeviceNode, + &psCleanup->psHWTransferContextMemInfo->sDevVAddr, + 
PVRSRV_CLEANUPCMD_TC, + bForceCleanup); + + if (eError == PVRSRV_ERROR_RETRY) + { + if (!psCleanup->bCleanupTimerRunning) + { + OSTimeCreateWithUSOffset(&psCleanup->pvTimeData, MAX_CLEANUP_TIME_US); + psCleanup->bCleanupTimerRunning = IMG_TRUE; + } + else + { + if (OSTimeHasTimePassed(psCleanup->pvTimeData)) + { + eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE; + psCleanup->bCleanupTimerRunning = IMG_FALSE; + OSTimeDestroy(psCleanup->pvTimeData); + } + } + } + else + { + if (psCleanup->bCleanupTimerRunning) + { + OSTimeDestroy(psCleanup->pvTimeData); + } + } + + if (eError != PVRSRV_ERROR_RETRY) + { + /* Free the Device Mem allocated */ + PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode, + psCleanup->psHWTransferContextMemInfo); + + /* Finally, free the cleanup structure itself */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP), + psCleanup, + psCleanup->hBlockAlloc); + /*not nulling pointer, copy on stack*/ + } + + return eError; +} + +IMG_EXPORT +IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE hDeviceNode, + IMG_CPU_VIRTADDR *psHWRenderContextCpuVAddr, + IMG_UINT32 ui32HWRenderContextSize, + IMG_UINT32 ui32OffsetToPDDevPAddr, + IMG_HANDLE hDevMemContext, + IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hBlockAlloc; + SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup; + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + DEVICE_MEMORY_HEAP_INFO *psHeapInfo; + IMG_HANDLE hDevMemContextInt; + MMU_CONTEXT *psMMUContext; + IMG_DEV_PHYADDR sPDDevPAddr; + int iPtrByte; + IMG_UINT8 *pSrc; + IMG_UINT8 *pDst; + PRESMAN_ITEM psResItem; + + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP), + (IMG_VOID **)&psCleanup, + &hBlockAlloc, + "SGX Hardware Render Context Cleanup"); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate memory 
for SGX_HW_RENDER_CONTEXT_CLEANUP structure")); + goto exit0; + } + + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID]; + + eError = PVRSRVAllocDeviceMemKM(hDeviceNode, + psPerProc, + psHeapInfo->hDevMemHeap, + PVRSRV_MEM_READ | PVRSRV_MEM_WRITE + | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT + | PVRSRV_MEM_CACHE_CONSISTENT, + ui32HWRenderContextSize, + 32, + IMG_NULL, + 0, + 0,0,0,IMG_NULL, /* No sparse mapping data */ + &psCleanup->psHWRenderContextMemInfo, + "HW Render Context"); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate device memory for HW Render Context")); + goto exit1; + } + + eError = OSCopyFromUser(psPerProc, + psCleanup->psHWRenderContextMemInfo->pvLinAddrKM, + psHWRenderContextCpuVAddr, + ui32HWRenderContextSize); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't copy user-mode copy of HWContext into device memory")); + goto exit2; + } + + /* Pass the DevVAddr of the new context back up through the bridge */ + psHWRenderContextDevVAddr->uiAddr = psCleanup->psHWRenderContextMemInfo->sDevVAddr.uiAddr; + + /* Retrieve the PDDevPAddr */ + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevMemContextInt, + hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Can't lookup DevMem Context")); + goto exit2; + } + + psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt); + sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext); + + /* + patch-in the Page-Directory Device-Physical address. Note that this is + copied-in one byte at a time, as we have no guarantee that the usermode- + provided ui32OffsetToPDDevPAddr is a validly-aligned address for the + current CPU architecture. 
+ */ + pSrc = (IMG_UINT8 *)&sPDDevPAddr; + pDst = (IMG_UINT8 *)psCleanup->psHWRenderContextMemInfo->pvLinAddrKM; + pDst += ui32OffsetToPDDevPAddr; + + for (iPtrByte = 0; iPtrByte < sizeof(IMG_DEV_PHYADDR); iPtrByte++) + { + pDst[iPtrByte] = pSrc[iPtrByte]; + } + +#if defined(PDUMP) + /* PDUMP the HW context */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW Render context struct"); + + PDUMPMEM( + IMG_NULL, + psCleanup->psHWRenderContextMemInfo, + 0, + ui32HWRenderContextSize, + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psCleanup->psHWRenderContextMemInfo)); + + /* PDUMP the PDDevPAddr */ + PDUMPCOMMENT("Page directory address in HW render context"); + PDUMPPDDEVPADDR( + psCleanup->psHWRenderContextMemInfo, + ui32OffsetToPDDevPAddr, + sPDDevPAddr, + MAKEUNIQUETAG(psCleanup->psHWRenderContextMemInfo), + PDUMP_PD_UNIQUETAG); +#endif + + psCleanup->hBlockAlloc = hBlockAlloc; + psCleanup->psDeviceNode = psDeviceNode; + psCleanup->bCleanupTimerRunning = IMG_FALSE; + + psResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_HW_RENDER_CONTEXT, + (IMG_VOID *)psCleanup, + 0, + &SGXCleanupHWRenderContextCallback); + + if (psResItem == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: ResManRegisterRes failed")); + goto exit2; + } + + psCleanup->psResItem = psResItem; + + return (IMG_HANDLE)psCleanup; + +/* Error exit paths */ +exit2: + PVRSRVFreeDeviceMemKM(hDeviceNode, + psCleanup->psHWRenderContextMemInfo); +exit1: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP), + psCleanup, + psCleanup->hBlockAlloc); + /*not nulling pointer, out of scope*/ +exit0: + return IMG_NULL; +} + +IMG_EXPORT +PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext, IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup; + + PVR_ASSERT(hHWRenderContext != IMG_NULL); + + psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext; + + if (psCleanup == IMG_NULL) + { + 
PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWRenderContextKM: invalid parameter")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = ResManFreeResByPtr(psCleanup->psResItem, bForceCleanup); + + return eError; +} + + +IMG_EXPORT +IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE hDeviceNode, + IMG_CPU_VIRTADDR *psHWTransferContextCpuVAddr, + IMG_UINT32 ui32HWTransferContextSize, + IMG_UINT32 ui32OffsetToPDDevPAddr, + IMG_HANDLE hDevMemContext, + IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hBlockAlloc; + SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup; + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + DEVICE_MEMORY_HEAP_INFO *psHeapInfo; + IMG_HANDLE hDevMemContextInt; + MMU_CONTEXT *psMMUContext; + IMG_DEV_PHYADDR sPDDevPAddr; + int iPtrByte; + IMG_UINT8 *pSrc; + IMG_UINT8 *pDst; + PRESMAN_ITEM psResItem; + + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP), + (IMG_VOID **)&psCleanup, + &hBlockAlloc, + "SGX Hardware Transfer Context Cleanup"); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate memory for SGX_HW_TRANSFER_CONTEXT_CLEANUP structure")); + goto exit0; + } + + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID]; + + eError = PVRSRVAllocDeviceMemKM(hDeviceNode, + psPerProc, + psHeapInfo->hDevMemHeap, + PVRSRV_MEM_READ | PVRSRV_MEM_WRITE + | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT + | PVRSRV_MEM_CACHE_CONSISTENT, + ui32HWTransferContextSize, + 32, + IMG_NULL, + 0, + 0,0,0,IMG_NULL, /* No sparse mapping data */ + &psCleanup->psHWTransferContextMemInfo, + "HW Render Context"); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate device memory for HW Render Context")); + goto exit1; + } + + 
eError = OSCopyFromUser(psPerProc, + psCleanup->psHWTransferContextMemInfo->pvLinAddrKM, + psHWTransferContextCpuVAddr, + ui32HWTransferContextSize); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't copy user-mode copy of HWContext into device memory")); + goto exit2; + } + + /* Pass the DevVAddr of the new context back up through the bridge */ + psHWTransferContextDevVAddr->uiAddr = psCleanup->psHWTransferContextMemInfo->sDevVAddr.uiAddr; + + /* Retrieve the PDDevPAddr */ + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevMemContextInt, + hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Can't lookup DevMem Context")); + goto exit2; + } + + psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt); + sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext); + + /* + patch-in the Page-Directory Device-Physical address. Note that this is + copied-in one byte at a time, as we have no guarantee that the usermode- + provided ui32OffsetToPDDevPAddr is a validly-aligned address for the + current CPU architecture. 
+ */ + pSrc = (IMG_UINT8 *)&sPDDevPAddr; + pDst = (IMG_UINT8 *)psCleanup->psHWTransferContextMemInfo->pvLinAddrKM; + pDst += ui32OffsetToPDDevPAddr; + + for (iPtrByte = 0; iPtrByte < sizeof(IMG_DEV_PHYADDR); iPtrByte++) + { + pDst[iPtrByte] = pSrc[iPtrByte]; + } + +#if defined(PDUMP) + /* PDUMP the HW Transfer Context */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW Transfer context struct"); + + PDUMPMEM( + IMG_NULL, + psCleanup->psHWTransferContextMemInfo, + 0, + ui32HWTransferContextSize, + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psCleanup->psHWTransferContextMemInfo)); + + /* PDUMP the PDDevPAddr */ + PDUMPCOMMENT("Page directory address in HW transfer context"); + + PDUMPPDDEVPADDR( + psCleanup->psHWTransferContextMemInfo, + ui32OffsetToPDDevPAddr, + sPDDevPAddr, + MAKEUNIQUETAG(psCleanup->psHWTransferContextMemInfo), + PDUMP_PD_UNIQUETAG); +#endif + + psCleanup->hBlockAlloc = hBlockAlloc; + psCleanup->psDeviceNode = psDeviceNode; + psCleanup->bCleanupTimerRunning = IMG_FALSE; + + psResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_HW_TRANSFER_CONTEXT, + psCleanup, + 0, + &SGXCleanupHWTransferContextCallback); + + if (psResItem == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: ResManRegisterRes failed")); + goto exit2; + } + + psCleanup->psResItem = psResItem; + + return (IMG_HANDLE)psCleanup; + +/* Error exit paths */ +exit2: + PVRSRVFreeDeviceMemKM(hDeviceNode, + psCleanup->psHWTransferContextMemInfo); +exit1: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP), + psCleanup, + psCleanup->hBlockAlloc); + /*not nulling pointer, out of scope*/ + +exit0: + return IMG_NULL; +} + +IMG_EXPORT +PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext, IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup; + + PVR_ASSERT(hHWTransferContext != IMG_NULL); + + psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext; + + if 
(psCleanup == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWTransferContextKM: invalid parameter")); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + eError = ResManFreeResByPtr(psCleanup->psResItem, bForceCleanup); + + return eError; +} + +IMG_EXPORT +PVRSRV_ERROR SGXSetTransferContextPriorityKM( + IMG_HANDLE hDeviceNode, + IMG_HANDLE hHWTransferContext, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32OffsetOfPriorityField) +{ + SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup; + IMG_UINT8 *pSrc; + IMG_UINT8 *pDst; + int iPtrByte; + PVR_UNREFERENCED_PARAMETER(hDeviceNode); + + if (hHWTransferContext != IMG_NULL) + { + psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext; + + if ((ui32OffsetOfPriorityField + sizeof(ui32Priority)) + >= psCleanup->psHWTransferContextMemInfo->uAllocSize) + { + PVR_DPF(( + PVR_DBG_ERROR, + "SGXSetTransferContextPriorityKM: invalid context prioirty offset")); + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* + cannot be sure that offset (passed from user-land) is safe to deref + as a word-ptr on current CPU arch: copy one byte at a time. 
+ */ + pDst = (IMG_UINT8 *)psCleanup->psHWTransferContextMemInfo->pvLinAddrKM; + pDst += ui32OffsetOfPriorityField; + pSrc = (IMG_UINT8 *)&ui32Priority; + + for (iPtrByte = 0; iPtrByte < sizeof(ui32Priority); iPtrByte++) + { + pDst[iPtrByte] = pSrc[iPtrByte]; + } + } + return PVRSRV_OK; +} + +IMG_EXPORT +PVRSRV_ERROR SGXSetRenderContextPriorityKM( + IMG_HANDLE hDeviceNode, + IMG_HANDLE hHWRenderContext, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32OffsetOfPriorityField) +{ + SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup; + IMG_UINT8 *pSrc; + IMG_UINT8 *pDst; + int iPtrByte; + PVR_UNREFERENCED_PARAMETER(hDeviceNode); + + if (hHWRenderContext != IMG_NULL) + { + psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext; + if ((ui32OffsetOfPriorityField + sizeof(ui32Priority)) + >= psCleanup->psHWRenderContextMemInfo->uAllocSize) + { + PVR_DPF(( + PVR_DBG_ERROR, + "SGXSetContextPriorityKM: invalid HWRenderContext prioirty offset")); + + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* + cannot be sure that offset (passed from user-land) is safe to deref + as a word-ptr on current CPU arch: copy one byte at a time. 
+ */ + pDst = (IMG_UINT8 *)psCleanup->psHWRenderContextMemInfo->pvLinAddrKM; + pDst += ui32OffsetOfPriorityField; + + pSrc = (IMG_UINT8 *)&ui32Priority; + + for (iPtrByte = 0; iPtrByte < sizeof(ui32Priority); iPtrByte++) + { + pDst[iPtrByte] = pSrc[iPtrByte]; + } + } + return PVRSRV_OK; +} + +#if defined(SGX_FEATURE_2D_HARDWARE) +typedef struct _SGX_HW_2D_CONTEXT_CLEANUP_ +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + PVRSRV_KERNEL_MEM_INFO *psHW2DContextMemInfo; + IMG_HANDLE hBlockAlloc; + PRESMAN_ITEM psResItem; + IMG_BOOL bCleanupTimerRunning; + IMG_PVOID pvTimeData; +} SGX_HW_2D_CONTEXT_CLEANUP; + +static PVRSRV_ERROR SGXCleanupHW2DContextCallback(IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + SGX_HW_2D_CONTEXT_CLEANUP *psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)pvParam; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + + /* First, ensure the context is no longer being utilised */ + eError = SGXCleanupRequest(psCleanup->psDeviceNode, + &psCleanup->psHW2DContextMemInfo->sDevVAddr, + PVRSRV_CLEANUPCMD_2DC, + bForceCleanup); + + if (eError == PVRSRV_ERROR_RETRY) + { + if (!psCleanup->bCleanupTimerRunning) + { + OSTimeCreateWithUSOffset(&psCleanup->pvTimeData, MAX_CLEANUP_TIME_US); + psCleanup->bCleanupTimerRunning = IMG_TRUE; + } + else + { + if (OSTimeHasTimePassed(psCleanup->pvTimeData)) + { + eError = PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE; + psCleanup->bCleanupTimerRunning = IMG_FALSE; + OSTimeDestroy(psCleanup->pvTimeData); + } + } + } + else + { + if (psCleanup->bCleanupTimerRunning) + { + OSTimeDestroy(psCleanup->pvTimeData); + } + } + + if (eError != PVRSRV_ERROR_RETRY) + { + /* Free the Device Mem allocated */ + PVRSRVFreeDeviceMemKM(psCleanup->psDeviceNode, + psCleanup->psHW2DContextMemInfo); + + /* Finally, free the cleanup structure itself */ + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_2D_CONTEXT_CLEANUP), + psCleanup, + psCleanup->hBlockAlloc); + /*not nulling pointer, copy on stack*/ + } + return eError; 
+} + +IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE hDeviceNode, + IMG_CPU_VIRTADDR *psHW2DContextCpuVAddr, + IMG_UINT32 ui32HW2DContextSize, + IMG_UINT32 ui32OffsetToPDDevPAddr, + IMG_HANDLE hDevMemContext, + IMG_DEV_VIRTADDR *psHW2DContextDevVAddr, + PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hBlockAlloc; + SGX_HW_2D_CONTEXT_CLEANUP *psCleanup; + PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDeviceNode; + DEVICE_MEMORY_INFO *psDevMemoryInfo; + DEVICE_MEMORY_HEAP_INFO *psHeapInfo; + IMG_HANDLE hDevMemContextInt; + MMU_CONTEXT *psMMUContext; + IMG_DEV_PHYADDR sPDDevPAddr; + int iPtrByte; + IMG_UINT8 *pSrc; + IMG_UINT8 *pDst; + PRESMAN_ITEM psResItem; + + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_2D_CONTEXT_CLEANUP), + (IMG_VOID **)&psCleanup, + &hBlockAlloc, + "SGX Hardware 2D Context Cleanup"); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate memory for SGX_HW_2D_CONTEXT_CLEANUP structure")); + goto exit0; + } + + psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; + psHeapInfo = &psDevMemoryInfo->psDeviceMemoryHeap[SGX_KERNEL_DATA_HEAP_ID]; + + eError = PVRSRVAllocDeviceMemKM(hDeviceNode, + psPerProc, + psHeapInfo->hDevMemHeap, + PVRSRV_MEM_READ | PVRSRV_MEM_WRITE + | PVRSRV_MEM_NO_SYNCOBJ | PVRSRV_MEM_EDM_PROTECT + | PVRSRV_MEM_CACHE_CONSISTENT, + ui32HW2DContextSize, + 32, + IMG_NULL, + 0, + 0,0,0,IMG_NULL, /* No sparse mapping data */ + &psCleanup->psHW2DContextMemInfo, + "HW 2D Context"); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate device memory for HW Render Context")); + goto exit1; + } + + eError = OSCopyFromUser(psPerProc, + psCleanup->psHW2DContextMemInfo->pvLinAddrKM, + psHW2DContextCpuVAddr, + ui32HW2DContextSize); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't copy user-mode copy of HWContext into device memory")); + goto exit2; + } 
+ + /* Pass the DevVAddr of the new context back up through the bridge */ + psHW2DContextDevVAddr->uiAddr = psCleanup->psHW2DContextMemInfo->sDevVAddr.uiAddr; + + /* Retrieve the PDDevPAddr */ + eError = PVRSRVLookupHandle(psPerProc->psHandleBase, + &hDevMemContextInt, + hDevMemContext, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT); + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Can't lookup DevMem Context")); + goto exit2; + } + + psMMUContext = BM_GetMMUContextFromMemContext(hDevMemContextInt); + sPDDevPAddr = psDeviceNode->pfnMMUGetPDDevPAddr(psMMUContext); + + /* + patch-in the Page-Directory Device-Physical address. Note that this is + copied-in one byte at a time, as we have no guarantee that the usermode- + provided ui32OffsetToPDDevPAddr is a validly-aligned address for the + current CPU architecture. + */ + pSrc = (IMG_UINT8 *)&sPDDevPAddr; + pDst = (IMG_UINT8 *)psCleanup->psHW2DContextMemInfo->pvLinAddrKM; + pDst += ui32OffsetToPDDevPAddr; + + for (iPtrByte = 0; iPtrByte < sizeof(IMG_DEV_PHYADDR); iPtrByte++) + { + pDst[iPtrByte] = pSrc[iPtrByte]; + } + +#if defined(PDUMP) + /* PDUMP the HW 2D Context */ + PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "HW 2D context struct"); + + PDUMPMEM( + IMG_NULL, + psCleanup->psHW2DContextMemInfo, + 0, + ui32HW2DContextSize, + PDUMP_FLAGS_CONTINUOUS, + MAKEUNIQUETAG(psCleanup->psHW2DContextMemInfo)); + + /* PDUMP the PDDevPAddr */ + PDUMPCOMMENT("Page directory address in HW 2D transfer context"); + PDUMPPDDEVPADDR( + psCleanup->psHW2DContextMemInfo, + ui32OffsetToPDDevPAddr, + sPDDevPAddr, + MAKEUNIQUETAG(psCleanup->psHW2DContextMemInfo), + PDUMP_PD_UNIQUETAG); +#endif + + psCleanup->hBlockAlloc = hBlockAlloc; + psCleanup->psDeviceNode = psDeviceNode; + psCleanup->bCleanupTimerRunning = IMG_FALSE; + + psResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_HW_2D_CONTEXT, + psCleanup, + 0, + &SGXCleanupHW2DContextCallback); + + if (psResItem == IMG_NULL) + { + 
PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: ResManRegisterRes failed")); + goto exit2; + } + + psCleanup->psResItem = psResItem; + + return (IMG_HANDLE)psCleanup; + +/* Error exit paths */ +exit2: + PVRSRVFreeDeviceMemKM(hDeviceNode, + psCleanup->psHW2DContextMemInfo); +exit1: + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, + sizeof(SGX_HW_2D_CONTEXT_CLEANUP), + psCleanup, + psCleanup->hBlockAlloc); + /*not nulling pointer, out of scope*/ +exit0: + return IMG_NULL; +} + +IMG_EXPORT +PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext, IMG_BOOL bForceCleanup) +{ + PVRSRV_ERROR eError; + SGX_HW_2D_CONTEXT_CLEANUP *psCleanup; + + PVR_ASSERT(hHW2DContext != IMG_NULL); + + if (hHW2DContext == IMG_NULL) + { + return (PVRSRV_ERROR_INVALID_PARAMS); + } + + psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)hHW2DContext; + + eError = ResManFreeResByPtr(psCleanup->psResItem, bForceCleanup); + + return eError; +} +#endif /* #if defined(SGX_FEATURE_2D_HARDWARE)*/ + +/*!**************************************************************************** + @Function SGX2DQuerySyncOpsCompleteKM + + @Input psSyncInfo : Sync object to be queried + + @Return IMG_TRUE - ops complete, IMG_FALSE - ops pending + +******************************************************************************/ +#ifdef INLINE_IS_PRAGMA +#pragma inline(SGX2DQuerySyncOpsComplete) +#endif +static INLINE +IMG_BOOL SGX2DQuerySyncOpsComplete(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, + IMG_UINT32 ui32ReadOpsPending, + IMG_UINT32 ui32WriteOpsPending) +{ + PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData; + + return (IMG_BOOL)( + (psSyncData->ui32ReadOpsComplete >= ui32ReadOpsPending) && + (psSyncData->ui32WriteOpsComplete >= ui32WriteOpsPending) + ); +} + +/*!**************************************************************************** + @Function SGX2DQueryBlitsCompleteKM + + @Input psDevInfo : SGX device info structure + + @Input psSyncInfo : Sync object to be queried + + @Return PVRSRV_ERROR + 
+******************************************************************************/ +IMG_EXPORT +PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo, + PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, + IMG_BOOL bWaitForComplete) +{ + IMG_UINT32 ui32ReadOpsPending, ui32WriteOpsPending; + + PVR_UNREFERENCED_PARAMETER(psDevInfo); + + PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Start")); + + ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending; + ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending; + + if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending)) + { + /* Instant success */ + PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Blits complete.")); + return PVRSRV_OK; + } + + /* Not complete yet */ + if (!bWaitForComplete) + { + /* Just report not complete */ + PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Ops pending.")); + return PVRSRV_ERROR_CMD_NOT_PROCESSED; + } + + /* Start polling */ + PVR_DPF((PVR_DBG_MESSAGE, "SGX2DQueryBlitsCompleteKM: Ops pending. Start polling.")); + + LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) + { + OSSleepms(1); + + if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending)) + { + /* Success */ + PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Wait over. Blits complete.")); + return PVRSRV_OK; + } + + OSSleepms(1); + } END_LOOP_UNTIL_TIMEOUT(); + + /* Timed out */ + PVR_DPF((PVR_DBG_ERROR,"SGX2DQueryBlitsCompleteKM: Timed out. 
Ops pending.")); + +#if defined(DEBUG) + { + PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData; + + PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Syncinfo: 0x%x, Syncdata: 0x%x", + (IMG_UINTPTR_T)psSyncInfo, (IMG_UINTPTR_T)psSyncData)); + + PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Read ops complete: %d, Read ops pending: %d", psSyncData->ui32ReadOpsComplete, psSyncData->ui32ReadOpsPending)); + PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Write ops complete: %d, Write ops pending: %d", psSyncData->ui32WriteOpsComplete, psSyncData->ui32WriteOpsPending)); + + } +#endif + + return PVRSRV_ERROR_TIMEOUT; +} + + +IMG_EXPORT +PVRSRV_ERROR SGXFlushHWRenderTargetKM(IMG_HANDLE psDeviceNode, + IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr, + IMG_BOOL bForceCleanup) +{ + PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != IMG_NULL); + + return SGXCleanupRequest(psDeviceNode, + &sHWRTDataSetDevVAddr, + PVRSRV_CLEANUPCMD_RT, + bForceCleanup); +} + + +IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32TimeWraps, + IMG_UINT32 ui32Time) +{ +#if defined(EUR_CR_TIMER) + PVR_UNREFERENCED_PARAMETER(psDevInfo); + PVR_UNREFERENCED_PARAMETER(ui32TimeWraps); + return ui32Time; +#else + IMG_UINT64 ui64Clocks; + IMG_UINT32 ui32Clocksx16; + + ui64Clocks = ((IMG_UINT64)ui32TimeWraps * psDevInfo->ui32uKernelTimerClock) + + (psDevInfo->ui32uKernelTimerClock - (ui32Time & EUR_CR_EVENT_TIMER_VALUE_MASK)); + ui32Clocksx16 = (IMG_UINT32)(ui64Clocks / 16); + + return ui32Clocksx16; +#endif /* EUR_CR_TIMER */ +} + + +IMG_VOID SGXWaitClocks(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32SGXClocks) +{ + /* + Round up to the next microsecond. 
+ */ + OSWaitus(1 + (ui32SGXClocks * 1000000 / psDevInfo->ui32CoreClockSpeed)); +} + + + +/****************************************************************************** + End of file (sgxutils.c) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/devices/sgx/sgxutils.h b/pvr-source/services4/srvkm/devices/sgx/sgxutils.h new file mode 100644 index 0000000..fc2ef6f --- /dev/null +++ b/pvr-source/services4/srvkm/devices/sgx/sgxutils.h @@ -0,0 +1,195 @@ +/*************************************************************************/ /*! +@Title Device specific utility routines declarations +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Inline functions/structures specific to SGX +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "perproc.h" +#include "sgxinfokm.h" + +/* PRQA S 3410 7 */ /* macros require the absence of some brackets */ +#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psCCBKick, offset) \ + ((sizeof(type) <= (psCCBMemInfo)->uAllocSize) && \ + ((psCCBKick)->offset <= (psCCBMemInfo)->uAllocSize - sizeof(type))) + +#define CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \ + ((type *)(((IMG_CHAR *)(psCCBMemInfo)->pvLinAddrKM) + \ + (psCCBKick)->offset)) + +extern IMG_UINT64 ui64KickCount; + + +IMG_IMPORT +IMG_VOID SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_UINT32 ui32CallerID); + +IMG_IMPORT +PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_DEVICE_NODE *psDeviceNode, + SGXMKIF_CMD_TYPE eCommandType, + SGXMKIF_COMMAND *psCommandData, + IMG_UINT32 ui32CallerID, + IMG_UINT32 ui32PDumpFlags, + IMG_HANDLE hDevMemContext, + IMG_BOOL bLastInScene); +IMG_IMPORT +PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode, + SGXMKIF_CMD_TYPE eCommandType, + SGXMKIF_COMMAND *psCommandData, + IMG_UINT32 ui32CallerID, + IMG_UINT32 ui32PDumpFlags, + IMG_HANDLE hDevMemContext, + IMG_BOOL bLastInScene); + +IMG_IMPORT +PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode); + +IMG_IMPORT +IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode); + +IMG_IMPORT +IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode, + IMG_CPU_VIRTADDR *psHWRenderContextCpuVAddr, + IMG_UINT32 ui32HWRenderContextSize, + IMG_UINT32 ui32OffsetToPDDevPAddr, + IMG_HANDLE hDevMemContext, + IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr, + PVRSRV_PER_PROCESS_DATA *psPerProc); + +IMG_IMPORT +IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode, + IMG_CPU_VIRTADDR *psHWTransferContextCpuVAddr, + IMG_UINT32 ui32HWTransferContextSize, + IMG_UINT32 ui32OffsetToPDDevPAddr, + IMG_HANDLE hDevMemContext, + IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr, + 
PVRSRV_PER_PROCESS_DATA *psPerProc); + +IMG_IMPORT +PVRSRV_ERROR SGXFlushHWRenderTargetKM(IMG_HANDLE psSGXDevInfo, + IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr, + IMG_BOOL bForceCleanup); + +IMG_IMPORT +PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext, IMG_BOOL bForceCleanup); + +IMG_IMPORT +PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext, IMG_BOOL bForceCleanup); + +IMG_IMPORT +PVRSRV_ERROR SGXSetRenderContextPriorityKM(IMG_HANDLE hDeviceNode, + IMG_HANDLE hHWRenderContext, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32OffsetOfPriorityField); + +IMG_IMPORT +PVRSRV_ERROR SGXSetTransferContextPriorityKM(IMG_HANDLE hDeviceNode, + IMG_HANDLE hHWTransferContext, + IMG_UINT32 ui32Priority, + IMG_UINT32 ui32OffsetOfPriorityField); + +#if defined(SGX_FEATURE_2D_HARDWARE) +IMG_IMPORT +IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode, + IMG_CPU_VIRTADDR *psHW2DContextCpuVAddr, + IMG_UINT32 ui32HW2DContextSize, + IMG_UINT32 ui32OffsetToPDDevPAddr, + IMG_HANDLE hDevMemContext, + IMG_DEV_VIRTADDR *psHW2DContextDevVAddr, + PVRSRV_PER_PROCESS_DATA *psPerProc); + +IMG_IMPORT +PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext, IMG_BOOL bForceCleanup); +#endif + +IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32TimeWraps, + IMG_UINT32 ui32Time); + +/*! +******************************************************************************* + + @Function SGXWaitClocks + + @Description + + Wait for a specified number of SGX clock cycles to elapse. 
+ + @Input psDevInfo - SGX Device Info + @Input ui32SGXClocks - number of clock cycles to wait + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID SGXWaitClocks(PVRSRV_SGXDEV_INFO *psDevInfo, + IMG_UINT32 ui32SGXClocks); + +PVRSRV_ERROR SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR *psHWDataDevVAddr, + IMG_UINT32 ui32CleanupType, + IMG_BOOL bForceCleanup); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVGetSGXRevDataKM(PVRSRV_DEVICE_NODE* psDeviceNode, IMG_UINT32 *pui32SGXCoreRev, + IMG_UINT32 *pui32SGXCoreID); + +/*! +****************************************************************************** + + @Function SGXContextSuspend + + @Description - Interface to the SGX microkernel to instruct it to suspend or + resume processing on a given context. This will interrupt current + processing of this context if a task is already running and is + interruptable. + + @Input psDeviceNode SGX device node + @Input psHWContextDevVAddr SGX virtual address of the context to be suspended + or resumed. 
Can be of type SGXMKIF_HWRENDERCONTEXT, + SGXMKIF_HWTRANSFERCONTEXT or SGXMKIF_HW2DCONTEXT + @Input bResume IMG_TRUE to put a context into suspend state, + IMG_FALSE to resume a previously suspended context + +******************************************************************************/ +PVRSRV_ERROR SGXContextSuspend(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_VIRTADDR *psHWContextDevVAddr, + IMG_BOOL bResume); + +/****************************************************************************** + End of file (sgxutils.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/env/linux/Kbuild.mk b/pvr-source/services4/srvkm/env/linux/Kbuild.mk new file mode 100644 index 0000000..25e35e9 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/Kbuild.mk @@ -0,0 +1,166 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. 
+# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". +# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+### ########################################################################### + +pvrsrvkm_sgx$(SGXCORE)_$(SGX_CORE_REV)-y += \ + services4/srvkm/env/linux/osfunc.o \ + services4/srvkm/env/linux/mutils.o \ + services4/srvkm/env/linux/mmap.o \ + services4/srvkm/env/linux/module.o \ + services4/srvkm/env/linux/pdump.o \ + services4/srvkm/env/linux/proc.o \ + services4/srvkm/env/linux/pvr_bridge_k.o \ + services4/srvkm/env/linux/pvr_debug.o \ + services4/srvkm/env/linux/mm.o \ + services4/srvkm/env/linux/mutex.o \ + services4/srvkm/env/linux/event.o \ + services4/srvkm/env/linux/osperproc.o \ + services4/srvkm/env/linux/sysfs.o \ + services4/srvkm/common/buffer_manager.o \ + services4/srvkm/common/devicemem.o \ + services4/srvkm/common/deviceclass.o \ + services4/srvkm/common/handle.o \ + services4/srvkm/common/hash.o \ + services4/srvkm/common/lists.o \ + services4/srvkm/common/mem.o \ + services4/srvkm/common/mem_debug.o \ + services4/srvkm/common/metrics.o \ + services4/srvkm/common/osfunc_common.o \ + services4/srvkm/common/pdump_common.o \ + services4/srvkm/common/perproc.o \ + services4/srvkm/common/power.o \ + services4/srvkm/common/pvrsrv.o \ + services4/srvkm/common/queue.o \ + services4/srvkm/common/ra.o \ + services4/srvkm/common/refcount.o \ + services4/srvkm/common/resman.o \ + services4/srvkm/bridged/bridged_support.o \ + services4/srvkm/bridged/bridged_pvr_bridge.o \ + services4/system/$(PVR_SYSTEM)/sysconfig.o \ + services4/system/$(PVR_SYSTEM)/sysutils.o + +pvrsrvkm_sgx$(SGXCORE)_$(SGX_CORE_REV)-$(CONFIG_ION_OMAP) += \ + services4/srvkm/env/linux/ion.o +pvrsrvkm_sgx$(SGXCORE)_$(SGX_CORE_REV)-$(CONFIG_GCBV) += \ + services4/srvkm/env/linux/gc_bvmapping.o + +ifeq ($(SUPPORT_ION),1) +pvrsrvkm-y += \ + services4/srvkm/env/linux/ion.o +endif + +ifeq ($(TTRACE),1) +pvrsrvkm-y += \ + services4/srvkm/common/ttrace.o +endif + +ifneq ($(W),1) +CFLAGS_osfunc.o := -Werror +CFLAGS_mutils.o := -Werror +CFLAGS_mmap.o := -Werror +CFLAGS_module.o := -Werror 
+CFLAGS_pdump.o := -Werror +CFLAGS_proc.o := -Werror +CFLAGS_pvr_bridge_k.o := -Werror +CFLAGS_pvr_debug.o := -Werror +CFLAGS_mm.o := -Werror +CFLAGS_mutex.o := -Werror +CFLAGS_event.o := -Werror +CFLAGS_osperproc.o := -Werror +CFLAGS_buffer_manager.o := -Werror +CFLAGS_devicemem.o := -Werror +CFLAGS_deviceclass.o := -Werror +CFLAGS_handle.o := -Werror +CFLAGS_hash.o := -Werror +CFLAGS_metrics.o := -Werror +CFLAGS_pvrsrv.o := -Werror +CFLAGS_queue.o := -Werror +CFLAGS_ra.o := -Werror +CFLAGS_resman.o := -Werror +CFLAGS_power.o := -Werror +CFLAGS_mem.o := -Werror +CFLAGS_pdump_common.o := -Werror +CFLAGS_bridged_support.o := -Werror +CFLAGS_bridged_pvr_bridge.o := -Werror +CFLAGS_perproc.o := -Werror +CFLAGS_lists.o := -Werror +CFLAGS_mem_debug.o := -Werror +CFLAGS_osfunc_common.o := -Werror +CFLAGS_refcount.o := -Werror +endif + +# SUPPORT_SGX==1 only + +pvrsrvkm_sgx$(SGXCORE)_$(SGX_CORE_REV)-y += \ + services4/srvkm/bridged/sgx/bridged_sgx_bridge.o \ + services4/srvkm/devices/sgx/sgxinit.o \ + services4/srvkm/devices/sgx/sgxpower.o \ + services4/srvkm/devices/sgx/sgxreset.o \ + services4/srvkm/devices/sgx/sgxutils.o \ + services4/srvkm/devices/sgx/sgxkick.o \ + services4/srvkm/devices/sgx/sgxtransfer.o \ + services4/srvkm/devices/sgx/mmu.o \ + services4/srvkm/devices/sgx/pb.o + +ifneq ($(W),1) +CFLAGS_bridged_sgx_bridge.o := -Werror +CFLAGS_sgxinit.o := -Werror +CFLAGS_sgxpower.o := -Werror +CFLAGS_sgxreset.o := -Werror +CFLAGS_sgxutils.o := -Werror +CFLAGS_sgxkick.o := -Werror +CFLAGS_sgxtransfer.o := -Werror +CFLAGS_mmu.o := -Werror +CFLAGS_pb.o := -Werror +endif + +ifeq ($(SUPPORT_DRI_DRM),1) + +pvrsrvkm_sgx$(SGXCORE)_$(SGX_CORE_REV)-y += \ + services4/srvkm/env/linux/pvr_drm.o + +ccflags-y += \ + -I$(KERNELDIR)/include/drm \ + -I$(TOP)/services4/include/env/linux \ + +ifeq ($(PVR_DRI_DRM_NOT_PCI),1) +ccflags-y += -I$(TOP)/services4/3rdparty/linux_drm +endif + +endif # SUPPORT_DRI_DRM diff --git a/pvr-source/services4/srvkm/env/linux/Linux.mk 
b/pvr-source/services4/srvkm/env/linux/Linux.mk new file mode 100644 index 0000000..7e3d0fb --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/Linux.mk @@ -0,0 +1,45 @@ +########################################################################### ### +#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +#@License Dual MIT/GPLv2 +# +# The contents of this file are subject to the MIT license as set out below. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# Alternatively, the contents of this file may be used under the terms of +# the GNU General Public License Version 2 ("GPL") in which case the provisions +# of GPL are applicable instead of those above. +# +# If you wish to allow use of your version of this file only under the terms of +# GPL, and not to allow others to use your version of this file under the terms +# of the MIT license, indicate your decision by deleting the provisions above +# and replace them with the notice and other provisions required by GPL as set +# out in the file called "GPL-COPYING" included in this distribution. If you do +# not delete the provisions above, a recipient may use your version of this file +# under the terms of either the MIT license or GPL. +# +# This License is also included in this distribution in the file called +# "MIT-COPYING". 
+# +# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +### ########################################################################### + +modules := srvkm + +srvkm_type := kernel_module +srvkm_target := pvrsrvkm_sgx$(SGXCORE)_$(SGX_CORE_REV).ko +srvkm_makefile := $(THIS_DIR)/Kbuild.mk diff --git a/pvr-source/services4/srvkm/env/linux/env_data.h b/pvr-source/services4/srvkm/env/linux/env_data.h new file mode 100644 index 0000000..b838809 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/env_data.h @@ -0,0 +1,93 @@ +/*************************************************************************/ /*! +@Title Environmental Data header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Linux-specific part of system data. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef _ENV_DATA_ +#define _ENV_DATA_ + +#include <linux/interrupt.h> +#include <linux/pci.h> + +#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) +#include <linux/workqueue.h> +#endif + +/* + * Env data specific to linux - convenient place to put this + */ + +/* Fairly arbitrary sizes - hopefully enough for all bridge calls */ +#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x1000 +#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000 + +typedef struct _PVR_PCI_DEV_TAG +{ + struct pci_dev *psPCIDev; + HOST_PCI_INIT_FLAGS ePCIFlags; + IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE]; +} PVR_PCI_DEV; + +typedef struct _ENV_DATA_TAG +{ + IMG_VOID *pvBridgeData; + struct pm_dev *psPowerDevice; + IMG_BOOL bLISRInstalled; + IMG_BOOL bMISRInstalled; + IMG_UINT32 ui32IRQ; + IMG_VOID *pvISRCookie; +#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) + struct workqueue_struct *psWorkQueue; +#endif +#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) + struct work_struct sMISRWork; + IMG_VOID *pvMISRData; +#else + struct tasklet_struct sMISRTasklet; +#endif +#if defined (SUPPORT_ION) + IMG_HANDLE hIonHeaps; + IMG_HANDLE hIonDev; +#endif +} ENV_DATA; + +#endif /* _ENV_DATA_ */ +/***************************************************************************** + End of file (env_data.h) +*****************************************************************************/ diff --git a/pvr-source/services4/srvkm/env/linux/env_perproc.h b/pvr-source/services4/srvkm/env/linux/env_perproc.h new file mode 100644 index 0000000..8a37a7f --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/env_perproc.h @@ -0,0 +1,79 @@ +/*************************************************************************/ /*! +@Title OS specific per process data interface +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Linux per process data +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef __ENV_PERPROC_H__ +#define __ENV_PERPROC_H__ + +#include <linux/list.h> +#include <linux/proc_fs.h> + +#include "services.h" +#include "handle.h" + +#define ION_CLIENT_NAME_SIZE 50 +typedef struct _PVRSRV_ENV_PER_PROCESS_DATA_ +{ + IMG_HANDLE hBlockAlloc; + struct proc_dir_entry *psProcDir; +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) + struct list_head sDRMAuthListHead; +#endif +#if defined (SUPPORT_ION) + struct ion_client *psIONClient; + IMG_CHAR azIonClientName[ION_CLIENT_NAME_SIZE]; +#endif +} PVRSRV_ENV_PER_PROCESS_DATA; + +IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc); + +PVRSRV_ERROR LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc); + +IMG_VOID LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc); + +PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase); + +IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID); + +#endif /* __ENV_PERPROC_H__ */ + +/****************************************************************************** + End of file (env_perproc.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/env/linux/event.c b/pvr-source/services4/srvkm/env/linux/event.c new file mode 100644 index 0000000..b70a79d --- /dev/null +++ 
b/pvr-source/services4/srvkm/env/linux/event.c @@ -0,0 +1,414 @@ +/*************************************************************************/ /*! +@Title Event Object +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#endif + +#include <asm/io.h> +#include <asm/page.h> +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) +#include <asm/system.h> +#endif +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/delay.h> +#include <linux/pci.h> + +#include <linux/string.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <asm/hardirq.h> +#include <linux/spinlock.h> +#include <linux/timer.h> +#include <linux/capability.h> +#include <linux/sched.h> +#include <asm/uaccess.h> + +#include "img_types.h" +#include "services_headers.h" +#include "mm.h" +#include "pvrmmap.h" +#include "mmap.h" +#include "env_data.h" +#include "proc.h" +#include "mutex.h" +#include "lock.h" +#include "event.h" + +typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG +{ + rwlock_t sLock; + struct list_head sList; + +} PVRSRV_LINUX_EVENT_OBJECT_LIST; + + +typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG +{ + atomic_t sTimeStamp; + IMG_UINT32 ui32TimeStampPrevious; +#if defined(DEBUG) + IMG_UINT ui32Stats; +#endif + wait_queue_head_t sWait; + struct list_head sList; + IMG_HANDLE hResItem; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList; +} PVRSRV_LINUX_EVENT_OBJECT; + +/*! 
+****************************************************************************** + + @Function LinuxEventObjectListCreate + + @Description + + Linux wait object list creation + + @Output hOSEventKM : Pointer to the event object list handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList) +{ + PVRSRV_LINUX_EVENT_OBJECT_LIST *psEventObjectList; + + if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), + (IMG_VOID **)&psEventObjectList, IMG_NULL, + "Linux Event Object List") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + INIT_LIST_HEAD(&psEventObjectList->sList); + + rwlock_init(&psEventObjectList->sLock); + + *phEventObjectList = (IMG_HANDLE *) psEventObjectList; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function LinuxEventObjectListDestroy + + @Description + + Linux wait object list destruction + + @Input hOSEventKM : Event object list handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList) +{ + + PVRSRV_LINUX_EVENT_OBJECT_LIST *psEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList ; + + if(psEventObjectList) + { + IMG_BOOL bListEmpty; + + read_lock(&psEventObjectList->sLock); + bListEmpty = list_empty(&psEventObjectList->sList); + read_unlock(&psEventObjectList->sLock); + + if (!bListEmpty) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty")); + return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; + } + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), psEventObjectList, IMG_NULL); + /*not nulling 
pointer, copy on stack*/ + } + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function LinuxEventObjectDelete + + @Description + + Linux wait object removal + + @Input hOSEventObjectList : Event object list handle + @Input hOSEventObject : Event object handle + @Input bResManCallback : Called from the resman + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject) +{ + if(hOSEventObjectList) + { + if(hOSEventObject) + { + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject; +#if defined(DEBUG) + PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectListDelete: Event object waits: %u", psLinuxEventObject->ui32Stats)); +#endif + if(ResManFreeResByPtr(psLinuxEventObject->hResItem, CLEANUP_WITH_POLL) != PVRSRV_OK) + { + return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; + } + + return PVRSRV_OK; + } + } + return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; + +} + +/*! 
+****************************************************************************** + + @Function LinuxEventObjectDeleteCallback + + @Description + + Linux wait object removal + + @Input hOSEventObject : Event object handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +static PVRSRV_ERROR LinuxEventObjectDeleteCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bForceCleanup) +{ + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = pvParam; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList; + unsigned long ulLockFlags; + + PVR_UNREFERENCED_PARAMETER(ui32Param); + PVR_UNREFERENCED_PARAMETER(bForceCleanup); + + write_lock_irqsave(&psLinuxEventObjectList->sLock, ulLockFlags); + list_del(&psLinuxEventObject->sList); + write_unlock_irqrestore(&psLinuxEventObjectList->sLock, ulLockFlags); + +#if defined(DEBUG) + PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDeleteCallback: Event object waits: %u", psLinuxEventObject->ui32Stats)); +#endif + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), psLinuxEventObject, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} +/*! 
+****************************************************************************** + + @Function LinuxEventObjectAdd + + @Description + + Linux wait object addition + + @Input hOSEventObjectList : Event object list handle + @Output phOSEventObject : Pointer to the event object handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject) + { + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; + IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); + PVRSRV_PER_PROCESS_DATA *psPerProc; + unsigned long ulLockFlags; + + psPerProc = PVRSRVPerProcessData(ui32PID); + if (psPerProc == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: Couldn't find per-process data")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* allocate completion variable */ + if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), + (IMG_VOID **)&psLinuxEventObject, IMG_NULL, + "Linux Event Object") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory ")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + INIT_LIST_HEAD(&psLinuxEventObject->sList); + + atomic_set(&psLinuxEventObject->sTimeStamp, 0); + psLinuxEventObject->ui32TimeStampPrevious = 0; + +#if defined(DEBUG) + psLinuxEventObject->ui32Stats = 0; +#endif + init_waitqueue_head(&psLinuxEventObject->sWait); + + psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList; + + psLinuxEventObject->hResItem = ResManRegisterRes(psPerProc->hResManContext, + RESMAN_TYPE_EVENT_OBJECT, + psLinuxEventObject, + 0, + &LinuxEventObjectDeleteCallback); + + write_lock_irqsave(&psLinuxEventObjectList->sLock, ulLockFlags); + list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList); + 
write_unlock_irqrestore(&psLinuxEventObjectList->sLock, ulLockFlags); + + *phOSEventObject = psLinuxEventObject; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function LinuxEventObjectSignal + + @Description + + Linux wait object signaling function + + @Input hOSEventObjectList : Event object list handle + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList) +{ + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; + PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; + struct list_head *psListEntry, *psList; + + psList = &psLinuxEventObjectList->sList; + + /* + * We don't take the write lock in interrupt context, so we don't + * need to use read_lock_irqsave. + */ + read_lock(&psLinuxEventObjectList->sLock); + list_for_each(psListEntry, psList) + { + + psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList); + + atomic_inc(&psLinuxEventObject->sTimeStamp); + wake_up_interruptible(&psLinuxEventObject->sWait); + } + read_unlock(&psLinuxEventObjectList->sLock); + + return PVRSRV_OK; + +} + +/*! 
+****************************************************************************** + + @Function LinuxEventObjectWait + + @Description + + Linux wait object routine + + @Input hOSEventObject : Event object handle + + @Input ui32MSTimeout : Time out value in msec + + @Return PVRSRV_ERROR : Error code + +******************************************************************************/ +PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout) +{ + IMG_UINT32 ui32TimeStamp; + DEFINE_WAIT(sWait); + + PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject; + + IMG_UINT32 ui32TimeOutJiffies = msecs_to_jiffies(ui32MSTimeout); + + do + { + prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE); + ui32TimeStamp = (IMG_UINT32)atomic_read(&psLinuxEventObject->sTimeStamp); + + if(psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp) + { + break; + } + + LinuxUnLockMutex(&gPVRSRVLock); + + ui32TimeOutJiffies = (IMG_UINT32)schedule_timeout((IMG_INT32)ui32TimeOutJiffies); + + LinuxLockMutex(&gPVRSRVLock); +#if defined(DEBUG) + psLinuxEventObject->ui32Stats++; +#endif + + + } while (ui32TimeOutJiffies); + + finish_wait(&psLinuxEventObject->sWait, &sWait); + + psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp; + + return ui32TimeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT; + +} + diff --git a/pvr-source/services4/srvkm/env/linux/event.h b/pvr-source/services4/srvkm/env/linux/event.h new file mode 100644 index 0000000..5c1451c --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/event.h @@ -0,0 +1,48 @@ +/*************************************************************************/ /*! +@Title Event Object +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + + +PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList); +PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList); +PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject); +PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject); +PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList); +PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout); diff --git a/pvr-source/services4/srvkm/env/linux/gc_bvmapping.c b/pvr-source/services4/srvkm/env/linux/gc_bvmapping.c new file mode 100644 index 0000000..6c5d17a --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/gc_bvmapping.c @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2011 Texas Instruments, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <linux/bltsville.h> +#include <linux/bvinternal.h> +#include <linux/gcbv-iface.h> + +#include "gc_bvmapping.h" +#include "services_headers.h" + +void gc_bvmap_meminfo(PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + int i; + IMG_CPU_PHYADDR phy_addr; + unsigned long *page_addrs; + struct bvbuffdesc *buffdesc; + struct bvphysdesc *physdesc; + int num_pages; + struct bventry bv_entry; + enum bverror bv_error; + + gcbv_init(&bv_entry); + if (!bv_entry.bv_map) { + psMemInfo->bvmap_handle = NULL; + return; + } + + num_pages = (psMemInfo->uAllocSize + + PAGE_SIZE - 1) >> PAGE_SHIFT; + + page_addrs = kzalloc(sizeof(*page_addrs) * num_pages, GFP_KERNEL); + if (!page_addrs) { + printk(KERN_ERR "%s: Out of memory\n", __func__); + return; + } + + physdesc = kzalloc(sizeof(*physdesc), GFP_KERNEL); + buffdesc = kzalloc(sizeof(*buffdesc), GFP_KERNEL); + if (!buffdesc || !physdesc) { + printk(KERN_ERR "%s: Out of memory\n", __func__); + kfree(page_addrs); + kfree(physdesc); + kfree(buffdesc); + return; + } + + for (i = 0; i < num_pages; i++) { + phy_addr = OSMemHandleToCpuPAddr( + psMemInfo->sMemBlk.hOSMemHandle, i << PAGE_SHIFT); + page_addrs[i] = (u32)phy_addr.uiAddr; + } + + buffdesc->structsize = sizeof(*buffdesc); + buffdesc->map = NULL; + buffdesc->length = psMemInfo->uAllocSize; + buffdesc->auxtype = BVAT_PHYSDESC; + buffdesc->auxptr = physdesc; + physdesc->structsize = sizeof(*physdesc); + physdesc->pagesize = PAGE_SIZE; + physdesc->pagearray = page_addrs; + physdesc->pagecount = num_pages; + + /* + * For ion allocated buffers let's verify how many planes this + * meminfo consist of + */ + if(psMemInfo->ui32Flags & PVRSRV_MEM_ION) { + IMG_UINT32 num_addr_offsets = 0; + OSGetMemMultiPlaneInfo(psMemInfo->sMemBlk.hOSMemHandle, + NULL, &num_addr_offsets); + + /* + * Account for this meminfo plane offset (relative to the base + * address) if necessary + */ + if(num_addr_offsets > 0) + physdesc->pageoffset = psMemInfo->planeOffsets[0]; + + /* + * In BV there is no way to 
specify multiple offsets, check + * all planes have the same offset and report any discrepancy + */ + for (i = 1; i < num_addr_offsets; i++) { + IMG_UINT32 plane_offset = + psMemInfo->planeOffsets[i] % PAGE_SIZE; + if (psMemInfo->planeOffsets[0] != plane_offset) { + printk(KERN_WARNING "%s: meminfo %p offset 0 %d" + " != offset %d %d, coalignment is " + "missing\n", __func__, psMemInfo, + psMemInfo->planeOffsets[0], + i, plane_offset); + } + } + } + + bv_error = bv_entry.bv_map(buffdesc); + if (bv_error) { + printk(KERN_ERR "%s: Failed to map meminfo %p, bverror %d\n", + __func__, psMemInfo, bv_error); + psMemInfo->bvmap_handle = NULL; + } else + psMemInfo->bvmap_handle = buffdesc; + +} + +void gc_bvunmap_meminfo(PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + struct bvbuffdesc *buffdesc; + struct bvphysdesc *physdesc; + struct bventry bv_entry; + enum bverror bv_error; + + gcbv_init(&bv_entry); + if (!bv_entry.bv_map || !psMemInfo || !psMemInfo->bvmap_handle) + return; + + buffdesc = psMemInfo->bvmap_handle; + physdesc = (struct bvphysdesc*) buffdesc->auxptr; + bv_error = bv_entry.bv_unmap(buffdesc); + if (bv_error) { + printk(KERN_ERR "%s: Failed to unmap bvhandle %p from meminfo " + "%p, bverror %d\n", __func__, buffdesc, psMemInfo, + bv_error); + } + + kfree(physdesc->pagearray); + kfree(physdesc); + kfree(psMemInfo->bvmap_handle); + psMemInfo->bvmap_handle = NULL; +} + +IMG_VOID *gc_meminfo_to_hndl(PVRSRV_KERNEL_MEM_INFO *psMemInfo) +{ + return psMemInfo->bvmap_handle; +} diff --git a/pvr-source/services4/srvkm/env/linux/gc_bvmapping.h b/pvr-source/services4/srvkm/env/linux/gc_bvmapping.h new file mode 100644 index 0000000..6a3a2b1 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/gc_bvmapping.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2011 Texas Instruments, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef GC_BVMAPPING_H +#define GC_BVMAPPING_H + +#include "services_headers.h" + +void gc_bvunmap_meminfo(PVRSRV_KERNEL_MEM_INFO *psMemInfo); + +void gc_bvmap_meminfo(PVRSRV_KERNEL_MEM_INFO *psMemInfo); + +IMG_VOID *gc_meminfo_to_hndl(PVRSRV_KERNEL_MEM_INFO *psMemInfo); + +#endif diff --git a/pvr-source/services4/srvkm/env/linux/ion.c b/pvr-source/services4/srvkm/env/linux/ion.c new file mode 100644 index 0000000..3e772bc --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/ion.c @@ -0,0 +1,363 @@ +/*************************************************************************/ /*! +@Title Ion driver inter-operability code. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "ion.h" + +#include "services.h" +#include "servicesint.h" +#include "mutex.h" +#include "lock.h" +#include "mm.h" +#include "handle.h" +#include "perproc.h" +#include "env_perproc.h" +#include "private_data.h" +#include "pvr_debug.h" + +#include <linux/module.h> +#include <linux/file.h> +#include <linux/fs.h> + +#if defined (CONFIG_ION_OMAP) +#define MAX_HANDLES_PER_FD 2 +extern struct ion_client *gpsIONClient; + +int PVRSRVExportFDToIONHandles(int fd, struct ion_client **client, + struct ion_handle **handles, + unsigned int *num_handles) +{ + PVRSRV_FILE_PRIVATE_DATA *psPrivateData; + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + LinuxMemArea *psLinuxMemArea; + PVRSRV_ERROR eError; + struct file *psFile; + int i; + unsigned int ui32NumHandles = *num_handles; + int ret = -EINVAL; + + /* Take the bridge mutex so the handle won't be freed underneath us */ + LinuxLockMutex(&gPVRSRVLock); + + psFile = fget(fd); + if(!psFile) + goto err_unlock; + + psPrivateData = psFile->private_data; + if(!psPrivateData) + { + PVR_DPF((PVR_DBG_ERROR, "%s: struct file* has no private_data; " + "invalid export handle", __func__)); + goto err_fput; + } + + eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE, + (IMG_PVOID *)&psKernelMemInfo, + psPrivateData->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to look up MEM_INFO handle", + __func__)); + goto err_fput; + } + + psLinuxMemArea = (LinuxMemArea *)psKernelMemInfo->sMemBlk.hOSMemHandle; + BUG_ON(psLinuxMemArea == IMG_NULL); + + if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_ION) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Valid handle, but not an ION buffer", + __func__)); + goto err_fput; + } + + /* Client is requesting fewer handles then we have */ + if(ui32NumHandles < psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes) { + + PVR_DPF((PVR_DBG_ERROR, "%s: Client requested %u handles, 
but we have %u", + __func__, + ui32NumHandles, + psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes)); + + /* Clear client handles */ + for (i = 0; i < ui32NumHandles; i++) + handles[i] = NULL; + + /* Return number of handles to client */ + *num_handles = psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes; + goto err_fput; + } + + for (i = 0; (i < psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes) && (i < MAX_HANDLES_PER_FD); i++) + handles[i] = psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i]; + + *num_handles = i; + + if(client) + *client = gpsIONClient; + + ret = 0; + +err_fput: + fput(psFile); +err_unlock: + /* Allow PVRSRV clients to communicate with srvkm again */ + LinuxUnLockMutex(&gPVRSRVLock); + + return ret; +} + +struct ion_handle * +PVRSRVExportFDToIONHandle(int fd, struct ion_client **client) +{ + unsigned int num_handles = 1; + struct ion_handle *psHandle = IMG_NULL; + PVRSRVExportFDToIONHandles(fd, client, &psHandle, &num_handles); + return psHandle; +} + +EXPORT_SYMBOL(PVRSRVExportFDToIONHandles); +EXPORT_SYMBOL(PVRSRVExportFDToIONHandle); +#endif + +#if defined (SUPPORT_ION) +#include "syscommon.h" +#include "env_data.h" +#include "../drivers/gpu/ion/ion_priv.h" +#include "linux/kernel.h" + +struct ion_heap **apsIonHeaps; +struct ion_device *psIonDev; + +static struct ion_platform_data generic_config = { + .nr = 2, + .heaps = { + { + .type = ION_HEAP_TYPE_SYSTEM_CONTIG, + .name = "System contig", + .id = ION_HEAP_TYPE_SYSTEM_CONTIG, + }, + { + .type = ION_HEAP_TYPE_SYSTEM, + .name = "System", + .id = ION_HEAP_TYPE_SYSTEM, + } + } +}; + +PVRSRV_ERROR IonInit(IMG_VOID) +{ + int uiHeapCount = generic_config.nr; + int uiError; + int i; + + apsIonHeaps = kzalloc(sizeof(struct ion_heap *) * uiHeapCount, GFP_KERNEL); + /* Create the ion devicenode */ + psIonDev = ion_device_create(NULL); + if (IS_ERR_OR_NULL(psIonDev)) { + kfree(apsIonHeaps); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Register all the heaps */ + for (i = 0; i < 
generic_config.nr; i++) + { + struct ion_platform_heap *psPlatHeapData = &generic_config.heaps[i]; + + apsIonHeaps[i] = ion_heap_create(psPlatHeapData); + if (IS_ERR_OR_NULL(apsIonHeaps[i])) + { + uiError = PTR_ERR(apsIonHeaps[i]); + goto failHeapCreate; + } + ion_device_add_heap(psIonDev, apsIonHeaps[i]); + } + + return PVRSRV_OK; +failHeapCreate: + for (i = 0; i < uiHeapCount; i++) { + if (apsIonHeaps[i]) + { + ion_heap_destroy(apsIonHeaps[i]); + } + } + kfree(apsIonHeaps); + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + +IMG_VOID IonDeinit(IMG_VOID) +{ + int uiHeapCount = generic_config.nr; + int i; + + for (i = 0; i < uiHeapCount; i++) { + if (apsIonHeaps[i]) + { + ion_heap_destroy(apsIonHeaps[i]); + } + } + kfree(apsIonHeaps); + ion_device_destroy(psIonDev); +} + +typedef struct _ION_IMPORT_DATA_ +{ + struct ion_client *psIonClient; + struct ion_handle *psIonHandle; + IMG_PVOID pvKernAddr; +} ION_IMPORT_DATA; + +PVRSRV_ERROR IonImportBufferAndAquirePhysAddr(IMG_HANDLE hIonDev, + IMG_HANDLE hIonFD, + IMG_UINT32 *pui32PageCount, + IMG_SYS_PHYADDR **ppasSysPhysAddr, + IMG_PVOID *ppvKernAddr, + IMG_HANDLE *phPriv) +{ + struct ion_client *psIonClient = hIonDev; + struct ion_handle *psIonHandle; + struct scatterlist *psScatterList; + struct scatterlist *psTemp; + IMG_SYS_PHYADDR *pasSysPhysAddr = NULL; + ION_IMPORT_DATA *psImportData; + PVRSRV_ERROR eError; + IMG_UINT32 ui32PageCount = 0; + IMG_UINT32 i; + IMG_PVOID pvKernAddr; + int fd = (int) hIonFD; + + psImportData = kmalloc(sizeof(ION_IMPORT_DATA), GFP_KERNEL); + if (psImportData == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + /* Get the buffer handle */ + psIonHandle = ion_import_fd(psIonClient, fd); + if (psIonHandle == IMG_NULL) + { + eError = PVRSRV_ERROR_BAD_MAPPING; + goto exitFailImport; + } + + /* Create data for free callback */ + psImportData->psIonClient = psIonClient; + psImportData->psIonHandle = psIonHandle; + + psScatterList = ion_map_dma(psIonClient, psIonHandle); + if (psScatterList == 
NULL) + { + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto exitFailMap; + } + + /* + We do a two pass process, 1st workout how many pages there + are, 2nd fill in the data. + */ + for (i=0;i<2;i++) + { + psTemp = psScatterList; + if (i == 1) + { + pasSysPhysAddr = kmalloc(sizeof(IMG_SYS_PHYADDR) * ui32PageCount, GFP_KERNEL); + if (pasSysPhysAddr == NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto exitFailAlloc; + } + ui32PageCount = 0; /* Reset the page count a we use if for the index */ + } + + while(psTemp) + { + IMG_UINT32 j; + + for (j=0;j<psTemp->length;j+=PAGE_SIZE) + { + if (i == 1) + { + /* Pass 2: Get the page data */ + pasSysPhysAddr[ui32PageCount].uiAddr = sg_phys(psTemp); + } + ui32PageCount++; + } + psTemp = sg_next(psTemp); + } + } + + pvKernAddr = ion_map_kernel(psIonClient, psIonHandle); + if (IS_ERR(pvKernAddr)) + { + pvKernAddr = IMG_NULL; + } + + psImportData->pvKernAddr = pvKernAddr; + + *ppvKernAddr = pvKernAddr; + *pui32PageCount = ui32PageCount; + *ppasSysPhysAddr = pasSysPhysAddr; + *phPriv = psImportData; + return PVRSRV_OK; + +exitFailAlloc: + ion_unmap_dma(psIonClient, psIonHandle); +exitFailMap: + ion_free(psIonClient, psIonHandle); +exitFailImport: + kfree(psImportData); + return eError; +} + + +IMG_VOID IonUnimportBufferAndReleasePhysAddr(IMG_HANDLE hPriv) +{ + ION_IMPORT_DATA *psImportData = hPriv; + + ion_unmap_dma(psImportData->psIonClient, psImportData->psIonHandle); + if (psImportData->pvKernAddr) + { + ion_unmap_kernel(psImportData->psIonClient, psImportData->psIonHandle); + } + ion_free(psImportData->psIonClient, psImportData->psIonHandle); + kfree(psImportData); +} +#endif diff --git a/pvr-source/services4/srvkm/env/linux/ion.h b/pvr-source/services4/srvkm/env/linux/ion.h new file mode 100644 index 0000000..1cf385d --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/ion.h @@ -0,0 +1,74 @@ +/*************************************************************************/ /*! +@Title Ion driver inter-operability code. 
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __IMG_LINUX_ION_H__ +#define __IMG_LINUX_ION_H__ + +#include <linux/ion.h> +#if defined (CONFIG_ION_OMAP) +#include <linux/omap_ion.h> +#endif +#if defined (SUPPORT_ION) +#include "img_types.h" +#include "servicesext.h" +#endif + +int PVRSRVExportFDToIONHandles(int fd, struct ion_client **client, + struct ion_handle **handles, + unsigned int *num_handles); + +struct ion_handle *PVRSRVExportFDToIONHandle(int fd, + struct ion_client **client); + +#if defined (SUPPORT_ION) +PVRSRV_ERROR IonInit(IMG_VOID); +IMG_VOID IonDeinit(IMG_VOID); + +PVRSRV_ERROR IonImportBufferAndAquirePhysAddr(IMG_HANDLE hIonDev, + IMG_HANDLE hIonFD, + IMG_UINT32 *pui32PageCount, + IMG_SYS_PHYADDR **ppasSysPhysAddr, + IMG_PVOID *ppvKernAddr, + IMG_HANDLE *phPriv); + +IMG_VOID IonUnimportBufferAndReleasePhysAddr(IMG_HANDLE hPriv); +#endif +#endif /* __IMG_LINUX_ION_H__ */ diff --git a/pvr-source/services4/srvkm/env/linux/linkage.h b/pvr-source/services4/srvkm/env/linux/linkage.h new file mode 100644 index 0000000..55cd4f0 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/linkage.h @@ -0,0 +1,72 @@ +/*************************************************************************/ /*! +@Title Linux specific Services code internal interfaces +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Interfaces between various parts of the Linux specific + Services code, that don't have any other obvious + header file to go into. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef __LINKAGE_H__ +#define __LINKAGE_H__ + +#if !defined(SUPPORT_DRI_DRM) +long PVRSRV_BridgeDispatchKM(struct file *file, unsigned int cmd, unsigned long arg); +#endif + +IMG_VOID PVRDPFInit(IMG_VOID); +PVRSRV_ERROR PVROSFuncInit(IMG_VOID); +IMG_VOID PVROSFuncDeInit(IMG_VOID); + +#ifdef DEBUG + +IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data); +void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el); + +#ifdef PVR_MANUAL_POWER_CONTROL +IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data); + +void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el); + +#endif /* PVR_MANUAL_POWER_CONTROL */ + +#endif /* DEBUG */ + +#endif /* __LINKAGE_H__ */ +/***************************************************************************** + End of file (linkage.h) +*****************************************************************************/ diff --git a/pvr-source/services4/srvkm/env/linux/lock.h b/pvr-source/services4/srvkm/env/linux/lock.h new file mode 100644 index 0000000..11adcaa --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/lock.h @@ -0,0 +1,56 @@ +/*************************************************************************/ /*! +@Title Main driver lock +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description The main driver lock, held in most places in + the driver. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef __LOCK_H__ +#define __LOCK_H__ + +/* + * Main driver lock, used to ensure driver code is single threaded. + * There are some places where this lock must not be taken, such as + * in the mmap related deriver entry points. + */ +extern PVRSRV_LINUX_MUTEX gPVRSRVLock; + +#endif /* __LOCK_H__ */ +/***************************************************************************** + End of file (lock.h) +*****************************************************************************/ diff --git a/pvr-source/services4/srvkm/env/linux/mm.c b/pvr-source/services4/srvkm/env/linux/mm.c new file mode 100644 index 0000000..0815e46 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/mm.c @@ -0,0 +1,2945 @@ +/*************************************************************************/ /*! +@Title Misc memory management utility functions for Linux +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#endif + +#if !defined(PVR_LINUX_MEM_AREA_POOL_MAX_PAGES) +#define PVR_LINUX_MEM_AREA_POOL_MAX_PAGES 0 +#endif + +#include <linux/kernel.h> +#include <asm/atomic.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <asm/io.h> +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) +#include <linux/wrapper.h> +#endif +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/sched.h> + +#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) +#include <linux/shrinker.h> +#endif +#endif + +#include "img_defs.h" +#include "services.h" +#include "servicesint.h" +#include "syscommon.h" +#include "mutils.h" +#include "mm.h" +#include "pvrmmap.h" +#include "mmap.h" +#include "osfunc.h" +#include "pvr_debug.h" +#include "proc.h" +#include "mutex.h" +#include "lock.h" + +#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + #include "lists.h" +#endif + +/* If there is no explicit definition + * for the minimum DMM alignment size, + * then set it to "0" and let ION/DMM + * set the minimum value. */ +#ifndef CONFIG_TILER_GRANULARITY +#define CONFIG_TILER_GRANULARITY 0 +#endif + +/* + * The page pool entry count is an atomic int so that the shrinker function + * can return it even when we can't take the lock that protects the page pool + * list. 
+ */ +static atomic_t g_sPagePoolEntryCount = ATOMIC_INIT(0); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +typedef enum { + DEBUG_MEM_ALLOC_TYPE_KMALLOC, + DEBUG_MEM_ALLOC_TYPE_VMALLOC, + DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, + DEBUG_MEM_ALLOC_TYPE_IOREMAP, + DEBUG_MEM_ALLOC_TYPE_IO, + DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, + DEBUG_MEM_ALLOC_TYPE_ION, +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + DEBUG_MEM_ALLOC_TYPE_VMAP, +#endif + DEBUG_MEM_ALLOC_TYPE_COUNT +} DEBUG_MEM_ALLOC_TYPE; + +typedef struct _DEBUG_MEM_ALLOC_REC +{ + DEBUG_MEM_ALLOC_TYPE eAllocType; + IMG_VOID *pvKey; /* Some unique value (private to the eAllocType) */ + IMG_VOID *pvCpuVAddr; + IMG_UINT32 ulCpuPAddr; + IMG_VOID *pvPrivateData; + IMG_UINT32 ui32Bytes; + pid_t pid; + IMG_CHAR *pszFileName; + IMG_UINT32 ui32Line; + + struct _DEBUG_MEM_ALLOC_REC *psNext; + struct _DEBUG_MEM_ALLOC_REC **ppsThis; +} DEBUG_MEM_ALLOC_REC; + +static IMPLEMENT_LIST_ANY_VA_2(DEBUG_MEM_ALLOC_REC, IMG_BOOL, IMG_FALSE) +static IMPLEMENT_LIST_ANY_VA(DEBUG_MEM_ALLOC_REC) +static IMPLEMENT_LIST_FOR_EACH(DEBUG_MEM_ALLOC_REC) +static IMPLEMENT_LIST_INSERT(DEBUG_MEM_ALLOC_REC) +static IMPLEMENT_LIST_REMOVE(DEBUG_MEM_ALLOC_REC) + + +static DEBUG_MEM_ALLOC_REC *g_MemoryRecords; + +static IMG_UINT32 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT]; +static IMG_UINT32 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT]; + +/* vmalloc + kmalloc + alloc_pages + kmem_cache */ +static IMG_UINT32 g_SysRAMWaterMark; /* Doesn't include page pool */ +static IMG_UINT32 g_SysRAMHighWaterMark; /* *DOES* include page pool */ + +static inline IMG_UINT32 +SysRAMTrueWaterMark(void) +{ + return g_SysRAMWaterMark + PAGES_TO_BYTES(atomic_read(&g_sPagePoolEntryCount)); +} + +/* ioremap + io */ +static IMG_UINT32 g_IOMemWaterMark; +static IMG_UINT32 g_IOMemHighWaterMark; + +static IMG_VOID DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType, + IMG_VOID *pvKey, + IMG_VOID *pvCpuVAddr, + IMG_UINT32 ulCpuPAddr, + IMG_VOID *pvPrivateData, + IMG_UINT32 ui32Bytes, + 
IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line); + +static IMG_VOID DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); + +static IMG_CHAR *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType); + + +static struct proc_dir_entry *g_SeqFileMemoryRecords; +static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off); +static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el); +static void* ProcSeqOff2ElementMemoryRecords(struct seq_file * sfile, loff_t off); + +#endif + + +#if defined(DEBUG_LINUX_MEM_AREAS) +typedef struct _DEBUG_LINUX_MEM_AREA_REC +{ + LinuxMemArea *psLinuxMemArea; + IMG_UINT32 ui32Flags; + pid_t pid; + + struct _DEBUG_LINUX_MEM_AREA_REC *psNext; + struct _DEBUG_LINUX_MEM_AREA_REC **ppsThis; +}DEBUG_LINUX_MEM_AREA_REC; + + +static IMPLEMENT_LIST_ANY_VA(DEBUG_LINUX_MEM_AREA_REC) +static IMPLEMENT_LIST_FOR_EACH(DEBUG_LINUX_MEM_AREA_REC) +static IMPLEMENT_LIST_INSERT(DEBUG_LINUX_MEM_AREA_REC) +static IMPLEMENT_LIST_REMOVE(DEBUG_LINUX_MEM_AREA_REC) + + + + +static DEBUG_LINUX_MEM_AREA_REC *g_LinuxMemAreaRecords; +static IMG_UINT32 g_LinuxMemAreaCount; +static IMG_UINT32 g_LinuxMemAreaWaterMark; +static IMG_UINT32 g_LinuxMemAreaHighWaterMark; + + +static struct proc_dir_entry *g_SeqFileMemArea; + +static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off); +static void ProcSeqShowMemArea(struct seq_file *sfile,void* el); +static void* ProcSeqOff2ElementMemArea(struct seq_file *sfile, loff_t off); + +#endif + +#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +static PVRSRV_LINUX_MUTEX g_sDebugMutex; +#endif + +#if (defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)) +static void ProcSeqStartstopDebugMutex(struct seq_file *sfile,IMG_BOOL start); +#endif + +typedef struct +{ + /* Linkage for page pool LRU list */ + struct list_head sPagePoolItem; + + struct page *psPage; +} 
LinuxPagePoolEntry; + +static LinuxKMemCache *g_PsLinuxMemAreaCache; +static LinuxKMemCache *g_PsLinuxPagePoolCache; + +static LIST_HEAD(g_sPagePoolList); +static int g_iPagePoolMaxEntries; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +static IMG_VOID ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length); +static IMG_VOID UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length); +#endif + +static LinuxMemArea *LinuxMemAreaStructAlloc(IMG_VOID); +static IMG_VOID LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea); +#if defined(DEBUG_LINUX_MEM_AREAS) +static IMG_VOID DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags); +static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea); +static IMG_VOID DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea); +#endif + + +static inline IMG_BOOL +AreaIsUncached(IMG_UINT32 ui32AreaFlags) +{ + return (ui32AreaFlags & (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_UNCACHED)) != 0; +} + +static inline IMG_BOOL +CanFreeToPool(LinuxMemArea *psLinuxMemArea) +{ + return AreaIsUncached(psLinuxMemArea->ui32AreaFlags) && !psLinuxMemArea->bNeedsCacheInvalidate; +} + +IMG_VOID * +_KMallocWrapper(IMG_UINT32 ui32ByteSize, gfp_t uFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ + IMG_VOID *pvRet; + pvRet = kmalloc(ui32ByteSize, uFlags); +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + if (pvRet) + { + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMALLOC, + pvRet, + pvRet, + 0, + NULL, + ui32ByteSize, + pszFileName, + ui32Line + ); + } +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + return pvRet; +} + + +IMG_VOID +_KFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMALLOC, pvCpuVAddr, pszFileName, ui32Line); +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); 
+#endif + kfree(pvCpuVAddr); +} + + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +static IMG_VOID +DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType, + IMG_VOID *pvKey, + IMG_VOID *pvCpuVAddr, + IMG_UINT32 ulCpuPAddr, + IMG_VOID *pvPrivateData, + IMG_UINT32 ui32Bytes, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line) +{ + DEBUG_MEM_ALLOC_REC *psRecord; + + LinuxLockMutex(&g_sDebugMutex); + + psRecord = kmalloc(sizeof(DEBUG_MEM_ALLOC_REC), GFP_KERNEL); + + psRecord->eAllocType = eAllocType; + psRecord->pvKey = pvKey; + psRecord->pvCpuVAddr = pvCpuVAddr; + psRecord->ulCpuPAddr = ulCpuPAddr; + psRecord->pvPrivateData = pvPrivateData; + psRecord->pid = OSGetCurrentProcessIDKM(); + psRecord->ui32Bytes = ui32Bytes; + psRecord->pszFileName = pszFileName; + psRecord->ui32Line = ui32Line; + + List_DEBUG_MEM_ALLOC_REC_Insert(&g_MemoryRecords, psRecord); + + g_WaterMarkData[eAllocType] += ui32Bytes; + if (g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType]) + { + g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType]; + } + + if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC + || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC + || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES + || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) + { + IMG_UINT32 ui32SysRAMTrueWaterMark; + + g_SysRAMWaterMark += ui32Bytes; + ui32SysRAMTrueWaterMark = SysRAMTrueWaterMark(); + + if (ui32SysRAMTrueWaterMark > g_SysRAMHighWaterMark) + { + g_SysRAMHighWaterMark = ui32SysRAMTrueWaterMark; + } + } + else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP + || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO) + { + g_IOMemWaterMark += ui32Bytes; + if (g_IOMemWaterMark > g_IOMemHighWaterMark) + { + g_IOMemHighWaterMark = g_IOMemWaterMark; + } + } + + LinuxUnLockMutex(&g_sDebugMutex); +} + + +static IMG_BOOL DebugMemAllocRecordRemove_AnyVaCb(DEBUG_MEM_ALLOC_REC *psCurrentRecord, va_list va) +{ + DEBUG_MEM_ALLOC_TYPE eAllocType; + IMG_VOID *pvKey; + + eAllocType = va_arg(va, DEBUG_MEM_ALLOC_TYPE); + pvKey = 
va_arg(va, IMG_VOID*); + + if (psCurrentRecord->eAllocType == eAllocType + && psCurrentRecord->pvKey == pvKey) + { + eAllocType = psCurrentRecord->eAllocType; + g_WaterMarkData[eAllocType] -= psCurrentRecord->ui32Bytes; + + if (eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC + || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC + || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES + || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) + { + g_SysRAMWaterMark -= psCurrentRecord->ui32Bytes; + } + else if (eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP + || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO) + { + g_IOMemWaterMark -= psCurrentRecord->ui32Bytes; + } + + List_DEBUG_MEM_ALLOC_REC_Remove(psCurrentRecord); + kfree(psCurrentRecord); + + return IMG_TRUE; + } + else + { + return IMG_FALSE; + } +} + + +static IMG_VOID +DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ +/* DEBUG_MEM_ALLOC_REC **ppsCurrentRecord;*/ + + LinuxLockMutex(&g_sDebugMutex); + + /* Locate the corresponding allocation entry */ + if (!List_DEBUG_MEM_ALLOC_REC_IMG_BOOL_Any_va(g_MemoryRecords, + DebugMemAllocRecordRemove_AnyVaCb, + eAllocType, + pvKey)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for type=%s with pvKey=%p (called from %s, line %d\n", + __FUNCTION__, DebugMemAllocRecordTypeToString(eAllocType), pvKey, + pszFileName, ui32Line)); + } + + LinuxUnLockMutex(&g_sDebugMutex); +} + + +static IMG_CHAR * +DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType) +{ + IMG_CHAR *apszDebugMemoryRecordTypes[] = { + "KMALLOC", + "VMALLOC", + "ALLOC_PAGES", + "IOREMAP", + "IO", + "KMEM_CACHE_ALLOC", +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + "VMAP" +#endif + }; + return apszDebugMemoryRecordTypes[eAllocType]; +} +#endif + + +static IMG_BOOL +AllocFlagsToPGProt(pgprot_t *pPGProtFlags, IMG_UINT32 ui32AllocFlags) +{ + pgprot_t PGProtFlags; + + switch (ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK) + { + case PVRSRV_HAP_CACHED: + 
PGProtFlags = PAGE_KERNEL; + break; + case PVRSRV_HAP_WRITECOMBINE: + PGProtFlags = PGPROT_WC(PAGE_KERNEL); + break; + case PVRSRV_HAP_UNCACHED: + PGProtFlags = PGPROT_UC(PAGE_KERNEL); + break; + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Unknown mapping flags=0x%08x", + __FUNCTION__, ui32AllocFlags)); + dump_stack(); + return IMG_FALSE; + } + + *pPGProtFlags = PGProtFlags; + + return IMG_TRUE; +} + +IMG_VOID * +_VMallocWrapper(IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32AllocFlags, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line) +{ + pgprot_t PGProtFlags; + IMG_VOID *pvRet; + + if (!AllocFlagsToPGProt(&PGProtFlags, ui32AllocFlags)) + { + return NULL; + } + + /* Allocate virtually contiguous pages */ + pvRet = __vmalloc(ui32Bytes, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + if (pvRet) + { + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC, + pvRet, + pvRet, + 0, + NULL, + PAGE_ALIGN(ui32Bytes), + pszFileName, + ui32Line + ); + } +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + + return pvRet; +} + + +IMG_VOID +_VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMALLOC, pvCpuVAddr, pszFileName, ui32Line); +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + vfree(pvCpuVAddr); +} + + +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) +static IMG_VOID * +_VMapWrapper(struct page **ppsPageList, IMG_UINT32 ui32NumPages, IMG_UINT32 ui32AllocFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ + pgprot_t PGProtFlags; + IMG_VOID *pvRet; + + if (!AllocFlagsToPGProt(&PGProtFlags, ui32AllocFlags)) + { + return NULL; + } + + pvRet = vmap(ppsPageList, ui32NumPages, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + if (pvRet) + { + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMAP, + 
pvRet, + pvRet, + 0, + NULL, + PAGES_TO_BYTES(ui32NumPages), + pszFileName, + ui32Line + ); + } +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + + return pvRet; +} + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define VMapWrapper(ppsPageList, ui32Bytes, ui32AllocFlags) _VMapWrapper(ppsPageList, ui32Bytes, ui32AllocFlags, __FILE__, __LINE__) +#else +#define VMapWrapper(ppsPageList, ui32Bytes, ui32AllocFlags) _VMapWrapper(ppsPageList, ui32Bytes, ui32AllocFlags, NULL, 0) +#endif + + +static IMG_VOID +_VUnmapWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMAP, pvCpuVAddr, pszFileName, ui32Line); +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + vunmap(pvCpuVAddr); +} + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define VUnmapWrapper(pvCpuVAddr) _VUnmapWrapper(pvCpuVAddr, __FILE__, __LINE__) +#else +#define VUnmapWrapper(pvCpuVAddr) _VUnmapWrapper(pvCpuVAddr, NULL, 0) +#endif + +#endif /* defined(PVR_LINUX_MEM_AREA_USE_VMAP) */ + + +IMG_VOID +_KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, pvObject, pszFileName, ui32Line); +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + + kmem_cache_free(psCache, pvObject); +} + + +const IMG_CHAR * +KMemCacheNameWrapper(LinuxKMemCache *psCache) +{ + PVR_UNREFERENCED_PARAMETER(psCache); + + /* In this case kmem_cache_t is an incomplete typedef, + * so we can't even de-reference to get the name member. 
It is also a GPL export symbol */ + return ""; +} + + +static LinuxPagePoolEntry * +LinuxPagePoolEntryAlloc(IMG_VOID) +{ + return KMemCacheAllocWrapper(g_PsLinuxPagePoolCache, GFP_KERNEL); +} + +static IMG_VOID +LinuxPagePoolEntryFree(LinuxPagePoolEntry *psPagePoolEntry) +{ + KMemCacheFreeWrapper(g_PsLinuxPagePoolCache, psPagePoolEntry); +} + + +static struct page * +AllocPageFromLinux(void) +{ + struct page *psPage; + + psPage = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0); + if (!psPage) + { + return NULL; + + } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + /* Reserve those pages to allow them to be re-mapped to user space */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) + SetPageReserved(psPage); +#else + mem_map_reserve(psPage); +#endif +#endif + return psPage; +} + + +static IMG_VOID +FreePageToLinux(struct page *psPage) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) + ClearPageReserved(psPage); +#else + mem_map_reserve(psPage); +#endif +#endif + __free_pages(psPage, 0); +} + + +#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) +static DEFINE_MUTEX(g_sPagePoolMutex); + +static inline void +PagePoolLock(void) +{ + mutex_lock(&g_sPagePoolMutex); +} + +static inline void +PagePoolUnlock(void) +{ + mutex_unlock(&g_sPagePoolMutex); +} + +static inline int +PagePoolTrylock(void) +{ + return mutex_trylock(&g_sPagePoolMutex); +} + +#else /* (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) */ +static inline void +PagePoolLock(void) +{ +} + +static inline void +PagePoolUnlock(void) +{ +} + +static inline int +PagePoolTrylock(void) +{ + return 1; +} +#endif /* (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) */ + + +static inline void +AddEntryToPool(LinuxPagePoolEntry *psPagePoolEntry) +{ + list_add_tail(&psPagePoolEntry->sPagePoolItem, &g_sPagePoolList); + atomic_inc(&g_sPagePoolEntryCount); +} + +static inline void +RemoveEntryFromPool(LinuxPagePoolEntry *psPagePoolEntry) +{ + list_del(&psPagePoolEntry->sPagePoolItem); + 
atomic_dec(&g_sPagePoolEntryCount); +} + +static inline LinuxPagePoolEntry * +RemoveFirstEntryFromPool(void) +{ + LinuxPagePoolEntry *psPagePoolEntry; + + if (list_empty(&g_sPagePoolList)) + { + PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0); + + return NULL; + } + + PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) > 0); + + psPagePoolEntry = list_first_entry(&g_sPagePoolList, LinuxPagePoolEntry, sPagePoolItem); + + RemoveEntryFromPool(psPagePoolEntry); + + return psPagePoolEntry; +} + +static struct page * +AllocPage(IMG_UINT32 ui32AreaFlags, IMG_BOOL *pbFromPagePool) +{ + struct page *psPage = NULL; + + /* + * Only uncached allocations can come from the page pool. + * The page pool is currently used to reduce the cost of + * invalidating the CPU cache when uncached memory is allocated. + */ + if (AreaIsUncached(ui32AreaFlags) && atomic_read(&g_sPagePoolEntryCount) != 0) + { + LinuxPagePoolEntry *psPagePoolEntry; + + PagePoolLock(); + psPagePoolEntry = RemoveFirstEntryFromPool(); + PagePoolUnlock(); + + /* List may have changed since we checked the counter */ + if (psPagePoolEntry) + { + psPage = psPagePoolEntry->psPage; + LinuxPagePoolEntryFree(psPagePoolEntry); + *pbFromPagePool = IMG_TRUE; + } + } + + if (!psPage) + { + psPage = AllocPageFromLinux(); + if (psPage) + { + *pbFromPagePool = IMG_FALSE; + } + } + + return psPage; + +} + +static IMG_VOID +FreePage(IMG_BOOL bToPagePool, struct page *psPage) +{ + /* Only uncached allocations can be freed to the page pool */ + if (bToPagePool && atomic_read(&g_sPagePoolEntryCount) < g_iPagePoolMaxEntries) + { + LinuxPagePoolEntry *psPagePoolEntry = LinuxPagePoolEntryAlloc(); + if (psPagePoolEntry) + { + psPagePoolEntry->psPage = psPage; + + PagePoolLock(); + AddEntryToPool(psPagePoolEntry); + PagePoolUnlock(); + + return; + } + } + + FreePageToLinux(psPage); +} + +static IMG_VOID +FreePagePool(IMG_VOID) +{ + LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; + + PagePoolLock(); + +#if 
(PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) + PVR_DPF((PVR_DBG_MESSAGE,"%s: Freeing %d pages from pool", __FUNCTION__, atomic_read(&g_sPagePoolEntryCount))); +#else + PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0); + PVR_ASSERT(list_empty(&g_sPagePoolList)); +#endif + + list_for_each_entry_safe(psPagePoolEntry, psTempPoolEntry, &g_sPagePoolList, sPagePoolItem) + { + RemoveEntryFromPool(psPagePoolEntry); + + FreePageToLinux(psPagePoolEntry->psPage); + LinuxPagePoolEntryFree(psPagePoolEntry); + } + + PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0); + + PagePoolUnlock(); +} + +#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK) +#if defined(PVRSRV_NEED_PVR_ASSERT) +static struct shrinker g_sShrinker; +#endif + +static int +ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) +{ + unsigned long uNumToScan = psShrinkControl->nr_to_scan; + + PVR_ASSERT(psShrinker == &g_sShrinker); + (void)psShrinker; + + if (uNumToScan != 0) + { + LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; + + PVR_DPF((PVR_DBG_MESSAGE,"%s: Number to scan: %ld", __FUNCTION__, uNumToScan)); + PVR_DPF((PVR_DBG_MESSAGE,"%s: Pages in pool before scan: %d", __FUNCTION__, atomic_read(&g_sPagePoolEntryCount))); + + if (!PagePoolTrylock()) + { + PVR_TRACE(("%s: Couldn't get page pool lock", __FUNCTION__)); + return -1; + } + + list_for_each_entry_safe(psPagePoolEntry, psTempPoolEntry, &g_sPagePoolList, sPagePoolItem) + { + RemoveEntryFromPool(psPagePoolEntry); + + FreePageToLinux(psPagePoolEntry->psPage); + LinuxPagePoolEntryFree(psPagePoolEntry); + + if (--uNumToScan == 0) + { + break; + } + } + + if (list_empty(&g_sPagePoolList)) + { + PVR_ASSERT(atomic_read(&g_sPagePoolEntryCount) == 0); + } + + PagePoolUnlock(); + + PVR_DPF((PVR_DBG_MESSAGE,"%s: Pages in pool after scan: %d", __FUNCTION__, atomic_read(&g_sPagePoolEntryCount))); + } + + return atomic_read(&g_sPagePoolEntryCount); +} +#endif + +static IMG_BOOL +AllocPages(IMG_UINT32 ui32AreaFlags, struct page 
***pppsPageList, IMG_HANDLE *phBlockPageList, IMG_UINT32 ui32NumPages, IMG_BOOL *pbFromPagePool) +{ + struct page **ppsPageList; + IMG_HANDLE hBlockPageList; + IMG_INT32 i; /* Must be signed; see "for" loop conditions */ + PVRSRV_ERROR eError; + IMG_BOOL bFromPagePool = IMG_FALSE; + + eError = OSAllocMem(0, sizeof(*ppsPageList) * ui32NumPages, (IMG_VOID **)&ppsPageList, &hBlockPageList, + "Array of pages"); + if (eError != PVRSRV_OK) + { + goto failed_page_list_alloc; + } + + *pbFromPagePool = IMG_TRUE; + for(i = 0; i < (IMG_INT32)ui32NumPages; i++) + { + ppsPageList[i] = AllocPage(ui32AreaFlags, &bFromPagePool); + if (!ppsPageList[i]) + { + goto failed_alloc_pages; + } + *pbFromPagePool &= bFromPagePool; + } + + *pppsPageList = ppsPageList; + *phBlockPageList = hBlockPageList; + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, + ppsPageList, + 0, + 0, + NULL, + PAGES_TO_BYTES(ui32NumPages), + "unknown", + 0 + ); +#endif + + return IMG_TRUE; + +failed_alloc_pages: + for(i--; i >= 0; i--) + { + FreePage(*pbFromPagePool, ppsPageList[i]); + } + (IMG_VOID) OSFreeMem(0, sizeof(*ppsPageList) * ui32NumPages, ppsPageList, hBlockPageList); + +failed_page_list_alloc: + return IMG_FALSE; +} + + +static IMG_VOID +FreePages(IMG_BOOL bToPagePool, struct page **ppsPageList, IMG_HANDLE hBlockPageList, IMG_UINT32 ui32NumPages) +{ + IMG_INT32 i; + + for(i = 0; i < (IMG_INT32)ui32NumPages; i++) + { + FreePage(bToPagePool, ppsPageList[i]); + } + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, ppsPageList, __FILE__, __LINE__); +#endif + + (IMG_VOID) OSFreeMem(0, sizeof(*ppsPageList) * ui32NumPages, ppsPageList, hBlockPageList); +} + + +LinuxMemArea * +NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags) +{ + LinuxMemArea *psLinuxMemArea = NULL; + IMG_VOID *pvCpuVAddr; +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + IMG_UINT32 ui32NumPages = 0; + struct page 
**ppsPageList = NULL; + IMG_HANDLE hBlockPageList; +#endif + IMG_BOOL bFromPagePool = IMG_FALSE; + + psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + goto failed; + } + +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + ui32NumPages = RANGE_TO_PAGES(ui32Bytes); + + if (!AllocPages(ui32AreaFlags, &ppsPageList, &hBlockPageList, ui32NumPages, &bFromPagePool)) + { + goto failed; + } + + pvCpuVAddr = VMapWrapper(ppsPageList, ui32NumPages, ui32AreaFlags); +#else /* defined(PVR_LINUX_MEM_AREA_USE_VMAP) */ + pvCpuVAddr = VMallocWrapper(ui32Bytes, ui32AreaFlags); + if (!pvCpuVAddr) + { + goto failed; + } +/* PG_reserved was deprecated in linux-2.6.15 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + /* Reserve those pages to allow them to be re-mapped to user space */ + ReservePages(pvCpuVAddr, ui32Bytes); +#endif +#endif /* defined(PVR_LINUX_MEM_AREA_USE_VMAP) */ + + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_VMALLOC; + psLinuxMemArea->uData.sVmalloc.pvVmallocAddress = pvCpuVAddr; +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + psLinuxMemArea->uData.sVmalloc.ppsPageList = ppsPageList; + psLinuxMemArea->uData.sVmalloc.hBlockPageList = hBlockPageList; +#endif + psLinuxMemArea->ui32ByteSize = ui32Bytes; + psLinuxMemArea->ui32AreaFlags = ui32AreaFlags; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); +#endif + + /* This works around a problem where Linux will not invalidate + * the cache for physical memory it frees that is direct mapped. + * + * As a result, cache entries remain that may be subsequently flushed + * to these physical pages after they have been allocated for another + * purpose. For a subsequent cached use of this memory, that is not a + * problem, but if we are allocating uncached or write-combined memory, + * and bypassing the cache, it can cause subsequent uncached writes to + * the memory to be replaced with junk from the cache. 
+ * + * If the pages are from our page cache, no cache invalidate is needed. + * + * This just handles the __vmalloc() case (when we have a kernel virtual + * address range). The alloc_pages() path is handled in mmap.c. + */ + if (AreaIsUncached(ui32AreaFlags) && !bFromPagePool) + { + OSInvalidateCPUCacheRangeKM(psLinuxMemArea, 0, pvCpuVAddr, ui32Bytes); + } + + return psLinuxMemArea; + +failed: + PVR_DPF((PVR_DBG_ERROR, "%s: failed!", __FUNCTION__)); +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + if (ppsPageList) + { + FreePages(bFromPagePool, ppsPageList, hBlockPageList, ui32NumPages); + } +#endif + if (psLinuxMemArea) + { + LinuxMemAreaStructFree(psLinuxMemArea); + } + + return NULL; +} + + +IMG_VOID +FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + IMG_UINT32 ui32NumPages; + struct page **ppsPageList; + IMG_HANDLE hBlockPageList; +#endif + + PVR_ASSERT(psLinuxMemArea); + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC); + PVR_ASSERT(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + + PVR_DPF((PVR_DBG_MESSAGE,"%s: pvCpuVAddr: %p", + __FUNCTION__, psLinuxMemArea->uData.sVmalloc.pvVmallocAddress)); + +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + VUnmapWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress); + + ui32NumPages = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize); + ppsPageList = psLinuxMemArea->uData.sVmalloc.ppsPageList; + hBlockPageList = psLinuxMemArea->uData.sVmalloc.hBlockPageList; + + FreePages(CanFreeToPool(psLinuxMemArea), ppsPageList, hBlockPageList, ui32NumPages); +#else +/* PG_reserved was deprecated in linux-2.6.15 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + UnreservePages(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress, + psLinuxMemArea->ui32ByteSize); +#endif + + VFreeWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress); +#endif /* defined(PVR_LINUX_MEM_AREA_USE_VMAP) 
*/ + + LinuxMemAreaStructFree(psLinuxMemArea); +} + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +/* Reserve pages of memory in order that they're not automatically + deallocated after the last user reference dies. */ +static IMG_VOID +ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length) +{ + IMG_VOID *pvPage; + IMG_VOID *pvEnd = pvAddress + ui32Length; + + for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE) + { +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) + SetPageReserved(vmalloc_to_page(pvPage)); +#else + mem_map_reserve(vmalloc_to_page(pvPage)); +#endif + } +} + + +/* Un-reserve pages of memory in order that they can be freed. */ +static IMG_VOID +UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length) +{ + IMG_VOID *pvPage; + IMG_VOID *pvEnd = pvAddress + ui32Length; + + for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE) + { +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) + ClearPageReserved(vmalloc_to_page(pvPage)); +#else + mem_map_unreserve(vmalloc_to_page(pvPage)); +#endif + } +} +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) */ + + +IMG_VOID * +_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32MappingFlags, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line) +{ + IMG_VOID *pvIORemapCookie; + + switch (ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK) + { + case PVRSRV_HAP_CACHED: + pvIORemapCookie = (IMG_VOID *)IOREMAP(BasePAddr.uiAddr, ui32Bytes); + break; + case PVRSRV_HAP_WRITECOMBINE: + pvIORemapCookie = (IMG_VOID *)IOREMAP_WC(BasePAddr.uiAddr, ui32Bytes); + break; + case PVRSRV_HAP_UNCACHED: + pvIORemapCookie = (IMG_VOID *)IOREMAP_UC(BasePAddr.uiAddr, ui32Bytes); + break; + default: + PVR_DPF((PVR_DBG_ERROR, "IORemapWrapper: unknown mapping flags")); + return NULL; + } + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + if (pvIORemapCookie) + { + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP, + pvIORemapCookie, + pvIORemapCookie, + BasePAddr.uiAddr, + NULL, + ui32Bytes, + 
pszFileName, + ui32Line + ); + } +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + + return pvIORemapCookie; +} + + +IMG_VOID +_IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line) +{ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IOREMAP, pvIORemapCookie, pszFileName, ui32Line); +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + iounmap(pvIORemapCookie); +} + + +LinuxMemArea * +NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32AreaFlags) +{ + LinuxMemArea *psLinuxMemArea; + IMG_VOID *pvIORemapCookie; + + psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + return NULL; + } + + pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32AreaFlags); + if (!pvIORemapCookie) + { + LinuxMemAreaStructFree(psLinuxMemArea); + return NULL; + } + + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IOREMAP; + psLinuxMemArea->uData.sIORemap.pvIORemapCookie = pvIORemapCookie; + psLinuxMemArea->uData.sIORemap.CPUPhysAddr = BasePAddr; + psLinuxMemArea->ui32ByteSize = ui32Bytes; + psLinuxMemArea->ui32AreaFlags = ui32AreaFlags; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); +#endif + + return psLinuxMemArea; +} + + +IMG_VOID +FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + + IOUnmapWrapper(psLinuxMemArea->uData.sIORemap.pvIORemapCookie); + + LinuxMemAreaStructFree(psLinuxMemArea); +} + + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) +/* + * Avoid using remap_pfn_range on RAM, if possible. 
On x86 systems, with + * PAT enabled, remap_pfn_range checks the page attributes requested by + * remap_pfn_range against those of the direct kernel mapping for those + * pages (if any). This is rather annoying if the pages have been obtained + * with alloc_pages, where we just ask for raw pages; we don't care about + * the direct mapping. This latter issue arises when device memory is + * exported from one process to another. Services implements this + * using memory wrapping, which ends up creating an external KV memory area. + */ +static IMG_BOOL +TreatExternalPagesAsContiguous(IMG_SYS_PHYADDR *psSysPhysAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig) +{ + IMG_UINT32 ui32; + IMG_UINT32 ui32AddrChk; + IMG_UINT32 ui32NumPages = RANGE_TO_PAGES(ui32Bytes); + + /* + * If bPhysContig is IMG_TRUE, we must assume psSysPhysAddr points + * to the address of the first page, not an array of page addresses. + */ + for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr; + ui32 < ui32NumPages; + ui32++, ui32AddrChk = (bPhysContig) ? 
(ui32AddrChk + PAGE_SIZE) : psSysPhysAddr[ui32].uiAddr) + { + if (!pfn_valid(PHYS_TO_PFN(ui32AddrChk))) + { + break; + } + } + if (ui32 == ui32NumPages) + { + return IMG_FALSE; + } + + if (!bPhysContig) + { + for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr; + ui32 < ui32NumPages; + ui32++, ui32AddrChk += PAGE_SIZE) + { + if (psSysPhysAddr[ui32].uiAddr != ui32AddrChk) + { + return IMG_FALSE; + } + } + } + + return IMG_TRUE; +} +#endif + +LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags) +{ + LinuxMemArea *psLinuxMemArea; + + psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + return NULL; + } + + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV; + psLinuxMemArea->uData.sExternalKV.pvExternalKV = pvCPUVAddr; + psLinuxMemArea->uData.sExternalKV.bPhysContig = +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + (bPhysContig || TreatExternalPagesAsContiguous(pBasePAddr, ui32Bytes, bPhysContig)) + ? 
IMG_TRUE : IMG_FALSE; +#else + bPhysContig; +#endif + if (psLinuxMemArea->uData.sExternalKV.bPhysContig) + { + psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr = *pBasePAddr; + } + else + { + psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr = pBasePAddr; + } + psLinuxMemArea->ui32ByteSize = ui32Bytes; + psLinuxMemArea->ui32AreaFlags = ui32AreaFlags; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); +#endif + + return psLinuxMemArea; +} + + +IMG_VOID +FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + + LinuxMemAreaStructFree(psLinuxMemArea); +} + + +LinuxMemArea * +NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32AreaFlags) +{ + LinuxMemArea *psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + return NULL; + } + + /* Nothing to activly do. We just keep a record of the physical range. 
*/ + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IO; + psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr = BasePAddr.uiAddr; + psLinuxMemArea->ui32ByteSize = ui32Bytes; + psLinuxMemArea->ui32AreaFlags = ui32AreaFlags; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IO, + (IMG_VOID *)BasePAddr.uiAddr, + 0, + BasePAddr.uiAddr, + NULL, + ui32Bytes, + "unknown", + 0 + ); +#endif + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); +#endif + + return psLinuxMemArea; +} + + +IMG_VOID +FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO, + (IMG_VOID *)psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr, __FILE__, __LINE__); +#endif + + /* Nothing more to do than free the LinuxMemArea struct */ + + LinuxMemAreaStructFree(psLinuxMemArea); +} + + +LinuxMemArea * +NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags) +{ + LinuxMemArea *psLinuxMemArea; + IMG_UINT32 ui32NumPages; + struct page **ppsPageList; + IMG_HANDLE hBlockPageList; + IMG_BOOL bFromPagePool; + + psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + goto failed_area_alloc; + } + + ui32NumPages = RANGE_TO_PAGES(ui32Bytes); + + if (!AllocPages(ui32AreaFlags, &ppsPageList, &hBlockPageList, ui32NumPages, &bFromPagePool)) + { + goto failed_alloc_pages; + } + + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES; + psLinuxMemArea->uData.sPageList.ppsPageList = ppsPageList; + psLinuxMemArea->uData.sPageList.hBlockPageList = hBlockPageList; + psLinuxMemArea->ui32ByteSize = ui32Bytes; + psLinuxMemArea->ui32AreaFlags = ui32AreaFlags; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + 
+ /* We defer the cache flush to the first user mapping of this memory */ + psLinuxMemArea->bNeedsCacheInvalidate = AreaIsUncached(ui32AreaFlags) && !bFromPagePool; + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); +#endif + + return psLinuxMemArea; + +failed_alloc_pages: + LinuxMemAreaStructFree(psLinuxMemArea); +failed_area_alloc: + PVR_DPF((PVR_DBG_ERROR, "%s: failed", __FUNCTION__)); + + return NULL; +} + + +IMG_VOID +FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ + IMG_UINT32 ui32NumPages; + struct page **ppsPageList; + IMG_HANDLE hBlockPageList; + + PVR_ASSERT(psLinuxMemArea); + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + + ui32NumPages = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize); + ppsPageList = psLinuxMemArea->uData.sPageList.ppsPageList; + hBlockPageList = psLinuxMemArea->uData.sPageList.hBlockPageList; + + FreePages(CanFreeToPool(psLinuxMemArea), ppsPageList, hBlockPageList, ui32NumPages); + + LinuxMemAreaStructFree(psLinuxMemArea); +} + +#if defined(CONFIG_ION_OMAP) + +#include "env_perproc.h" + +#include <linux/ion.h> +#include <linux/omap_ion.h> +#include <linux/scatterlist.h> + +extern struct ion_client *gpsIONClient; + +LinuxMemArea * +NewIONLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags, + IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength) +{ + const IMG_UINT32 ui32AllocDataLen = + offsetof(struct omap_ion_tiler_alloc_data, handle); + struct omap_ion_tiler_alloc_data asAllocData[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES]; + u32 *pu32PageAddrs[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES] = { NULL, NULL, NULL}; + IMG_UINT32 i, j, ui32NumHandlesPerFd; + IMG_BYTE *pbPrivData = pvPrivData; + IMG_CPU_PHYADDR *pCPUPhysAddrs; + IMG_UINT32 iNumPages[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES] = { 0, 0, 0}; + LinuxMemArea *psLinuxMemArea; + IMG_UINT32 ui32ProcID; + IMG_UINT32 
ui32TotalPagesSizeInBytes = 0, ui32TotalPages = 0; + + psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate LinuxMemArea struct", __func__)); + goto err_out; + } + + /* Depending on the UM config, userspace might give us info for + * one, two or three ION allocations. Divide the total size of data we + * were given by this ui32AllocDataLen, and check it's 1 or 2. + * Otherwise abort. + */ + BUG_ON(ui32PrivDataLength != ui32AllocDataLen && + ui32PrivDataLength != ui32AllocDataLen * 2 && + ui32PrivDataLength != ui32AllocDataLen * 3); + /* This is bad !- change this logic to pass in the size or + * use uniformed API */ + ui32NumHandlesPerFd = ui32PrivDataLength / ui32AllocDataLen; + + ui32ProcID = OSGetCurrentProcessIDKM(); + + memset(asAllocData, 0x00, sizeof(asAllocData)); + + /* We do not care about what the first (Y) buffer offset would be, + * but we do care for the UV buffers to be co-aligned with Y + * This for SGX to find the UV offset solely based on the height + * and stride of the YUV buffer.This is very important for OMAP4470 + * and later chipsets, where SGX version is 544. 544 and later use + * non-shader based YUV to RGB conversion unit that require + * contiguous GPU virtual space */ + for(i = 0; i < ui32NumHandlesPerFd; i++) + { + memcpy(&asAllocData[i], &pbPrivData[i * ui32AllocDataLen], ui32AllocDataLen); + asAllocData[i].token = ui32ProcID; + +#ifndef SGX_DISABLE_DMM_OFFSET_BUFFER_ALLOCATIONS + if(i == 0) + { + /* Tiler API says: + * Allocate first buffer with the required alignment + * and an offset of 0 ... */ + asAllocData[i].out_align = CONFIG_TILER_GRANULARITY; + asAllocData[i].offset = 0; + } + else + { /* .. 
Then for the second buffer, use the offset from the first + * buffer with alignment of PAGE_SIZE */ + asAllocData[i].out_align = PAGE_SIZE; + asAllocData[i].offset = asAllocData[0].offset; + } +#else + asAllocData[i].offset = 0; + asAllocData[i].out_align = PAGE_SIZE; +#endif + + if(asAllocData[i].fmt == TILER_PIXEL_FMT_PAGE) + { + /* 1D DMM Buffers */ + struct scatterlist *sg, *sglist; + IMG_UINT32 ui32Num1dPages; + + asAllocData[i].handle = ion_alloc (gpsIONClient, + ui32Bytes, + PAGE_SIZE, (1 << OMAP_ION_HEAP_SYSTEM)); + + if (asAllocData[i].handle == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate via ion_alloc", + __func__)); + goto err_free; + } + + sglist = ion_map_dma (gpsIONClient, asAllocData[i].handle); + if (sglist == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to compute pages", + __func__)); + goto err_free; + } + + ui32Num1dPages = (ui32Bytes >> PAGE_SHIFT); + pu32PageAddrs[i] = kmalloc (sizeof(u32) * ui32Num1dPages, GFP_KERNEL); + if (pu32PageAddrs[i] == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate page array", + __func__)); + goto err_free; + } + + for_each_sg (sglist, sg, ui32Num1dPages, j) + { + pu32PageAddrs[i][j] = sg_phys (sg); + } + + iNumPages[i] = ui32Num1dPages; + } + else /* 2D DMM Buffers */ + { + if (omap_ion_tiler_alloc(gpsIONClient, &asAllocData[i]) < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate via ion_tiler", + __func__)); + goto err_free; + } + + if (omap_tiler_pages(gpsIONClient, asAllocData[i].handle, &iNumPages[i], + &pu32PageAddrs[i]) < 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to compute tiler pages", + __func__)); + goto err_free; + } + } + } + + /* Basic sanity check on plane co-alignment */ + if((ui32NumHandlesPerFd > 1) && + (asAllocData[0].offset != asAllocData[1].offset)) + { + pr_err("%s: Y and UV offsets do not match for tiler handles " + "%p,%p: %d != %d \n " + "Expect issues with SGX544xx and later chipsets\n", + __func__, asAllocData[0].handle, asAllocData[1].handle, + 
(int)asAllocData[0].offset, (int)asAllocData[1].offset); + } + + /* Assume the user-allocator has already done the tiler math and that the + * number of tiler pages allocated matches any other allocation type. + */ + for(i = 0; i < ui32NumHandlesPerFd; i++) + { + ui32TotalPages += iNumPages[i]; + } + + BUG_ON(ui32Bytes != (ui32TotalPages * PAGE_SIZE)); + BUG_ON(sizeof(IMG_CPU_PHYADDR) != sizeof(int)); + + /* Glue the page lists together */ + pCPUPhysAddrs = vmalloc(sizeof(IMG_CPU_PHYADDR) * ui32TotalPages); + if (!pCPUPhysAddrs) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate page list", __func__)); + goto err_free; + } + + j = 0; + for(i = 0; i < ui32NumHandlesPerFd; i++) + { + IMG_UINT32 ui32PageIndx; + for(ui32PageIndx = 0; ui32PageIndx < iNumPages[i]; ui32PageIndx++) + { + pCPUPhysAddrs[j++].uiAddr = pu32PageAddrs[i][ui32PageIndx]; + } + + psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i] = + asAllocData[i].handle; + psLinuxMemArea->uData.sIONTilerAlloc.planeOffsets[i] = + ui32TotalPagesSizeInBytes + asAllocData[i].offset; + /* Add the number of pages this plane consists of */ + ui32TotalPagesSizeInBytes += (iNumPages[i] * PAGE_SIZE); + } + + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ION; + psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs = pCPUPhysAddrs; + psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes = + ui32NumHandlesPerFd; + psLinuxMemArea->ui32ByteSize = ui32TotalPagesSizeInBytes; + psLinuxMemArea->ui32AreaFlags = ui32AreaFlags; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + + /* We defer the cache flush to the first user mapping of this memory */ + psLinuxMemArea->bNeedsCacheInvalidate = AreaIsUncached(ui32AreaFlags); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ION, + asAllocData[0].handle, + 0, + 0, + NULL, + PAGE_ALIGN(ui32Bytes), + "unknown", + 0 + ); +#endif + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags); +#endif + 
+err_out: + return psLinuxMemArea; + +err_free: + LinuxMemAreaStructFree(psLinuxMemArea); + psLinuxMemArea = IMG_NULL; + goto err_out; +} + +IMG_INT32 +GetIONLinuxMemAreaInfo(LinuxMemArea *psLinuxMemArea, IMG_UINT32* pui32AddressOffsets, + IMG_UINT32* ui32NumAddrOffsets) +{ + IMG_UINT32 i; + + if(!ui32NumAddrOffsets) + return -1; + + if(*ui32NumAddrOffsets < psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes) + { + *ui32NumAddrOffsets = psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes; + return -1; + } + + if(!pui32AddressOffsets) + return -1; + + for(i = 0; i < psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes; i++) + { + if(psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i]) + pui32AddressOffsets[i] = + psLinuxMemArea->uData.sIONTilerAlloc.planeOffsets[i]; + } + + *ui32NumAddrOffsets = psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes; + + return psLinuxMemArea->ui32ByteSize; +} + +IMG_VOID +FreeIONLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ + IMG_UINT32 i; + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ION, + psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[0], + __FILE__, __LINE__); +#endif + + for(i = 0; i < psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes; i++) + { + if (!psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i]) + break; + ion_free(gpsIONClient, psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i]); + psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i] = IMG_NULL; + } + + /* free copy of page list, originals are freed by ion_free */ + vfree(psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs); + psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs = IMG_NULL; + + LinuxMemAreaStructFree(psLinuxMemArea); +} + +#endif /* defined(CONFIG_ION_OMAP) */ + +struct page* +LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea, + IMG_UINT32 ui32ByteOffset) +{ + IMG_UINT32 ui32PageIndex; + 
IMG_CHAR *pui8Addr; + + switch (psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_ALLOC_PAGES: + ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset); + return psLinuxMemArea->uData.sPageList.ppsPageList[ui32PageIndex]; + + case LINUX_MEM_AREA_VMALLOC: + pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress; + pui8Addr += ui32ByteOffset; + return vmalloc_to_page(pui8Addr); + + case LINUX_MEM_AREA_SUB_ALLOC: + /* PRQA S 3670 3 */ /* ignore recursive warning */ + return LinuxMemAreaOffsetToPage(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea, + psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset + + ui32ByteOffset); + default: + PVR_DPF((PVR_DBG_ERROR, + "%s: Unsupported request for struct page from LinuxMemArea with type=%s", + __FUNCTION__, LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType))); + return NULL; + } +} + + +LinuxKMemCache * +KMemCacheCreateWrapper(IMG_CHAR *pszName, + size_t Size, + size_t Align, + IMG_UINT32 ui32Flags) +{ +#if defined(DEBUG_LINUX_SLAB_ALLOCATIONS) + ui32Flags |= SLAB_POISON|SLAB_RED_ZONE; +#endif + return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)) + , NULL +#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22) */ + ); +} + + +IMG_VOID +KMemCacheDestroyWrapper(LinuxKMemCache *psCache) +{ + kmem_cache_destroy(psCache); +} + + +IMG_VOID * +_KMemCacheAllocWrapper(LinuxKMemCache *psCache, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)) + gfp_t Flags, +#else + IMG_INT Flags, +#endif + IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line) +{ + IMG_VOID *pvRet; + + pvRet = kmem_cache_zalloc(psCache, Flags); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, + pvRet, + pvRet, + 0, + psCache, + kmem_cache_size(psCache), + pszFileName, + ui32Line + ); +#else + PVR_UNREFERENCED_PARAMETER(pszFileName); + PVR_UNREFERENCED_PARAMETER(ui32Line); +#endif + + return pvRet; +} + + +LinuxMemArea * +NewSubLinuxMemArea(LinuxMemArea 
*psParentLinuxMemArea, + IMG_UINT32 ui32ByteOffset, + IMG_UINT32 ui32Bytes) +{ + LinuxMemArea *psLinuxMemArea; + + PVR_ASSERT((ui32ByteOffset+ui32Bytes) <= psParentLinuxMemArea->ui32ByteSize); + + psLinuxMemArea = LinuxMemAreaStructAlloc(); + if (!psLinuxMemArea) + { + return NULL; + } + + psLinuxMemArea->eAreaType = LINUX_MEM_AREA_SUB_ALLOC; + psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea = psParentLinuxMemArea; + psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset = ui32ByteOffset; + psLinuxMemArea->ui32ByteSize = ui32Bytes; + psLinuxMemArea->ui32AreaFlags = psParentLinuxMemArea->ui32AreaFlags; + psLinuxMemArea->bNeedsCacheInvalidate = psParentLinuxMemArea->bNeedsCacheInvalidate; + INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList); + +#if defined(DEBUG_LINUX_MEM_AREAS) + { + DEBUG_LINUX_MEM_AREA_REC *psParentRecord; + psParentRecord = DebugLinuxMemAreaRecordFind(psParentLinuxMemArea); + DebugLinuxMemAreaRecordAdd(psLinuxMemArea, psParentRecord->ui32Flags); + } +#endif + + return psLinuxMemArea; +} + + +static IMG_VOID +FreeSubLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC); + +#if defined(DEBUG_LINUX_MEM_AREAS) + DebugLinuxMemAreaRecordRemove(psLinuxMemArea); +#endif + + /* Nothing more to do than free the LinuxMemArea structure */ + + LinuxMemAreaStructFree(psLinuxMemArea); +} + + +static LinuxMemArea * +LinuxMemAreaStructAlloc(IMG_VOID) +{ +/* debug */ +#if 0 + LinuxMemArea *psLinuxMemArea; + psLinuxMemArea = kmem_cache_alloc(g_PsLinuxMemAreaCache, GFP_KERNEL); + printk(KERN_ERR "%s: psLinuxMemArea=%p\n", __FUNCTION__, psLinuxMemArea); + dump_stack(); + return psLinuxMemArea; +#else + return KMemCacheAllocWrapper(g_PsLinuxMemAreaCache, GFP_KERNEL); +#endif +} + + +static IMG_VOID +LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea) +{ + KMemCacheFreeWrapper(g_PsLinuxMemAreaCache, psLinuxMemArea); + /* debug */ + //printk(KERN_ERR "%s(%p)\n", __FUNCTION__, psLinuxMemArea); +} + + +IMG_VOID 
+LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea) +{ + switch (psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_VMALLOC: + FreeVMallocLinuxMemArea(psLinuxMemArea); + break; + case LINUX_MEM_AREA_ALLOC_PAGES: + FreeAllocPagesLinuxMemArea(psLinuxMemArea); + break; + case LINUX_MEM_AREA_IOREMAP: + FreeIORemapLinuxMemArea(psLinuxMemArea); + break; + case LINUX_MEM_AREA_EXTERNAL_KV: + FreeExternalKVLinuxMemArea(psLinuxMemArea); + break; + case LINUX_MEM_AREA_IO: + FreeIOLinuxMemArea(psLinuxMemArea); + break; + case LINUX_MEM_AREA_SUB_ALLOC: + FreeSubLinuxMemArea(psLinuxMemArea); + break; + case LINUX_MEM_AREA_ION: + FreeIONLinuxMemArea(psLinuxMemArea); + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Unknown are type (%d)\n", + __FUNCTION__, psLinuxMemArea->eAreaType)); + break; + } +} + + +#if defined(DEBUG_LINUX_MEM_AREAS) +static IMG_VOID +DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags) +{ + DEBUG_LINUX_MEM_AREA_REC *psNewRecord; + const IMG_CHAR *pi8FlagsString; + + LinuxLockMutex(&g_sDebugMutex); + + if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) + { + g_LinuxMemAreaWaterMark += psLinuxMemArea->ui32ByteSize; + if (g_LinuxMemAreaWaterMark > g_LinuxMemAreaHighWaterMark) + { + g_LinuxMemAreaHighWaterMark = g_LinuxMemAreaWaterMark; + } + } + g_LinuxMemAreaCount++; + + /* Create a new memory allocation record */ + psNewRecord = kmalloc(sizeof(DEBUG_LINUX_MEM_AREA_REC), GFP_KERNEL); + if (psNewRecord) + { + /* Record the allocation */ + psNewRecord->psLinuxMemArea = psLinuxMemArea; + psNewRecord->ui32Flags = ui32Flags; + psNewRecord->pid = OSGetCurrentProcessIDKM(); + + List_DEBUG_LINUX_MEM_AREA_REC_Insert(&g_LinuxMemAreaRecords, psNewRecord); + } + else + { + PVR_DPF((PVR_DBG_ERROR, + "%s: failed to allocate linux memory area record.", + __FUNCTION__)); + } + + /* Sanity check the flags */ + pi8FlagsString = HAPFlagsToString(ui32Flags); + if (strstr(pi8FlagsString, "UNKNOWN")) + { + PVR_DPF((PVR_DBG_ERROR, + "%s: 
Unexpected flags (0x%08x) associated with psLinuxMemArea @ %p", + __FUNCTION__, + ui32Flags, + psLinuxMemArea)); + //dump_stack(); + } + + LinuxUnLockMutex(&g_sDebugMutex); +} + + + +static IMG_VOID* MatchLinuxMemArea_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord, + va_list va) +{ + LinuxMemArea *psLinuxMemArea; + + psLinuxMemArea = va_arg(va, LinuxMemArea*); + if (psCurrentRecord->psLinuxMemArea == psLinuxMemArea) + { + return psCurrentRecord; + } + else + { + return IMG_NULL; + } +} + + +static DEBUG_LINUX_MEM_AREA_REC * +DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea) +{ + DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord; + + LinuxLockMutex(&g_sDebugMutex); + psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords, + MatchLinuxMemArea_AnyVaCb, + psLinuxMemArea); + +/*exit_unlock:*/ + LinuxUnLockMutex(&g_sDebugMutex); + + return psCurrentRecord; +} + + +static IMG_VOID +DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea) +{ + DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord; + + LinuxLockMutex(&g_sDebugMutex); + + if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) + { + g_LinuxMemAreaWaterMark -= psLinuxMemArea->ui32ByteSize; + } + g_LinuxMemAreaCount--; + + /* Locate the corresponding allocation entry */ + psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords, + MatchLinuxMemArea_AnyVaCb, + psLinuxMemArea); + if (psCurrentRecord) + { + /* Unlink the allocation record */ + List_DEBUG_LINUX_MEM_AREA_REC_Remove(psCurrentRecord); + kfree(psCurrentRecord); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for psLinuxMemArea=%p\n", + __FUNCTION__, psLinuxMemArea)); + } + + LinuxUnLockMutex(&g_sDebugMutex); +} +#endif + + +IMG_VOID * +LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea) +{ + switch (psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_VMALLOC: + return psLinuxMemArea->uData.sVmalloc.pvVmallocAddress; + case LINUX_MEM_AREA_IOREMAP: + return 
psLinuxMemArea->uData.sIORemap.pvIORemapCookie; + case LINUX_MEM_AREA_EXTERNAL_KV: + return psLinuxMemArea->uData.sExternalKV.pvExternalKV; + case LINUX_MEM_AREA_SUB_ALLOC: + { + IMG_CHAR *pAddr = + LinuxMemAreaToCpuVAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea); /* PRQA S 3670 */ /* ignore recursive warning */ + if (!pAddr) + { + return NULL; + } + return pAddr + psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset; + } + default: + return NULL; + } +} + + +IMG_CPU_PHYADDR +LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset) +{ + IMG_CPU_PHYADDR CpuPAddr; + + CpuPAddr.uiAddr = 0; + + switch (psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_IOREMAP: + { + CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr; + CpuPAddr.uiAddr += ui32ByteOffset; + break; + } + case LINUX_MEM_AREA_EXTERNAL_KV: + { + if (psLinuxMemArea->uData.sExternalKV.bPhysContig) + { + CpuPAddr = SysSysPAddrToCpuPAddr(psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr); + CpuPAddr.uiAddr += ui32ByteOffset; + } + else + { + IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset); + IMG_SYS_PHYADDR SysPAddr = psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr[ui32PageIndex]; + + CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr); + CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset); + } + break; + } + case LINUX_MEM_AREA_IO: + { + CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr; + CpuPAddr.uiAddr += ui32ByteOffset; + break; + } + case LINUX_MEM_AREA_VMALLOC: + { + IMG_CHAR *pCpuVAddr; + pCpuVAddr = + (IMG_CHAR *)psLinuxMemArea->uData.sVmalloc.pvVmallocAddress; + pCpuVAddr += ui32ByteOffset; + CpuPAddr.uiAddr = VMallocToPhys(pCpuVAddr); + break; + } + case LINUX_MEM_AREA_ION: + { + IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset); + CpuPAddr = psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs[ui32PageIndex]; + CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset); + break; + } + case LINUX_MEM_AREA_ALLOC_PAGES: + { + struct page *page; + 
IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset); + page = psLinuxMemArea->uData.sPageList.ppsPageList[ui32PageIndex]; + CpuPAddr.uiAddr = page_to_phys(page); + CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset); + break; + } + case LINUX_MEM_AREA_SUB_ALLOC: + { + CpuPAddr = + OSMemHandleToCpuPAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea, + psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset + + ui32ByteOffset); + break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n", + __FUNCTION__, psLinuxMemArea->eAreaType)); + PVR_ASSERT(CpuPAddr.uiAddr); + break; + } + } + + return CpuPAddr; +} + + +IMG_BOOL +LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea) +{ + switch (psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_IOREMAP: + case LINUX_MEM_AREA_IO: + return IMG_TRUE; + + case LINUX_MEM_AREA_EXTERNAL_KV: + return psLinuxMemArea->uData.sExternalKV.bPhysContig; + + case LINUX_MEM_AREA_ION: + case LINUX_MEM_AREA_VMALLOC: + case LINUX_MEM_AREA_ALLOC_PAGES: + return IMG_FALSE; + + case LINUX_MEM_AREA_SUB_ALLOC: + /* PRQA S 3670 1 */ /* ignore recursive warning */ + return LinuxMemAreaPhysIsContig(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea); + + default: + PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n", + __FUNCTION__, psLinuxMemArea->eAreaType)); + break; + } + return IMG_FALSE; +} + + +const IMG_CHAR * +LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType) +{ + /* Note we explicitly check the types instead of e.g. 
+ * using the type to index an array of strings so + * we remain orthogonal to enum changes */ + switch (eMemAreaType) + { + case LINUX_MEM_AREA_IOREMAP: + return "LINUX_MEM_AREA_IOREMAP"; + case LINUX_MEM_AREA_EXTERNAL_KV: + return "LINUX_MEM_AREA_EXTERNAL_KV"; + case LINUX_MEM_AREA_IO: + return "LINUX_MEM_AREA_IO"; + case LINUX_MEM_AREA_VMALLOC: + return "LINUX_MEM_AREA_VMALLOC"; + case LINUX_MEM_AREA_SUB_ALLOC: + return "LINUX_MEM_AREA_SUB_ALLOC"; + case LINUX_MEM_AREA_ALLOC_PAGES: + return "LINUX_MEM_AREA_ALLOC_PAGES"; + case LINUX_MEM_AREA_ION: + return "LINUX_MEM_AREA_ION"; + default: + PVR_ASSERT(0); + } + + return ""; +} + + +#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +static void ProcSeqStartstopDebugMutex(struct seq_file *sfile, IMG_BOOL start) +{ + if (start) + { + LinuxLockMutex(&g_sDebugMutex); + } + else + { + LinuxUnLockMutex(&g_sDebugMutex); + } +} +#endif /* defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) */ + +#if defined(DEBUG_LINUX_MEM_AREAS) + +static IMG_VOID* DecOffMemAreaRec_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psNode, va_list va) +{ + off_t *pOff = va_arg(va, off_t*); + if (--(*pOff)) + { + return IMG_NULL; + } + else + { + return psNode; + } +} + +/* seq_file version of generating output, for reference check proc.c:CreateProcReadEntrySeq */ +static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off) +{ + DEBUG_LINUX_MEM_AREA_REC *psRecord; + psRecord = (DEBUG_LINUX_MEM_AREA_REC*) + List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords, + DecOffMemAreaRec_AnyVaCb, + &off); + return (void*)psRecord; +} + +static void* ProcSeqOff2ElementMemArea(struct seq_file * sfile, loff_t off) +{ + DEBUG_LINUX_MEM_AREA_REC *psRecord; + if (!off) + { + return PVR_PROC_SEQ_START_TOKEN; + } + + psRecord = (DEBUG_LINUX_MEM_AREA_REC*) + List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords, + DecOffMemAreaRec_AnyVaCb, + &off); + return (void*)psRecord; +} + + +static void 
ProcSeqShowMemArea(struct seq_file *sfile,void* el) +{ + DEBUG_LINUX_MEM_AREA_REC *psRecord = (DEBUG_LINUX_MEM_AREA_REC*)el; + if (el == PVR_PROC_SEQ_START_TOKEN) + { + +#if !defined(DEBUG_LINUX_XML_PROC_FILES) + seq_printf(sfile, + "Number of Linux Memory Areas: %u\n" + "At the current water mark these areas correspond to %u bytes (excluding SUB areas)\n" + "At the highest water mark these areas corresponded to %u bytes (excluding SUB areas)\n" + "\nDetails for all Linux Memory Areas:\n" + "%s %-24s %s %s %-8s %-5s %s\n", + g_LinuxMemAreaCount, + g_LinuxMemAreaWaterMark, + g_LinuxMemAreaHighWaterMark, + "psLinuxMemArea", + "LinuxMemType", + "CpuVAddr", + "CpuPAddr", + "Bytes", + "Pid", + "Flags" + ); +#else + seq_printf(sfile, + "<mem_areas_header>\n" + "\t<count>%u</count>\n" + "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%u\"/>\n" /* (excluding SUB areas) */ + "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%u\"/>\n" /* (excluding SUB areas) */ + "</mem_areas_header>\n", + g_LinuxMemAreaCount, + g_LinuxMemAreaWaterMark, + g_LinuxMemAreaHighWaterMark + ); +#endif + return; + } + + seq_printf(sfile, +#if !defined(DEBUG_LINUX_XML_PROC_FILES) + "%8p %-24s %8p %08x %-8d %-5u %08x=(%s)\n", +#else + "<linux_mem_area>\n" + "\t<pointer>%8p</pointer>\n" + "\t<type>%s</type>\n" + "\t<cpu_virtual>%8p</cpu_virtual>\n" + "\t<cpu_physical>%08x</cpu_physical>\n" + "\t<bytes>%d</bytes>\n" + "\t<pid>%u</pid>\n" + "\t<flags>%08x</flags>\n" + "\t<flags_string>%s</flags_string>\n" + "</linux_mem_area>\n", +#endif + psRecord->psLinuxMemArea, + LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType), + LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea), + LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,0).uiAddr, + psRecord->psLinuxMemArea->ui32ByteSize, + psRecord->pid, + psRecord->ui32Flags, + HAPFlagsToString(psRecord->ui32Flags) + ); + +} + +#endif /* DEBUG_LINUX_MEM_AREAS */ + + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + +static IMG_VOID* 
DecOffMemAllocRec_AnyVaCb(DEBUG_MEM_ALLOC_REC *psNode, va_list va) +{ + off_t *pOff = va_arg(va, off_t*); + if (--(*pOff)) + { + return IMG_NULL; + } + else + { + return psNode; + } +} + + +/* seq_file version of generating output, for reference check proc.c:CreateProcReadEntrySeq */ +static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off) +{ + DEBUG_MEM_ALLOC_REC *psRecord; + psRecord = (DEBUG_MEM_ALLOC_REC*) + List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords, + DecOffMemAllocRec_AnyVaCb, + &off); +#if defined(DEBUG_LINUX_XML_PROC_FILES) + if (!psRecord) + { + seq_printf(sfile, "</meminfo>\n"); + } +#endif + + return (void*)psRecord; +} + +static void* ProcSeqOff2ElementMemoryRecords(struct seq_file *sfile, loff_t off) +{ + DEBUG_MEM_ALLOC_REC *psRecord; + if (!off) + { + return PVR_PROC_SEQ_START_TOKEN; + } + + psRecord = (DEBUG_MEM_ALLOC_REC*) + List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords, + DecOffMemAllocRec_AnyVaCb, + &off); + +#if defined(DEBUG_LINUX_XML_PROC_FILES) + if (!psRecord) + { + seq_printf(sfile, "</meminfo>\n"); + } +#endif + + return (void*)psRecord; +} + +static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el) +{ + DEBUG_MEM_ALLOC_REC *psRecord = (DEBUG_MEM_ALLOC_REC*)el; + if (el == PVR_PROC_SEQ_START_TOKEN) + { +#if !defined(DEBUG_LINUX_XML_PROC_FILES) + /* NOTE: If you update this code, please also update the XML varient below + * too! 
*/ + + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes allocated via kmalloc", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes allocated via kmalloc", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes allocated via vmalloc", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes allocated via vmalloc", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes allocated via alloc_pages", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes allocated via alloc_pages", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes allocated via ioremap", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes allocated via ioremap", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes reserved for \"IO\" memory areas", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes allocated for \"IO\" memory areas", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes allocated via kmem_cache_alloc", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes allocated via kmem_cache_alloc", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]); +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + seq_printf(sfile, "%-60s: %d bytes\n", + "Current Water Mark of bytes mapped via vmap", + 
g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]); + seq_printf(sfile, "%-60s: %d bytes\n", + "Highest Water Mark of bytes mapped via vmap", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]); +#endif +#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) + seq_printf(sfile, "%-60s: %d pages\n", + "Number of pages in page pool", + atomic_read(&g_sPagePoolEntryCount)); +#endif + seq_printf( sfile, "\n"); + seq_printf(sfile, "%-60s: %d bytes\n", + "The Current Water Mark for memory allocated from system RAM", + SysRAMTrueWaterMark()); + seq_printf(sfile, "%-60s: %d bytes\n", + "The Highest Water Mark for memory allocated from system RAM", + g_SysRAMHighWaterMark); + seq_printf(sfile, "%-60s: %d bytes\n", + "The Current Water Mark for memory allocated from IO memory", + g_IOMemWaterMark); + seq_printf(sfile, "%-60s: %d bytes\n", + "The Highest Water Mark for memory allocated from IO memory", + g_IOMemHighWaterMark); + + seq_printf( sfile, "\n"); + + seq_printf(sfile, "Details for all known allocations:\n" + "%-16s %-8s %-8s %-10s %-5s %-10s %s\n", + "Type", + "CpuVAddr", + "CpuPAddr", + "Bytes", + "PID", + "PrivateData", + "Filename:Line"); + +#else /* DEBUG_LINUX_XML_PROC_FILES */ + + /* Note: If you want to update the description property of a watermark + * ensure that the key property remains unchanged so that watermark data + * logged over time from different driver revisions may remain comparable + */ + seq_printf(sfile, "<meminfo>\n<meminfo_header>\n"); + seq_printf(sfile, + "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%d\"/>\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]); + seq_printf(sfile, + "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%d\"/>\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]); + seq_printf(sfile, + "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%d\"/>\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]); + seq_printf(sfile, + "<watermark key=\"mr3\" description=\"vmalloc_high\" 
bytes=\"%d\"/>\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]); + seq_printf(sfile, + "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%d\"/>\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]); + seq_printf(sfile, + "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%d\"/>\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]); + seq_printf(sfile, + "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%d\"/>\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]); + seq_printf(sfile, + "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%d\"/>\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]); + seq_printf(sfile, + "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%d\"/>\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]); + seq_printf(sfile, + "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%d\"/>\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]); + seq_printf(sfile, + "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%d\"/>\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]); + seq_printf(sfile, + "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%d\"/>\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]); +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + seq_printf(sfile, + "<watermark key=\"mr12\" description=\"vmap_current\" bytes=\"%d\"/>\n", + g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]); + seq_printf(sfile, + "<watermark key=\"mr13\" description=\"vmap_high\" bytes=\"%d\"/>\n", + g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMAP]); +#endif + seq_printf(sfile, + "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%d\"/>\n", + SysRAMTrueWaterMark()); + seq_printf(sfile, + "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%d\"/>\n", + g_SysRAMHighWaterMark); + seq_printf(sfile, + "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%d\"/>\n", + g_IOMemWaterMark); + seq_printf(sfile, + 
"<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%d\"/>\n", + g_IOMemHighWaterMark); + +#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) + seq_printf(sfile, + "<watermark key=\"mr18\" description=\"page_pool_current\" bytes=\"%d\"/>\n", + PAGES_TO_BYTES(atomic_read(&g_sPagePoolEntryCount))); +#endif + seq_printf(sfile, "</meminfo_header>\n"); + +#endif /* DEBUG_LINUX_XML_PROC_FILES */ + return; + } + + if (psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE) + { + seq_printf(sfile, +#if !defined(DEBUG_LINUX_XML_PROC_FILES) + "%-16s %-8p %08x %-10d %-5d %-10s %s:%d\n", +#else + "<allocation>\n" + "\t<type>%s</type>\n" + "\t<cpu_virtual>%-8p</cpu_virtual>\n" + "\t<cpu_physical>%08x</cpu_physical>\n" + "\t<bytes>%d</bytes>\n" + "\t<pid>%d</pid>\n" + "\t<private>%s</private>\n" + "\t<filename>%s</filename>\n" + "\t<line>%d</line>\n" + "</allocation>\n", +#endif + DebugMemAllocRecordTypeToString(psRecord->eAllocType), + psRecord->pvCpuVAddr, + psRecord->ulCpuPAddr, + psRecord->ui32Bytes, + psRecord->pid, + "NULL", + psRecord->pszFileName, + psRecord->ui32Line); + } + else + { + seq_printf(sfile, +#if !defined(DEBUG_LINUX_XML_PROC_FILES) + "%-16s %-8p %08x %-10d %-5d %-10s %s:%d\n", +#else + "<allocation>\n" + "\t<type>%s</type>\n" + "\t<cpu_virtual>%-8p</cpu_virtual>\n" + "\t<cpu_physical>%08x</cpu_physical>\n" + "\t<bytes>%d</bytes>\n" + "\t<pid>%d</pid>\n" + "\t<private>%s</private>\n" + "\t<filename>%s</filename>\n" + "\t<line>%d</line>\n" + "</allocation>\n", +#endif + DebugMemAllocRecordTypeToString(psRecord->eAllocType), + psRecord->pvCpuVAddr, + psRecord->ulCpuPAddr, + psRecord->ui32Bytes, + psRecord->pid, + KMemCacheNameWrapper(psRecord->pvPrivateData), + psRecord->pszFileName, + psRecord->ui32Line); + } +} + +#endif /* defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) */ + + +#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MMAP_AREAS) +/* This could be moved somewhere more general */ +const IMG_CHAR * +HAPFlagsToString(IMG_UINT32 ui32Flags) +{ + 
static IMG_CHAR szFlags[50]; + IMG_INT32 i32Pos = 0; + IMG_UINT32 ui32CacheTypeIndex, ui32MapTypeIndex; + IMG_CHAR *apszCacheTypes[] = { + "UNCACHED", + "CACHED", + "WRITECOMBINE", + "UNKNOWN" + }; + IMG_CHAR *apszMapType[] = { + "KERNEL_ONLY", + "SINGLE_PROCESS", + "MULTI_PROCESS", + "FROM_EXISTING_PROCESS", + "NO_CPU_VIRTUAL", + "UNKNOWN" + }; + + /* FIXME create an enum for the cache type that we can + * cast and select so we get compiler warnings when + * when this code isn't complete due to new flags */ + if (ui32Flags & PVRSRV_HAP_UNCACHED) { + ui32CacheTypeIndex = 0; + } else if (ui32Flags & PVRSRV_HAP_CACHED) { + ui32CacheTypeIndex = 1; + } else if (ui32Flags & PVRSRV_HAP_WRITECOMBINE) { + ui32CacheTypeIndex = 2; + } else { + ui32CacheTypeIndex = 3; + PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type (%u)", + __FUNCTION__, (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK))); + } + + /* FIXME create an enum for the map type that we can + * cast and select so we get compiler warnings when + * when this code isn't complete due to new flags */ + if (ui32Flags & PVRSRV_HAP_KERNEL_ONLY) { + ui32MapTypeIndex = 0; + } else if (ui32Flags & PVRSRV_HAP_SINGLE_PROCESS) { + ui32MapTypeIndex = 1; + } else if (ui32Flags & PVRSRV_HAP_MULTI_PROCESS) { + ui32MapTypeIndex = 2; + } else if (ui32Flags & PVRSRV_HAP_FROM_EXISTING_PROCESS) { + ui32MapTypeIndex = 3; + } else if (ui32Flags & PVRSRV_HAP_NO_CPU_VIRTUAL) { + ui32MapTypeIndex = 4; + } else { + ui32MapTypeIndex = 5; + PVR_DPF((PVR_DBG_ERROR, "%s: unknown map type (%u)", + __FUNCTION__, (ui32Flags & PVRSRV_HAP_MAPTYPE_MASK))); + } + + i32Pos = sprintf(szFlags, "%s|", apszCacheTypes[ui32CacheTypeIndex]); + if (i32Pos <= 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: sprintf for cache type %u failed (%d)", + __FUNCTION__, ui32CacheTypeIndex, i32Pos)); + szFlags[0] = 0; + } + else + { + sprintf(szFlags + i32Pos, "%s", apszMapType[ui32MapTypeIndex]); + } + + return szFlags; +} +#endif + +#if defined(DEBUG_LINUX_MEM_AREAS) +static IMG_VOID 
LinuxMMCleanup_MemAreas_ForEachCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord) +{ + LinuxMemArea *psLinuxMemArea; + + psLinuxMemArea = psCurrentRecord->psLinuxMemArea; + PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up Linux memory area (%p), type=%s, size=%d bytes", + __FUNCTION__, + psCurrentRecord->psLinuxMemArea, + LinuxMemAreaTypeToString(psCurrentRecord->psLinuxMemArea->eAreaType), + psCurrentRecord->psLinuxMemArea->ui32ByteSize)); + /* Note this will also remove psCurrentRecord from g_LinuxMemAreaRecords + * but that's ok since we have already got a pointer to the next area. */ + LinuxMemAreaDeepFree(psLinuxMemArea); +} +#endif + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +static IMG_VOID LinuxMMCleanup_MemRecords_ForEachVa(DEBUG_MEM_ALLOC_REC *psCurrentRecord) + +{ + +/* It's a bug if anything remains allocated at this point. We + * report an error, and simply brute force free anything we find. */ + PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up memory: " + "type=%s " + "CpuVAddr=%p " + "CpuPAddr=0x%08x, " + "allocated @ file=%s,line=%d", + __FUNCTION__, + DebugMemAllocRecordTypeToString(psCurrentRecord->eAllocType), + psCurrentRecord->pvCpuVAddr, + psCurrentRecord->ulCpuPAddr, + psCurrentRecord->pszFileName, + psCurrentRecord->ui32Line)); + switch (psCurrentRecord->eAllocType) + { + case DEBUG_MEM_ALLOC_TYPE_KMALLOC: + KFreeWrapper(psCurrentRecord->pvCpuVAddr); + break; + case DEBUG_MEM_ALLOC_TYPE_IOREMAP: + IOUnmapWrapper(psCurrentRecord->pvCpuVAddr); + break; + case DEBUG_MEM_ALLOC_TYPE_IO: + /* Nothing needed except to free the record */ + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO, psCurrentRecord->pvKey, __FILE__, __LINE__); + break; + case DEBUG_MEM_ALLOC_TYPE_VMALLOC: + VFreeWrapper(psCurrentRecord->pvCpuVAddr); + break; + case DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES: + DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, psCurrentRecord->pvKey, __FILE__, __LINE__); + break; + case DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE: + 
KMemCacheFreeWrapper(psCurrentRecord->pvPrivateData, psCurrentRecord->pvCpuVAddr); + break; +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + case DEBUG_MEM_ALLOC_TYPE_VMAP: + VUnmapWrapper(psCurrentRecord->pvCpuVAddr); + break; +#endif + default: + PVR_ASSERT(0); + } +} +#endif + + +#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK) +static struct shrinker g_sShrinker = +{ + .shrink = ShrinkPagePool, + .seeks = DEFAULT_SEEKS +}; + +static IMG_BOOL g_bShrinkerRegistered; +#endif + +IMG_VOID +LinuxMMCleanup(IMG_VOID) +{ +#if defined(DEBUG_LINUX_MEM_AREAS) + { + if (g_LinuxMemAreaCount) + { + PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: There are %d LinuxMemArea allocation unfreed (%d bytes)", + __FUNCTION__, g_LinuxMemAreaCount, g_LinuxMemAreaWaterMark)); + } + + List_DEBUG_LINUX_MEM_AREA_REC_ForEach(g_LinuxMemAreaRecords, LinuxMMCleanup_MemAreas_ForEachCb); + + if (g_SeqFileMemArea) + { + RemoveProcEntrySeq(g_SeqFileMemArea); + } + } +#endif + +#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK) + if (g_bShrinkerRegistered) + { + unregister_shrinker(&g_sShrinker); + } +#endif + + /* + * The page pool must be freed after any remaining mem areas, but before + * the remaining memory resources. + */ + FreePagePool(); + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + { + + /* + * It's a bug if anything remains allocated at this point. We + * report an error, and simply brute force free anything we find. 
+ */ + List_DEBUG_MEM_ALLOC_REC_ForEach(g_MemoryRecords, LinuxMMCleanup_MemRecords_ForEachVa); + + if (g_SeqFileMemoryRecords) + { + RemoveProcEntrySeq(g_SeqFileMemoryRecords); + } + } +#endif + + if (g_PsLinuxMemAreaCache) + { + KMemCacheDestroyWrapper(g_PsLinuxMemAreaCache); + } + + if (g_PsLinuxPagePoolCache) + { + KMemCacheDestroyWrapper(g_PsLinuxPagePoolCache); + } +} + +PVRSRV_ERROR +LinuxMMInit(IMG_VOID) +{ +#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + LinuxInitMutex(&g_sDebugMutex); +#endif + +#if defined(DEBUG_LINUX_MEM_AREAS) + { + g_SeqFileMemArea = CreateProcReadEntrySeq( + "mem_areas", + NULL, + ProcSeqNextMemArea, + ProcSeqShowMemArea, + ProcSeqOff2ElementMemArea, + ProcSeqStartstopDebugMutex + ); + if (!g_SeqFileMemArea) + { + goto failed; + } + } +#endif + + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + { + g_SeqFileMemoryRecords = CreateProcReadEntrySeq( + "meminfo", + NULL, + ProcSeqNextMemoryRecords, + ProcSeqShowMemoryRecords, + ProcSeqOff2ElementMemoryRecords, + ProcSeqStartstopDebugMutex + ); + if (!g_SeqFileMemoryRecords) + { + goto failed; + } + } +#endif + + g_PsLinuxMemAreaCache = KMemCacheCreateWrapper("img-mm", sizeof(LinuxMemArea), 0, 0); + if (!g_PsLinuxMemAreaCache) + { + PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate mem area kmem_cache", __FUNCTION__)); + goto failed; + } + +#if (PVR_LINUX_MEM_AREA_POOL_MAX_PAGES != 0) + g_iPagePoolMaxEntries = PVR_LINUX_MEM_AREA_POOL_MAX_PAGES; + if (g_iPagePoolMaxEntries <= 0 || g_iPagePoolMaxEntries > INT_MAX/2) + { + g_iPagePoolMaxEntries = INT_MAX/2; + PVR_TRACE(("%s: No limit set for page pool size", __FUNCTION__)); + } + else + { + PVR_TRACE(("%s: Maximum page pool size: %d", __FUNCTION__, g_iPagePoolMaxEntries)); + } + + g_PsLinuxPagePoolCache = KMemCacheCreateWrapper("img-mm-pool", sizeof(LinuxPagePoolEntry), 0, 0); + if (!g_PsLinuxPagePoolCache) + { + PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate page pool kmem_cache", __FUNCTION__)); + goto failed; + } 
+#endif + +#if defined(PVR_LINUX_MEM_AREA_POOL_ALLOW_SHRINK) + register_shrinker(&g_sShrinker); + g_bShrinkerRegistered = IMG_TRUE; +#endif + + return PVRSRV_OK; + +failed: + LinuxMMCleanup(); + return PVRSRV_ERROR_OUT_OF_MEMORY; +} + diff --git a/pvr-source/services4/srvkm/env/linux/mm.h b/pvr-source/services4/srvkm/env/linux/mm.h new file mode 100644 index 0000000..5c01322 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/mm.h @@ -0,0 +1,751 @@ +/*************************************************************************/ /*! +@Title Linux Memory Management. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares various memory management utility functions + for Linux. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef __IMG_LINUX_MM_H__ +#define __IMG_LINUX_MM_H__ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#endif + +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/list.h> + +#include <asm/io.h> + +#define PHYS_TO_PFN(phys) ((phys) >> PAGE_SHIFT) +#define PFN_TO_PHYS(pfn) ((pfn) << PAGE_SHIFT) + +#define RANGE_TO_PAGES(range) (((range) + (PAGE_SIZE - 1)) >> PAGE_SHIFT) + +#define ADDR_TO_PAGE_OFFSET(addr) (((unsigned long)(addr)) & (PAGE_SIZE - 1)) + +#define PAGES_TO_BYTES(pages) ((pages) << PAGE_SHIFT) + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)) +#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_pfn_range(vma, addr, pfn, size, prot) +#else +#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot) +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)) +#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_pfn_range(vma, addr, pfn, size, prot) +#else +#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_page_range(vma, addr, 
PFN_TO_PHYS(pfn), size, prot) +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) +#define VM_INSERT_PAGE(vma, addr, page) vm_insert_page(vma, addr, page) +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)) +#define VM_INSERT_PAGE(vma, addr, page) remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, vma->vm_page_prot); +#else +#define VM_INSERT_PAGE(vma, addr, page) remap_page_range(vma, addr, page_to_phys(page), PAGE_SIZE, vma->vm_page_prot); +#endif +#endif + +static inline IMG_UINT32 VMallocToPhys(IMG_VOID *pCpuVAddr) +{ + return (page_to_phys(vmalloc_to_page(pCpuVAddr)) + ADDR_TO_PAGE_OFFSET(pCpuVAddr)); + +} + +typedef enum { + LINUX_MEM_AREA_IOREMAP, + LINUX_MEM_AREA_EXTERNAL_KV, + LINUX_MEM_AREA_IO, + LINUX_MEM_AREA_VMALLOC, + LINUX_MEM_AREA_ALLOC_PAGES, + LINUX_MEM_AREA_SUB_ALLOC, + LINUX_MEM_AREA_ION, +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + LINUX_MEM_AREA_VMAP, +#endif + LINUX_MEM_AREA_TYPE_COUNT +}LINUX_MEM_AREA_TYPE; + +typedef struct _LinuxMemArea LinuxMemArea; + + +/* FIXME - describe this structure. 
*/ +struct _LinuxMemArea { + LINUX_MEM_AREA_TYPE eAreaType; + union _uData + { + struct _sIORemap + { + /* Note: The memory this represents is _not_ implicitly + * page aligned, neither is its size */ + IMG_CPU_PHYADDR CPUPhysAddr; + IMG_VOID *pvIORemapCookie; + }sIORemap; + struct _sExternalKV + { + /* Note: The memory this represents is _not_ implicitly + * page aligned, neither is its size */ + IMG_BOOL bPhysContig; + union { + /* + * SYSPhysAddr is valid if bPhysContig is true, else + * pSysPhysAddr is valid + */ + IMG_SYS_PHYADDR SysPhysAddr; + IMG_SYS_PHYADDR *pSysPhysAddr; + } uPhysAddr; + IMG_VOID *pvExternalKV; + }sExternalKV; + struct _sIO + { + /* Note: The memory this represents is _not_ implicitly + * page aligned, neither is its size */ + IMG_CPU_PHYADDR CPUPhysAddr; + }sIO; + struct _sVmalloc + { + /* Note the memory this represents _is_ implicitly + * page aligned _and_ so is its size */ + IMG_VOID *pvVmallocAddress; +#if defined(PVR_LINUX_MEM_AREA_USE_VMAP) + struct page **ppsPageList; + IMG_HANDLE hBlockPageList; +#endif + }sVmalloc; + struct _sPageList + { + /* Note the memory this represents _is_ implicitly + * page aligned _and_ so is its size */ + struct page **ppsPageList; + IMG_HANDLE hBlockPageList; + }sPageList; + struct _sIONTilerAlloc + { + /* Note the memory this represents _is_ implicitly + * page aligned _and_ so is its size */ + IMG_CPU_PHYADDR *pCPUPhysAddrs; + IMG_UINT32 ui32NumValidPlanes; + struct ion_handle *psIONHandle[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES]; + IMG_UINT32 planeOffsets[PVRSRV_MAX_NUMBER_OF_MM_BUFFER_PLANES]; + }sIONTilerAlloc; + struct _sSubAlloc + { + /* Note: The memory this represents is _not_ implicitly + * page aligned, neither is its size */ + LinuxMemArea *psParentLinuxMemArea; + IMG_UINT32 ui32ByteOffset; + }sSubAlloc; + }uData; + + IMG_UINT32 ui32ByteSize; /* Size of memory area */ + + IMG_UINT32 ui32AreaFlags; /* Flags passed at creation time */ + + IMG_BOOL bMMapRegistered; /* Registered with mmap code 
*/ + + IMG_BOOL bNeedsCacheInvalidate; /* Cache should be invalidated on first map? */ + + IMG_HANDLE hBMHandle; /* Handle back to BM for this allocation */ + + /* List entry for global list of areas registered for mmap */ + struct list_head sMMapItem; + + /* + * Head of list of all mmap offset structures associated with this + * memory area. + */ + struct list_head sMMapOffsetStructList; +}; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)) +typedef kmem_cache_t LinuxKMemCache; +#else +typedef struct kmem_cache LinuxKMemCache; +#endif + + +/*! + ******************************************************************************* + * @Function LinuxMMInit + * + * @Description + * + * Initialise linux memory management code. + * This should be called during services initialisation. + * + * @Return none +******************************************************************************/ +PVRSRV_ERROR LinuxMMInit(IMG_VOID); + + +/*! + ******************************************************************************* + * + * @Function LinuxMMCleanup + * + * @Description + * + * Cleanup state for the linux memory management code. + * This should be called at services cleanup. + * + * @Return none +******************************************************************************/ +IMG_VOID LinuxMMCleanup(IMG_VOID); + + +/*! + ******************************************************************************* + * @brief Wrappers for kmalloc/kfree with optional /proc/pvr/km tracking + * They can also be used as more concise replacements for OSAllocMem + * in Linux specific code. 
+ * + * @param ui32ByteSize + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define KMallocWrapper(ui32ByteSize, uFlags) _KMallocWrapper(ui32ByteSize, uFlags, __FILE__, __LINE__) +#else +#define KMallocWrapper(ui32ByteSize, uFlags) _KMallocWrapper(ui32ByteSize, uFlags, NULL, 0) +#endif +IMG_VOID *_KMallocWrapper(IMG_UINT32 ui32ByteSize, gfp_t uFlags, IMG_CHAR *szFileName, IMG_UINT32 ui32Line); + + +/*! + ******************************************************************************* + * @brief + * + * @param pvCpuVAddr + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, __FILE__, __LINE__) +#else +#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, NULL, 0) +#endif +IMG_VOID _KFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); + + +/*! + ******************************************************************************* + * @brief + * + * @param ui32Bytes + * @param ui32AllocFlags + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, __FILE__, __LINE__) +#else +#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, NULL, 0) +#endif +IMG_VOID *_VMallocWrapper(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AllocFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); + + +/*! 
+ ******************************************************************************* + * @brief + * + * @param pvCpuVAddr + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, __FILE__, __LINE__) +#else +#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, NULL, 0) +#endif +IMG_VOID _VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); + + +/*! + ******************************************************************************* + * @brief Allocates virtually contiguous pages + * + * @param ui32Bytes number of bytes to reserve + * @param ui32AreaFlags Heap caching and mapping Flags + * + * @return Page-aligned address of virtual allocation or NULL on error + ******************************************************************************/ +LinuxMemArea *NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags); + + +/*! + ******************************************************************************* + * @brief Deallocates virtually contiguous pages + * + * @param LinuxMemArea from NewVMallocLinuxMemArea + * + ******************************************************************************/ +IMG_VOID FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea); + + +/*! 
+ ******************************************************************************* + * @brief Reserve physical IO memory and create a CPU virtual mapping for it + * + * @param BasePAddr + * @param ui32Bytes + * @param ui32MappingFlags + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \ + _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, __FILE__, __LINE__) +#else +#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \ + _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, NULL, 0) +#endif +IMG_VOID *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32MappingFlags, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32Line); + + +/*! + ******************************************************************************* + * @brief Reserve physical IO memory and create a CPU virtual mapping for it + * + * @param BasePAddr + * @param ui32Bytes + * @param ui32AreaFlags Heap caching and mapping Flags + * + * @return + ******************************************************************************/ +LinuxMemArea *NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags); + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ********************************************************************************/ +IMG_VOID FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea); + +/*! 
+ ******************************************************************************* + * @brief Register physical memory which already has a CPU virtual mapping + * + * @param pBasePAddr + * @param pvCPUVAddr + * @param bPhysContig + * @param ui32Bytes + * @param ui32AreaFlags Heap caching and mapping Flags + * + * @return + ******************************************************************************/ +LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags); + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ******************************************************************************/ +IMG_VOID FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea); + + +/*! + ****************************************************************************** + * @brief Unmaps an IO memory mapping created using IORemap + * + * @param pvIORemapCookie + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define IOUnmapWrapper(pvIORemapCookie) \ + _IOUnmapWrapper(pvIORemapCookie, __FILE__, __LINE__) +#else +#define IOUnmapWrapper(pvIORemapCookie) \ + _IOUnmapWrapper(pvIORemapCookie, NULL, 0) +#endif +IMG_VOID _IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * @param ui32ByteOffset + * + * @return + ******************************************************************************/ +struct page *LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset); + + +/*! 
+ ******************************************************************************* + * @brief + * + * @param pszName + * @param Size + * @param Align + * @param ui32Flags + * + * @return + ******************************************************************************/ +LinuxKMemCache *KMemCacheCreateWrapper(IMG_CHAR *pszName, size_t Size, size_t Align, IMG_UINT32 ui32Flags); + + +/*! + ******************************************************************************* + * @brief + * + * @param psCache + * + * @return + ******************************************************************************/ +IMG_VOID KMemCacheDestroyWrapper(LinuxKMemCache *psCache); + + +/*! + ******************************************************************************* + * @brief + * + * @param psCache + * @param Flags + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, __FILE__, __LINE__) +#else +#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, NULL, 0) +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)) +IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, gfp_t Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); +#else +IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, int Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); +#endif + +/*! 
+ ******************************************************************************* + * @brief + * + * @param psCache + * @param pvObject + * + * @return + ******************************************************************************/ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, __FILE__, __LINE__) +#else +#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, NULL, 0) +#endif +IMG_VOID _KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line); + + +/*! + ******************************************************************************* + * @brief + * + * @param psCache + * + * @return + ******************************************************************************/ +const IMG_CHAR *KMemCacheNameWrapper(LinuxKMemCache *psCache); + + +/*! + ******************************************************************************* + * @brief + * + * @param BasePAddr + * @param ui32Bytes + * @param ui32AreaFlags Heap caching and mapping Flags + * + * @return + ******************************************************************************/ +LinuxMemArea *NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags); + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ******************************************************************************/ +IMG_VOID FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea); + + +/*! + ******************************************************************************* + * @brief + * + * @param ui32Bytes + * @param ui32AreaFlags E.g Heap caching and mapping Flags + * + * @return + ******************************************************************************/ +LinuxMemArea *NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags); + + +/*! 
+ ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ******************************************************************************/ +IMG_VOID FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea); + + +#if defined(CONFIG_ION_OMAP) + +/*! + ******************************************************************************* + * @brief + * + * @param ui32Bytes + * @param ui32AreaFlags E.g Heap caching and mapping Flags + * + * @return + ******************************************************************************/ +LinuxMemArea * +NewIONLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags, + IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength); + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ******************************************************************************/ +IMG_VOID FreeIONLinuxMemArea(LinuxMemArea *psLinuxMemArea); + +IMG_INT32 +GetIONLinuxMemAreaInfo(LinuxMemArea *psLinuxMemArea, IMG_UINT32* ui32AddressOffsets, + IMG_UINT32* ui32NumAddr); + +#else /* defined(CONFIG_ION_OMAP) */ + +static inline LinuxMemArea * +NewIONLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags, + IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength) +{ + PVR_UNREFERENCED_PARAMETER(ui32Bytes); + PVR_UNREFERENCED_PARAMETER(ui32AreaFlags); + PVR_UNREFERENCED_PARAMETER(pvPrivData); + PVR_UNREFERENCED_PARAMETER(ui32PrivDataLength); + BUG(); + return IMG_NULL; +} + +static inline IMG_VOID FreeIONLinuxMemArea(LinuxMemArea *psLinuxMemArea) +{ + PVR_UNREFERENCED_PARAMETER(psLinuxMemArea); + BUG(); +} + +static inline IMG_INT32 +GetIONLinuxMemAreaInfo(LinuxMemArea *psLinuxMemArea, IMG_UINT32* ui32AddressOffsets, + IMG_UINT32* ui32NumAddr); +{ + PVR_UNREFERENCED_PARAMETER(psLinuxMemArea); + PVR_UNREFERENCED_PARAMETER(ui32AddressOffsets); + PVR_UNREFERENCED_PARAMETER(ui32NumAddr); + BUG(); 
+ return -1; +} + +#endif /* defined(CONFIG_ION_OMAP) */ + + +/*! + ******************************************************************************* + * @brief + * + * @param psParentLinuxMemArea + * @param ui32ByteOffset + * @param ui32Bytes + * + * @return + ******************************************************************************/ +LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea, + IMG_UINT32 ui32ByteOffset, + IMG_UINT32 ui32Bytes); + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ******************************************************************************/ +IMG_VOID LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea); + + +/*! + ******************************************************************************* + * @brief For debug builds, LinuxMemAreas are tracked in /proc + * + * @param psLinuxMemArea + * + ******************************************************************************/ +#if defined(LINUX_MEM_AREAS_DEBUG) +IMG_VOID LinuxMemAreaRegister(LinuxMemArea *psLinuxMemArea); +#else +#define LinuxMemAreaRegister(X) +#endif + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * + * @return + ******************************************************************************/ +IMG_VOID *LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea); + + +/*! + ******************************************************************************* + * @brief + * + * @param psLinuxMemArea + * @param ui32ByteOffset + * + * @return + ******************************************************************************/ +IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset); + + +#define LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) PHYS_TO_PFN(LinuxMemAreaToCpuPAddr(psLinuxMemArea, ui32ByteOffset).uiAddr) + +/*! 
+ ******************************************************************************* + * @brief Indicate whether a LinuxMemArea is physically contiguous + * + * @param psLinuxMemArea + * + * @return IMG_TRUE if the physical address range is contiguous, else IMG_FALSE + ******************************************************************************/ +IMG_BOOL LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea); + +/*! + ******************************************************************************* + * @brief Return the real underlying LinuxMemArea + * + * @param psLinuxMemArea + * + * @return The real underlying LinuxMemArea + ******************************************************************************/ +static inline LinuxMemArea * +LinuxMemAreaRoot(LinuxMemArea *psLinuxMemArea) +{ + if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC) + { + return psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea; + } + else + { + return psLinuxMemArea; + } +} + + +/*! + ******************************************************************************* + * @brief Return type of real underlying LinuxMemArea + * + * @param psLinuxMemArea + * + * @return The areas eAreaType or for SUB areas; return the parents eAreaType. + ******************************************************************************/ +static inline LINUX_MEM_AREA_TYPE +LinuxMemAreaRootType(LinuxMemArea *psLinuxMemArea) +{ + return LinuxMemAreaRoot(psLinuxMemArea)->eAreaType; +} + + +/*! + ******************************************************************************* + * @brief Converts the enum type of a LinuxMemArea to a const string + * + * @param eMemAreaType + * + * @return const string representation of type + ******************************************************************************/ +const IMG_CHAR *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType); + + +/*! 
+ ******************************************************************************* + * @brief + * + * @param ui32Flags + * + * @return + ******************************************************************************/ +#if defined(DEBUG) || defined(DEBUG_LINUX_MEM_AREAS) +const IMG_CHAR *HAPFlagsToString(IMG_UINT32 ui32Flags); +#endif + +#endif /* __IMG_LINUX_MM_H__ */ + diff --git a/pvr-source/services4/srvkm/env/linux/mmap.c b/pvr-source/services4/srvkm/env/linux/mmap.c new file mode 100644 index 0000000..1a485c4 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/mmap.c @@ -0,0 +1,1656 @@ +/*************************************************************************/ /*! +@Title Linux mmap interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#endif + +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) +#include <linux/wrapper.h> +#endif +#include <linux/slab.h> +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +#include <linux/highmem.h> +#endif +#include <asm/io.h> +#include <asm/page.h> +#include <asm/shmparam.h> +#include <asm/pgtable.h> +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) +#include <linux/sched.h> +#include <asm/current.h> +#endif +#if defined(SUPPORT_DRI_DRM) +#include <drm/drmP.h> +#endif + +#include "services_headers.h" + +#include "pvrmmap.h" +#include "mutils.h" +#include "mmap.h" +#include "mm.h" +#include "proc.h" +#include "mutex.h" +#include "handle.h" +#include "perproc.h" +#include "env_perproc.h" +#include "bridged_support.h" +#if defined(SUPPORT_DRI_DRM) +#include "pvr_drm.h" +#endif + +#if !defined(PVR_SECURE_HANDLES) && !defined (SUPPORT_SID_INTERFACE) +#error "The mmap code requires PVR_SECURE_HANDLES" +#endif + +/* WARNING: + * The mmap code has its own mutex, to prevent a possible deadlock, + * when using gPVRSRVLock. + * The Linux kernel takes the mm->mmap_sem before calling the mmap + * entry points (PVRMMap, MMapVOpen, MMapVClose), but the ioctl + * entry point may take mm->mmap_sem during fault handling, or + * before calling get_user_pages. If gPVRSRVLock was used in the + * mmap entry points, a deadlock could result, due to the ioctl + * and mmap code taking the two locks in different orders. + * As a corollary to this, the mmap entry points must not call + * any driver code that relies on gPVRSRVLock is held. 
+ */ +PVRSRV_LINUX_MUTEX g_sMMapMutex; + +static LinuxKMemCache *g_psMemmapCache = NULL; +static LIST_HEAD(g_sMMapAreaList); +static LIST_HEAD(g_sMMapOffsetStructList); +#if defined(DEBUG_LINUX_MMAP_AREAS) +static IMG_UINT32 g_ui32RegisteredAreas = 0; +static IMG_UINT32 g_ui32TotalByteSize = 0; +#endif + + +#if defined(DEBUG_LINUX_MMAP_AREAS) +static struct proc_dir_entry *g_ProcMMap; +#endif /* defined(DEBUG_LINUX_MMAP_AREAS) */ + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) +/* + * Now that we are using mmap2 in srvclient, almost (*) the full 32 + * bit offset is available. The range of values is divided into two. + * The first part of the range, from FIRST_PHYSICAL_PFN to + * LAST_PHYSICAL_PFN, is for raw page mappings (VM_PFNMAP). The + * resulting 43 bit (*) physical address range should be enough for + * the current range of processors we support. + * + * NB: (*) -- the above figures assume 4KB page size. The offset + * argument to mmap2() is in units of 4,096 bytes regardless of page + * size. Thus, we lose (PAGE_SHIFT-12) bits of resolution on other + * architectures. + * + * The second part of the range, from FIRST_SPECIAL_PFN to LAST_SPECIAL_PFN, + * is used for all other mappings. These other mappings will always + * consist of pages with associated page structures, and need not + * represent a contiguous range of physical addresses. 
+ * + */ +#define MMAP2_PGOFF_RESOLUTION (32-PAGE_SHIFT+12) +#define RESERVED_PGOFF_BITS 1 +#define MAX_MMAP_HANDLE ((1UL<<(MMAP2_PGOFF_RESOLUTION-RESERVED_PGOFF_BITS))-1) + +#define FIRST_PHYSICAL_PFN 0 +#define LAST_PHYSICAL_PFN (FIRST_PHYSICAL_PFN + MAX_MMAP_HANDLE) +#define FIRST_SPECIAL_PFN (LAST_PHYSICAL_PFN + 1) +#define LAST_SPECIAL_PFN (FIRST_SPECIAL_PFN + MAX_MMAP_HANDLE) + +#else /* !defined(PVR_MAKE_ALL_PFNS_SPECIAL) */ + +#if PAGE_SHIFT != 12 +#error This build variant has not yet been made non-4KB page-size aware +#endif + +/* + * Since we no longer have to worry about clashes with the mmap + * offsets used for pure PFN mappings (VM_PFNMAP), there is greater + * freedom in choosing the mmap handles. This is useful if the + * mmap offset space has to be shared with another driver component. + */ + +#if defined(PVR_MMAP_OFFSET_BASE) +#define FIRST_SPECIAL_PFN PVR_MMAP_OFFSET_BASE +#else +#define FIRST_SPECIAL_PFN 0x80000000UL +#endif + +#if defined(PVR_NUM_MMAP_HANDLES) +#define MAX_MMAP_HANDLE PVR_NUM_MMAP_HANDLES +#else +#define MAX_MMAP_HANDLE 0x7fffffffUL +#endif + +#endif /* !defined(PVR_MAKE_ALL_PFNS_SPECIAL) */ + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) +static inline IMG_BOOL +PFNIsPhysical(IMG_UINT32 pfn) +{ + /* Unsigned, no need to compare >=0 */ + return (/*(pfn >= FIRST_PHYSICAL_PFN) &&*/ (pfn <= LAST_PHYSICAL_PFN)) ? IMG_TRUE : IMG_FALSE; +} + +static inline IMG_BOOL +PFNIsSpecial(IMG_UINT32 pfn) +{ + /* Unsigned, no need to compare <=MAX_UINT */ + return ((pfn >= FIRST_SPECIAL_PFN) /*&& (pfn <= LAST_SPECIAL_PFN)*/) ? 
IMG_TRUE : IMG_FALSE; +} +#endif + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) +static inline IMG_HANDLE +MMapOffsetToHandle(IMG_UINT32 pfn) +{ + if (PFNIsPhysical(pfn)) + { + PVR_ASSERT(PFNIsPhysical(pfn)); + return IMG_NULL; + } + return (IMG_HANDLE)(pfn - FIRST_SPECIAL_PFN); +} +#endif + +static inline IMG_UINT32 +#if defined (SUPPORT_SID_INTERFACE) +HandleToMMapOffset(IMG_SID hHandle) +#else +HandleToMMapOffset(IMG_HANDLE hHandle) +#endif +{ + IMG_UINT32 ulHandle = (IMG_UINT32)hHandle; + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + if (PFNIsSpecial(ulHandle)) + { + PVR_ASSERT(PFNIsSpecial(ulHandle)); + return 0; + } +#endif + return ulHandle + FIRST_SPECIAL_PFN; +} + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) +/* + * Determine whether physical or special mappings will be used for + * a given memory area. At present, this decision is made on + * whether the mapping represents a contiguous range of physical + * addresses, which is a requirement for raw page mappings (VM_PFNMAP). + * In the VMA structure for such a mapping, vm_pgoff is the PFN + * (page frame number, the physical address divided by the page size) + * of the first page in the VMA. The second page is assumed to have + * PFN (vm_pgoff + 1), the third (vm_pgoff + 2) and so on. + */ +static inline IMG_BOOL +LinuxMemAreaUsesPhysicalMap(LinuxMemArea *psLinuxMemArea) +{ + return LinuxMemAreaPhysIsContig(psLinuxMemArea); +} +#endif + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) +static inline IMG_UINT32 +GetCurrentThreadID(IMG_VOID) +{ + /* + * The PID is the thread ID, as each thread is a + * seperate process. + */ + return (IMG_UINT32)current->pid; +} +#endif + +/* + * Create an offset structure, which is used to hold per-process + * mmap data. 
+ */ +static PKV_OFFSET_STRUCT +CreateOffsetStruct(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize) +{ + PKV_OFFSET_STRUCT psOffsetStruct; +#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS) + const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea)); +#endif + +#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS) + PVR_DPF((PVR_DBG_MESSAGE, + "%s(%s, psLinuxMemArea: 0x%p, ui32AllocFlags: 0x%8x)", + __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags)); +#endif + + PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC); + + PVR_ASSERT(psLinuxMemArea->bMMapRegistered); + + psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL); + if(psOffsetStruct == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR,"PVRMMapRegisterArea: Couldn't alloc another mapping record from cache")); + return IMG_NULL; + } + + psOffsetStruct->ui32MMapOffset = ui32Offset; + + psOffsetStruct->psLinuxMemArea = psLinuxMemArea; + + psOffsetStruct->ui32RealByteSize = ui32RealByteSize; + + /* + * We store the TID in case two threads within a process + * generate the same offset structure, and both end up on the + * list of structures waiting to be mapped, at the same time. + * This could happen if two sub areas within the same page are + * being mapped at the same time. + * The TID allows the mmap entry point to distinguish which + * mapping is being done by which thread. 
+ */ +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + psOffsetStruct->ui32TID = GetCurrentThreadID(); +#endif + psOffsetStruct->ui32PID = OSGetCurrentProcessIDKM(); + +#if defined(DEBUG_LINUX_MMAP_AREAS) + /* Extra entries to support proc filesystem debug info */ + psOffsetStruct->pszName = pszName; +#endif + + list_add_tail(&psOffsetStruct->sAreaItem, &psLinuxMemArea->sMMapOffsetStructList); + + return psOffsetStruct; +} + + +static IMG_VOID +DestroyOffsetStruct(PKV_OFFSET_STRUCT psOffsetStruct) +{ +#ifdef DEBUG + IMG_CPU_PHYADDR CpuPAddr; + CpuPAddr = LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0); +#endif + + list_del(&psOffsetStruct->sAreaItem); + + if (psOffsetStruct->bOnMMapList) + { + list_del(&psOffsetStruct->sMMapItem); + } + +#ifdef DEBUG + PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: " + "psLinuxMemArea=%p, CpuPAddr=0x%08X", __FUNCTION__, + psOffsetStruct->psLinuxMemArea, + CpuPAddr.uiAddr)); +#endif + + KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct); +} + + +/* + * There are no alignment constraints for mapping requests made by user + * mode Services. For this, and potentially other reasons, the + * mapping created for a users request may look different to the + * original request in terms of size and alignment. + * + * This function determines an offset that the user can add to the mapping + * that is _actually_ created which will point to the memory they are + * _really_ interested in. + * + */ +static inline IMG_VOID +DetermineUsersSizeAndByteOffset(LinuxMemArea *psLinuxMemArea, + IMG_UINT32 *pui32RealByteSize, + IMG_UINT32 *pui32ByteOffset) +{ + IMG_UINT32 ui32PageAlignmentOffset; + IMG_CPU_PHYADDR CpuPAddr; + + CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0); + ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr); + + *pui32ByteOffset = ui32PageAlignmentOffset; + + *pui32RealByteSize = PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset); +} + + +/*! 
+ ******************************************************************************* + + @Function PVRMMapOSMemHandleToMMapData + + @Description + + Determine various parameters needed to mmap a memory area, and to + locate the memory within the mapped area. + + @input psPerProc : Per-process data. + @input hMHandle : Memory handle. + @input pui32MMapOffset : pointer to location for returned mmap offset. + @input pui32ByteOffset : pointer to location for returned byte offset. + @input pui32RealByteSize : pointer to location for returned real byte size. + @input pui32UserVaddr : pointer to location for returned user mode address. + + @output pui32MMapOffset : points to mmap offset to be used in mmap2 sys call. + @output pui32ByteOffset : points to byte offset of start of memory + within mapped area returned by mmap2. + @output pui32RealByteSize : points to size of area to be mapped. + @output pui32UserVAddr : points to user mode address of start of + mapping, or 0 if it hasn't been mapped yet. + + @Return PVRSRV_ERROR : PVRSRV_OK, or error code. 
+ + ******************************************************************************/ +PVRSRV_ERROR +PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hMHandle, +#else + IMG_HANDLE hMHandle, +#endif + IMG_UINT32 *pui32MMapOffset, + IMG_UINT32 *pui32ByteOffset, + IMG_UINT32 *pui32RealByteSize, + IMG_UINT32 *pui32UserVAddr) +{ + LinuxMemArea *psLinuxMemArea; + PKV_OFFSET_STRUCT psOffsetStruct; + IMG_HANDLE hOSMemHandle; + PVRSRV_ERROR eError; + + LinuxLockMutex(&g_sMMapMutex); + + PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE); + + eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle); + if (eError != PVRSRV_OK) + { +#if defined (SUPPORT_SID_INTERFACE) + PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %x failed", __FUNCTION__, hMHandle)); +#else + PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %p failed", __FUNCTION__, hMHandle)); +#endif + + goto exit_unlock; + } + + psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + if (psLinuxMemArea && (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ION)) + { + *pui32RealByteSize = psLinuxMemArea->ui32ByteSize; + *pui32ByteOffset = psLinuxMemArea->uData.sIONTilerAlloc.planeOffsets[0]; + /* The offsets for the subsequent planes must be co-aligned for user + * space mapping and sgx 544 and later. I.e. 
+ * psLinuxMemArea->uData.sIONTilerAlloc.planeOffsets[n]; + */ + } + else + { + + /* Sparse mappings have to ask the BM for the virtual size */ + if (psLinuxMemArea->hBMHandle) + { + *pui32RealByteSize = BM_GetVirtualSize(psLinuxMemArea->hBMHandle); + *pui32ByteOffset = 0; + } + else + { + DetermineUsersSizeAndByteOffset(psLinuxMemArea, + pui32RealByteSize, + pui32ByteOffset); + } + } + + /* Check whether this memory area has already been mapped */ + list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem) + { + if (psPerProc->ui32PID == psOffsetStruct->ui32PID) + { + if (!psLinuxMemArea->hBMHandle) + { + PVR_ASSERT(*pui32RealByteSize == psOffsetStruct->ui32RealByteSize); + } + /* + * User mode locking is required to stop two threads racing to + * map the same memory area. The lock should prevent a + * second thread retrieving mmap data for a given handle, + * before the first thread has done the mmap. + * Without locking, both threads may attempt the mmap, + * and one of them will fail. 
+ */ + *pui32MMapOffset = psOffsetStruct->ui32MMapOffset; + *pui32UserVAddr = psOffsetStruct->ui32UserVAddr; + PVRSRVOffsetStructIncRef(psOffsetStruct); + + eError = PVRSRV_OK; + goto exit_unlock; + } + } + + /* Memory area won't have been mapped yet */ + *pui32UserVAddr = 0; + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + if (LinuxMemAreaUsesPhysicalMap(psLinuxMemArea)) + { + *pui32MMapOffset = LinuxMemAreaToCpuPFN(psLinuxMemArea, 0); + PVR_ASSERT(PFNIsPhysical(*pui32MMapOffset)); + } + else +#endif + { + *pui32MMapOffset = HandleToMMapOffset(hMHandle); +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + PVR_ASSERT(PFNIsSpecial(*pui32MMapOffset)); +#endif + } + + psOffsetStruct = CreateOffsetStruct(psLinuxMemArea, *pui32MMapOffset, *pui32RealByteSize); + if (psOffsetStruct == IMG_NULL) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + goto exit_unlock; + } + + /* + * Offset structures representing physical mappings are added to + * a list, so that they can be located when the memory area is mapped. + */ + list_add_tail(&psOffsetStruct->sMMapItem, &g_sMMapOffsetStructList); + + psOffsetStruct->bOnMMapList = IMG_TRUE; + + PVRSRVOffsetStructIncRef(psOffsetStruct); + + eError = PVRSRV_OK; + + /* Need to scale up the offset to counter the shifting that + is done in the mmap2() syscall, as it expects the pgoff + argument to be in units of 4,096 bytes irrespective of + page size */ + *pui32MMapOffset = *pui32MMapOffset << (PAGE_SHIFT - 12); + +exit_unlock: + LinuxUnLockMutex(&g_sMMapMutex); + + return eError; +} + + +/*! + ******************************************************************************* + + @Function PVRMMapReleaseMMapData + + @Description + + Release mmap data. + + @input psPerProc : Per-process data. + @input hMHandle : Memory handle. + @input pbMUnmap : pointer to location for munmap flag. + @input pui32UserVAddr : pointer to location for user mode address of mapping. + @input pui32ByteSize : pointer to location for size of mapping. 
+ + @Output pbMUnmap : points to flag that indicates whether an munmap is + required. + @output pui32UserVAddr : points to user mode address to munmap. + + @Return PVRSRV_ERROR : PVRSRV_OK, or error code. + + ******************************************************************************/ +PVRSRV_ERROR +PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hMHandle, +#else + IMG_HANDLE hMHandle, +#endif + IMG_BOOL *pbMUnmap, + IMG_UINT32 *pui32RealByteSize, + IMG_UINT32 *pui32UserVAddr) +{ + LinuxMemArea *psLinuxMemArea; + PKV_OFFSET_STRUCT psOffsetStruct; + IMG_HANDLE hOSMemHandle; + PVRSRV_ERROR eError; + IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); + + LinuxLockMutex(&g_sMMapMutex); + + PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE); + + eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle); + if (eError != PVRSRV_OK) + { +#if defined (SUPPORT_SID_INTERFACE) + PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %x failed", __FUNCTION__, hMHandle)); +#else + PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle %p failed", __FUNCTION__, hMHandle)); +#endif + + goto exit_unlock; + } + + psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + /* Find the offset structure */ + list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem) + { + if (psOffsetStruct->ui32PID == ui32PID) + { + if (psOffsetStruct->ui32RefCount == 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to release mmap data with zero reference count for offset struct 0x%p, memory area %p", __FUNCTION__, psOffsetStruct, psLinuxMemArea)); + eError = PVRSRV_ERROR_STILL_MAPPED; + goto exit_unlock; + } + + PVRSRVOffsetStructDecRef(psOffsetStruct); + + *pbMUnmap = (IMG_BOOL)((psOffsetStruct->ui32RefCount == 0) && (psOffsetStruct->ui32UserVAddr != 0)); + + *pui32UserVAddr = (*pbMUnmap) ? psOffsetStruct->ui32UserVAddr : 0; + *pui32RealByteSize = (*pbMUnmap) ? 
psOffsetStruct->ui32RealByteSize : 0; + + eError = PVRSRV_OK; + goto exit_unlock; + } + } + + /* MMap data not found */ +#if defined (SUPPORT_SID_INTERFACE) + PVR_DPF((PVR_DBG_ERROR, "%s: Mapping data not found for handle %x (memory area %p)", __FUNCTION__, hMHandle, psLinuxMemArea)); +#else + PVR_DPF((PVR_DBG_ERROR, "%s: Mapping data not found for handle %p (memory area %p)", __FUNCTION__, hMHandle, psLinuxMemArea)); +#endif + + eError = PVRSRV_ERROR_MAPPING_NOT_FOUND; + +exit_unlock: + LinuxUnLockMutex(&g_sMMapMutex); + + return eError; +} + +static inline PKV_OFFSET_STRUCT +FindOffsetStructByOffset(IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize) +{ + PKV_OFFSET_STRUCT psOffsetStruct; +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + IMG_UINT32 ui32TID = GetCurrentThreadID(); +#endif + IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); + + list_for_each_entry(psOffsetStruct, &g_sMMapOffsetStructList, sMMapItem) + { + if (ui32Offset == psOffsetStruct->ui32MMapOffset && ui32RealByteSize == psOffsetStruct->ui32RealByteSize && psOffsetStruct->ui32PID == ui32PID) + { +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + /* + * If the offset is physical, make sure the thread IDs match, + * as different threads may be mapping different memory areas + * with the same offset. + */ + if (!PFNIsPhysical(ui32Offset) || psOffsetStruct->ui32TID == ui32TID) +#endif + { + return psOffsetStruct; + } + } + } + + return IMG_NULL; +} + + +/* + * Map a memory area into user space. + * Note, the ui32ByteOffset is _not_ implicitly page aligned since + * LINUX_MEM_AREA_SUB_ALLOC LinuxMemAreas have no alignment constraints. 
+ */ +static IMG_BOOL +DoMapToUser(LinuxMemArea *psLinuxMemArea, + struct vm_area_struct* ps_vma, + IMG_UINT32 ui32ByteOffset) +{ + IMG_UINT32 ui32ByteSize; + + if ((psLinuxMemArea->hBMHandle) && (ui32ByteOffset != 0)) + { + /* Partial mapping of sparse allocations should never happen */ + return IMG_FALSE; + } + + if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC) + { + return DoMapToUser(LinuxMemAreaRoot(psLinuxMemArea), /* PRQA S 3670 */ /* allow recursion */ + ps_vma, + psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset + ui32ByteOffset); + } + + /* + * Note that ui32ByteSize may be larger than the size of the memory + * area being mapped, as the former is a multiple of the page size. + */ + ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start; + PVR_ASSERT(ADDR_TO_PAGE_OFFSET(ui32ByteSize) == 0); + +#if defined (__sparc__) + /* + * For LINUX_MEM_AREA_EXTERNAL_KV, we don't know where the address range + * we are being asked to map has come from, that is, whether it is memory + * or I/O. For all architectures other than SPARC, there is no distinction. + * Since we don't currently support SPARC, we won't worry about it. + */ +#error "SPARC not supported" +#endif + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + if (PFNIsPhysical(ps_vma->vm_pgoff)) + { + IMG_INT result; + + PVR_ASSERT(LinuxMemAreaPhysIsContig(psLinuxMemArea)); + PVR_ASSERT(LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) == ps_vma->vm_pgoff); + /* + * Since the memory is contiguous, we can map the whole range in one + * go . + */ + + PVR_ASSERT(psLinuxMemArea->hBMHandle == IMG_NULL); + + result = IO_REMAP_PFN_RANGE(ps_vma, ps_vma->vm_start, ps_vma->vm_pgoff, ui32ByteSize, ps_vma->vm_page_prot); + + if(result == 0) + { + return IMG_TRUE; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Failed to map contiguous physical address range (%d), trying non-contiguous path", __FUNCTION__, result)); + } +#endif + + { + /* + * Memory may be non-contiguous, so we map the range page, + * by page. 
Since VM_PFNMAP mappings are assumed to be physically + * contiguous, we can't legally use REMAP_PFN_RANGE (that is, we + * could, but the resulting VMA may confuse other bits of the kernel + * that attempt to interpret it). + * The only alternative is to use VM_INSERT_PAGE, which requires + * finding the page structure corresponding to each page, or + * if mixed maps are supported (VM_MIXEDMAP), vm_insert_mixed. + */ + IMG_UINT32 ulVMAPos; + IMG_UINT32 ui32ByteEnd = ui32ByteOffset + ui32ByteSize; + IMG_UINT32 ui32PA; + IMG_UINT32 ui32AdjustedPA = ui32ByteOffset; +#if defined(PVR_MAKE_ALL_PFNS_SPECIAL) + IMG_BOOL bMixedMap = IMG_FALSE; +#endif + /* First pass, validate the page frame numbers */ + for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE) + { + IMG_UINT32 pfn; + IMG_BOOL bMapPage = IMG_TRUE; + + if (psLinuxMemArea->hBMHandle) + { + if (!BM_MapPageAtOffset(psLinuxMemArea->hBMHandle, ui32PA)) + { + bMapPage = IMG_FALSE; + } + } + + if (bMapPage) + { + pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32AdjustedPA); + if (!pfn_valid(pfn)) + { +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + PVR_DPF((PVR_DBG_ERROR,"%s: Error - PFN invalid: 0x%x", __FUNCTION__, pfn)); + return IMG_FALSE; +#else + bMixedMap = IMG_TRUE; +#endif + } + ui32AdjustedPA += PAGE_SIZE; + } + } + +#if defined(PVR_MAKE_ALL_PFNS_SPECIAL) + if (bMixedMap) + { + ps_vma->vm_flags |= VM_MIXEDMAP; + } +#endif + /* Second pass, get the page structures and insert the pages */ + ulVMAPos = ps_vma->vm_start; + ui32AdjustedPA = ui32ByteOffset; + for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE) + { + IMG_UINT32 pfn; + IMG_INT result; + IMG_BOOL bMapPage = IMG_TRUE; + + if (psLinuxMemArea->hBMHandle) + { + /* We have a sparse allocation, check if this page should be mapped */ + if (!BM_MapPageAtOffset(psLinuxMemArea->hBMHandle, ui32PA)) + { + bMapPage = IMG_FALSE; + } + } + + if (bMapPage) + { + pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32AdjustedPA); + +#if 
defined(PVR_MAKE_ALL_PFNS_SPECIAL) + if (bMixedMap) + { + result = vm_insert_mixed(ps_vma, ulVMAPos, pfn); + if(result != 0) + { + PVR_DPF((PVR_DBG_ERROR,"%s: Error - vm_insert_mixed failed (%d)", __FUNCTION__, result)); + return IMG_FALSE; + } + } + else +#endif + { + struct page *psPage; + + PVR_ASSERT(pfn_valid(pfn)); + + psPage = pfn_to_page(pfn); + + result = VM_INSERT_PAGE(ps_vma, ulVMAPos, psPage); + if(result != 0) + { + PVR_DPF((PVR_DBG_ERROR,"%s: Error - VM_INSERT_PAGE failed (%d)", __FUNCTION__, result)); + return IMG_FALSE; + } + } + ui32AdjustedPA += PAGE_SIZE; + } + ulVMAPos += PAGE_SIZE; + } + } + + return IMG_TRUE; +} + + +static IMG_VOID +MMapVOpenNoLock(struct vm_area_struct* ps_vma) +{ + PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data; + + PVR_ASSERT(psOffsetStruct != IMG_NULL); + PVR_ASSERT(!psOffsetStruct->bOnMMapList); + + PVRSRVOffsetStructIncMapped(psOffsetStruct); + + if (psOffsetStruct->ui32Mapped > 1) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Offset structure 0x%p is being shared across processes (psOffsetStruct->ui32Mapped: %u)", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32Mapped)); + PVR_ASSERT((ps_vma->vm_flags & VM_DONTCOPY) == 0); + } + +#if defined(DEBUG_LINUX_MMAP_AREAS) + + PVR_DPF((PVR_DBG_MESSAGE, + "%s: psLinuxMemArea 0x%p, KVAddress 0x%p MMapOffset %d, ui32Mapped %d", + __FUNCTION__, + psOffsetStruct->psLinuxMemArea, + LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea), + psOffsetStruct->ui32MMapOffset, + psOffsetStruct->ui32Mapped)); +#endif +} + + +/* + * Linux mmap open entry point. 
+ */ +static void +MMapVOpen(struct vm_area_struct* ps_vma) +{ + LinuxLockMutex(&g_sMMapMutex); + + MMapVOpenNoLock(ps_vma); + + LinuxUnLockMutex(&g_sMMapMutex); +} + + +static IMG_VOID +MMapVCloseNoLock(struct vm_area_struct* ps_vma) +{ + PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data; + PVR_ASSERT(psOffsetStruct != IMG_NULL); + +#if defined(DEBUG_LINUX_MMAP_AREAS) + PVR_DPF((PVR_DBG_MESSAGE, + "%s: psLinuxMemArea %p, CpuVAddr %p ui32MMapOffset %d, ui32Mapped %d", + __FUNCTION__, + psOffsetStruct->psLinuxMemArea, + LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea), + psOffsetStruct->ui32MMapOffset, + psOffsetStruct->ui32Mapped)); +#endif + + PVR_ASSERT(!psOffsetStruct->bOnMMapList); + PVRSRVOffsetStructDecMapped(psOffsetStruct); + if (psOffsetStruct->ui32Mapped == 0) + { + if (psOffsetStruct->ui32RefCount != 0) + { + PVR_DPF((PVR_DBG_MESSAGE, "%s: psOffsetStruct %p has non-zero reference count (ui32RefCount = %u). User mode address of start of mapping: 0x%x", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32RefCount, psOffsetStruct->ui32UserVAddr)); + } + + DestroyOffsetStruct(psOffsetStruct); + } + + ps_vma->vm_private_data = NULL; +} + +/* + * Linux mmap close entry point. + */ +static void +MMapVClose(struct vm_area_struct* ps_vma) +{ + LinuxLockMutex(&g_sMMapMutex); + + MMapVCloseNoLock(ps_vma); + + LinuxUnLockMutex(&g_sMMapMutex); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +/* + * This vma operation is used to read data from mmap regions. It is called + * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace + * requests and reads from /proc/<pid>/mem. 
+ */ +static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr, + void *buf, int len, int write) +{ + PKV_OFFSET_STRUCT psOffsetStruct; + LinuxMemArea *psLinuxMemArea; + unsigned long ulOffset; + int iRetVal = -EINVAL; + IMG_VOID *pvKernelAddr; + + LinuxLockMutex(&g_sMMapMutex); + + psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data; + psLinuxMemArea = psOffsetStruct->psLinuxMemArea; + ulOffset = addr - ps_vma->vm_start; + + if (ulOffset+len > psLinuxMemArea->ui32ByteSize) + /* Out of range. We shouldn't get here, because the kernel will do + the necessary checks before calling access_process_vm. */ + goto exit_unlock; + + pvKernelAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea); + + if (pvKernelAddr) + { + memcpy(buf, pvKernelAddr+ulOffset, len); + iRetVal = len; + } + else + { + IMG_UINT32 pfn, ui32OffsetInPage; + struct page *page; + + pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ulOffset); + + if (!pfn_valid(pfn)) + goto exit_unlock; + + page = pfn_to_page(pfn); + ui32OffsetInPage = ADDR_TO_PAGE_OFFSET(ulOffset); + + if (ui32OffsetInPage+len > PAGE_SIZE) + /* The region crosses a page boundary */ + goto exit_unlock; + + pvKernelAddr = kmap(page); + memcpy(buf, pvKernelAddr+ui32OffsetInPage, len); + kunmap(page); + + iRetVal = len; + } + +exit_unlock: + LinuxUnLockMutex(&g_sMMapMutex); + return iRetVal; +} +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) */ + +static struct vm_operations_struct MMapIOOps = +{ + .open=MMapVOpen, + .close=MMapVClose, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) + .access=MMapVAccess, +#endif +}; + + +/*! + ******************************************************************************* + + @Function PVRMMap + + @Description + + Driver mmap entry point. + + @input pFile : unused. + @input ps_vma : pointer to linux memory area descriptor. + + @Return 0, or Linux error code. 
+ + ******************************************************************************/ +int +PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma) +{ + LinuxMemArea *psFlushMemArea = IMG_NULL; + PKV_OFFSET_STRUCT psOffsetStruct; + IMG_UINT32 ui32ByteSize; + IMG_VOID *pvBase = IMG_NULL; + int iRetVal = 0; + IMG_UINT32 ui32ByteOffset = 0; /* Keep compiler happy */ + IMG_UINT32 ui32FlushSize = 0; + + PVR_UNREFERENCED_PARAMETER(pFile); + + LinuxLockMutex(&g_sMMapMutex); + + ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start; + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Received mmap(2) request with ui32MMapOffset 0x%08lx," + " and ui32ByteSize %d(0x%08x)", + __FUNCTION__, + ps_vma->vm_pgoff, + ui32ByteSize, ui32ByteSize)); + + psOffsetStruct = FindOffsetStructByOffset(ps_vma->vm_pgoff, ui32ByteSize); + + if (psOffsetStruct == IMG_NULL) + { +#if defined(SUPPORT_DRI_DRM) + LinuxUnLockMutex(&g_sMMapMutex); + +#if !defined(SUPPORT_DRI_DRM_EXT) + /* Pass unknown requests onto the DRM module */ + return drm_mmap(pFile, ps_vma); +#else + /* + * Indicate to caller that the request is not for us. + * Do not return this error elsewhere in this function, as the + * caller may use it as a clue as to whether the mmap request + * should be passed on to another component (e.g. drm_mmap). 
+ */ + return -ENOENT; +#endif +#else + PVR_UNREFERENCED_PARAMETER(pFile); + + PVR_DPF((PVR_DBG_ERROR, + "%s: Attempted to mmap unregistered area at vm_pgoff 0x%lx", + __FUNCTION__, ps_vma->vm_pgoff)); + iRetVal = -EINVAL; +#endif + goto unlock_and_return; + } + + list_del(&psOffsetStruct->sMMapItem); + psOffsetStruct->bOnMMapList = IMG_FALSE; + + /* Only support shared writeable mappings */ + if (((ps_vma->vm_flags & VM_WRITE) != 0) && + ((ps_vma->vm_flags & VM_SHARED) == 0)) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Cannot mmap non-shareable writable areas", __FUNCTION__)); + iRetVal = -EINVAL; + goto unlock_and_return; + } + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped psLinuxMemArea 0x%p\n", + __FUNCTION__, psOffsetStruct->psLinuxMemArea)); + + ps_vma->vm_flags |= VM_RESERVED; + ps_vma->vm_flags |= VM_IO; + + /* + * Disable mremap because our nopage handler assumes all + * page requests have already been validated. + */ + ps_vma->vm_flags |= VM_DONTEXPAND; + + /* Don't allow mapping to be inherited across a process fork */ + ps_vma->vm_flags |= VM_DONTCOPY; + + ps_vma->vm_private_data = (void *)psOffsetStruct; + + switch(psOffsetStruct->psLinuxMemArea->ui32AreaFlags & PVRSRV_HAP_CACHETYPE_MASK) + { + case PVRSRV_HAP_CACHED: + /* This is the default, do nothing. */ + break; + case PVRSRV_HAP_WRITECOMBINE: + ps_vma->vm_page_prot = PGPROT_WC(ps_vma->vm_page_prot); + break; + case PVRSRV_HAP_UNCACHED: + ps_vma->vm_page_prot = PGPROT_UC(ps_vma->vm_page_prot); + break; + default: + PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type", __FUNCTION__)); + iRetVal = -EINVAL; + goto unlock_and_return; + } + +#if defined(SGX544) && defined(SGX_FEATURE_MP) + /* In OMAP5, the A15 no longer masks an issue with the interconnect. + writecombined access to the Tiler 2D memory will encounter errors due to + interconect bus accesses. This will result in a SIGBUS error with a + "non-line fetch abort". The workaround is to use a shared device + access. 
*/ + if (psOffsetStruct->psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ION) + ps_vma->vm_page_prot = __pgprot_modify(ps_vma->vm_page_prot, + L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED); +#endif + + /* Install open and close handlers for ref-counting */ + ps_vma->vm_ops = &MMapIOOps; + + if(!DoMapToUser(psOffsetStruct->psLinuxMemArea, ps_vma, 0)) + { + iRetVal = -EAGAIN; + goto unlock_and_return; + } + + PVR_ASSERT(psOffsetStruct->ui32UserVAddr == 0); + + psOffsetStruct->ui32UserVAddr = ps_vma->vm_start; + + /* Invalidate for the ION memory is performed during the mapping */ + if(psOffsetStruct->psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ION) + psOffsetStruct->psLinuxMemArea->bNeedsCacheInvalidate = IMG_FALSE; + + /* Compute the flush region (if necessary) inside the mmap mutex */ + if(psOffsetStruct->psLinuxMemArea->bNeedsCacheInvalidate) + { + psFlushMemArea = psOffsetStruct->psLinuxMemArea; + + /* Sparse mappings have to ask the BM for the virtual size */ + if (psFlushMemArea->hBMHandle) + { + pvBase = (IMG_VOID *)ps_vma->vm_start; + ui32ByteOffset = 0; + ui32FlushSize = BM_GetVirtualSize(psFlushMemArea->hBMHandle); + } + else + { + IMG_UINT32 ui32DummyByteSize; + + DetermineUsersSizeAndByteOffset(psFlushMemArea, + &ui32DummyByteSize, + &ui32ByteOffset); + + pvBase = (IMG_VOID *)ps_vma->vm_start + ui32ByteOffset; + ui32FlushSize = psFlushMemArea->ui32ByteSize; + } + + psFlushMemArea->bNeedsCacheInvalidate = IMG_FALSE; + } + + /* Call the open routine to increment the usage count */ + MMapVOpenNoLock(ps_vma); + + PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n", + __FUNCTION__, ps_vma->vm_pgoff)); + +unlock_and_return: + if (iRetVal != 0 && psOffsetStruct != IMG_NULL) + { + DestroyOffsetStruct(psOffsetStruct); + } + + LinuxUnLockMutex(&g_sMMapMutex); + + if(psFlushMemArea) + { + OSInvalidateCPUCacheRangeKM(psFlushMemArea, ui32ByteOffset, pvBase, + ui32FlushSize); + } + + return iRetVal; +} + + +#if defined(DEBUG_LINUX_MMAP_AREAS) + +/* + * Lock MMap regions 
list (called on page start/stop while reading /proc/mmap) + + * sfile : seq_file that handles /proc file + * start : TRUE if it's start, FALSE if it's stop + * +*/ +static void ProcSeqStartstopMMapRegistations(struct seq_file *sfile,IMG_BOOL start) +{ + if(start) + { + LinuxLockMutex(&g_sMMapMutex); + } + else + { + LinuxUnLockMutex(&g_sMMapMutex); + } +} + + +/* + * Convert offset (index from KVOffsetTable) to element + * (called when reading /proc/mmap file) + + * sfile : seq_file that handles /proc file + * off : index into the KVOffsetTable from which to print + * + * returns void* : Pointer to element that will be dumped + * +*/ +static void* ProcSeqOff2ElementMMapRegistrations(struct seq_file *sfile, loff_t off) +{ + LinuxMemArea *psLinuxMemArea; + if(!off) + { + return PVR_PROC_SEQ_START_TOKEN; + } + + list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem) + { + PKV_OFFSET_STRUCT psOffsetStruct; + + list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem) + { + off--; + if (off == 0) + { + PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea); + return (void*)psOffsetStruct; + } + } + } + return (void*)0; +} + +/* + * Gets next MMap element to show. 
(called when reading /proc/mmap file) + + * sfile : seq_file that handles /proc file + * el : actual element + * off : index into the KVOffsetTable from which to print + * + * returns void* : Pointer to element to show (0 ends iteration) +*/ +static void* ProcSeqNextMMapRegistrations(struct seq_file *sfile,void* el,loff_t off) +{ + return ProcSeqOff2ElementMMapRegistrations(sfile,off); +} + + +/* + * Show MMap element (called when reading /proc/mmap file) + + * sfile : seq_file that handles /proc file + * el : actual element + * +*/ +static void ProcSeqShowMMapRegistrations(struct seq_file *sfile, void *el) +{ + KV_OFFSET_STRUCT *psOffsetStruct = (KV_OFFSET_STRUCT*)el; + LinuxMemArea *psLinuxMemArea; + IMG_UINT32 ui32RealByteSize; + IMG_UINT32 ui32ByteOffset; + + if(el == PVR_PROC_SEQ_START_TOKEN) + { + seq_printf( sfile, +#if !defined(DEBUG_LINUX_XML_PROC_FILES) + "Allocations registered for mmap: %u\n" + "In total these areas correspond to %u bytes\n" + "psLinuxMemArea " + "UserVAddr " + "KernelVAddr " + "CpuPAddr " + "MMapOffset " + "ByteLength " + "LinuxMemType " + "Pid Name Flags\n", +#else + "<mmap_header>\n" + "\t<count>%u</count>\n" + "\t<bytes>%u</bytes>\n" + "</mmap_header>\n", +#endif + g_ui32RegisteredAreas, + g_ui32TotalByteSize + ); + return; + } + + psLinuxMemArea = psOffsetStruct->psLinuxMemArea; + + DetermineUsersSizeAndByteOffset(psLinuxMemArea, + &ui32RealByteSize, + &ui32ByteOffset); + + seq_printf( sfile, +#if !defined(DEBUG_LINUX_XML_PROC_FILES) + "%-8p %08x %-8p %08x %08x %-8d %-24s %-5u %-8s %08x(%s)\n", +#else + "<mmap_record>\n" + "\t<pointer>%-8p</pointer>\n" + "\t<user_virtual>%-8x</user_virtual>\n" + "\t<kernel_virtual>%-8p</kernel_virtual>\n" + "\t<cpu_physical>%08x</cpu_physical>\n" + "\t<mmap_offset>%08x</mmap_offset>\n" + "\t<bytes>%-8d</bytes>\n" + "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n" + "\t<pid>%-5u</pid>\n" + "\t<name>%-8s</name>\n" + "\t<flags>%08x</flags>\n" + "\t<flags_string>%s</flags_string>\n" + 
"</mmap_record>\n", +#endif + psLinuxMemArea, + psOffsetStruct->ui32UserVAddr + ui32ByteOffset, + LinuxMemAreaToCpuVAddr(psLinuxMemArea), + LinuxMemAreaToCpuPAddr(psLinuxMemArea,0).uiAddr, + psOffsetStruct->ui32MMapOffset, + psLinuxMemArea->ui32ByteSize, + LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType), + psOffsetStruct->ui32PID, + psOffsetStruct->pszName, + psLinuxMemArea->ui32AreaFlags, + HAPFlagsToString(psLinuxMemArea->ui32AreaFlags)); +} + +#endif + + +/*! + ******************************************************************************* + + @Function PVRMMapRegisterArea + + @Description + + Register a memory area with the mmap code. + + @input psLinuxMemArea : pointer to memory area. + + @Return PVRSRV_OK, or PVRSRV_ERROR. + + ******************************************************************************/ +PVRSRV_ERROR +PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea) +{ + PVRSRV_ERROR eError; +#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS) + const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea)); +#endif + + LinuxLockMutex(&g_sMMapMutex); + +#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS) + PVR_DPF((PVR_DBG_MESSAGE, + "%s(%s, psLinuxMemArea 0x%p, ui32AllocFlags 0x%8x)", + __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags)); +#endif + + PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC); + + /* Check this mem area hasn't already been registered */ + if(psLinuxMemArea->bMMapRegistered) + { + PVR_DPF((PVR_DBG_ERROR, "%s: psLinuxMemArea 0x%p is already registered", + __FUNCTION__, psLinuxMemArea)); + eError = PVRSRV_ERROR_INVALID_PARAMS; + goto exit_unlock; + } + + list_add_tail(&psLinuxMemArea->sMMapItem, &g_sMMapAreaList); + + psLinuxMemArea->bMMapRegistered = IMG_TRUE; + +#if defined(DEBUG_LINUX_MMAP_AREAS) + g_ui32RegisteredAreas++; + /* + * Sub memory areas are excluded from g_ui32TotalByteSize so 
that we + * don't count memory twice, once for the parent and again for sub + * allocationis. + */ + if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) + { + g_ui32TotalByteSize += psLinuxMemArea->ui32ByteSize; + } +#endif + + eError = PVRSRV_OK; + +exit_unlock: + LinuxUnLockMutex(&g_sMMapMutex); + + return eError; +} + + +/*! + ******************************************************************************* + + @Function PVRMMapRemoveRegisterArea + + @Description + + Unregister a memory area with the mmap code. + + @input psLinuxMemArea : pointer to memory area. + + @Return PVRSRV_OK, or PVRSRV_ERROR. + + ******************************************************************************/ +PVRSRV_ERROR +PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea) +{ + PVRSRV_ERROR eError; + PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct; + + LinuxLockMutex(&g_sMMapMutex); + + PVR_ASSERT(psLinuxMemArea->bMMapRegistered); + + list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem) + { + if (psOffsetStruct->ui32Mapped != 0) + { + PVR_DPF((PVR_DBG_ERROR, "%s: psOffsetStruct 0x%p for memory area 0x0x%p is still mapped; psOffsetStruct->ui32Mapped %u", __FUNCTION__, psOffsetStruct, psLinuxMemArea, psOffsetStruct->ui32Mapped)); + dump_stack(); + PVRSRVDumpRefCountCCB(); + eError = PVRSRV_ERROR_STILL_MAPPED; + goto exit_unlock; + } + else + { + /* + * An offset structure is created when a call is made to get + * the mmap data for a physical mapping. If the data is never + * used for mmap, we will be left with an umapped offset + * structure. 
+ */ + PVR_DPF((PVR_DBG_WARNING, "%s: psOffsetStruct 0x%p was never mapped", __FUNCTION__, psOffsetStruct)); + } + + PVR_ASSERT((psOffsetStruct->ui32Mapped == 0) && psOffsetStruct->bOnMMapList); + + DestroyOffsetStruct(psOffsetStruct); + } + + list_del(&psLinuxMemArea->sMMapItem); + + psLinuxMemArea->bMMapRegistered = IMG_FALSE; + +#if defined(DEBUG_LINUX_MMAP_AREAS) + g_ui32RegisteredAreas--; + if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC) + { + g_ui32TotalByteSize -= psLinuxMemArea->ui32ByteSize; + } +#endif + + eError = PVRSRV_OK; + +exit_unlock: + LinuxUnLockMutex(&g_sMMapMutex); + return eError; +} + + +/*! + ******************************************************************************* + + @Function LinuxMMapPerProcessConnect + + @Description + + Per-process mmap initialisation code. + + @input psEnvPerProc : pointer to OS specific per-process data. + + @Return PVRSRV_OK, or PVRSRV_ERROR. + + ******************************************************************************/ +PVRSRV_ERROR +LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc) +{ + PVR_UNREFERENCED_PARAMETER(psEnvPerProc); + + return PVRSRV_OK; +} + +/*! + ******************************************************************************* + + @Function LinuxMMapPerProcessDisconnect + + @Description + + Per-process mmap deinitialisation code. + + @input psEnvPerProc : pointer to OS specific per-process data. 
+ + ******************************************************************************/ +IMG_VOID +LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc) +{ + PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct; + IMG_BOOL bWarn = IMG_FALSE; + IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); + + PVR_UNREFERENCED_PARAMETER(psEnvPerProc); + + LinuxLockMutex(&g_sMMapMutex); + + list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &g_sMMapOffsetStructList, sMMapItem) + { + if (psOffsetStruct->ui32PID == ui32PID) + { + if (!bWarn) + { + PVR_DPF((PVR_DBG_WARNING, "%s: process has unmapped offset structures. Removing them", __FUNCTION__)); + bWarn = IMG_TRUE; + } + PVR_ASSERT(psOffsetStruct->ui32Mapped == 0); + PVR_ASSERT(psOffsetStruct->bOnMMapList); + + DestroyOffsetStruct(psOffsetStruct); + } + } + + LinuxUnLockMutex(&g_sMMapMutex); +} + + +/*! + ******************************************************************************* + + @Function LinuxMMapPerProcessHandleOptions + + @Description + + Set secure handle options required by mmap code. + + @input psHandleBase : pointer to handle base. + + @Return PVRSRV_OK, or PVRSRV_ERROR. + + ******************************************************************************/ +PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase) +{ + PVRSRV_ERROR eError; + + eError = PVRSRVSetMaxHandle(psHandleBase, MAX_MMAP_HANDLE); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR,"%s: failed to set handle limit (%d)", __FUNCTION__, eError)); + return eError; + } + + return eError; +} + + +/*! 
+ ******************************************************************************* + + @Function PVRMMapInit + + @Description + + MMap initialisation code + + ******************************************************************************/ +IMG_VOID +PVRMMapInit(IMG_VOID) +{ + LinuxInitMutex(&g_sMMapMutex); + + g_psMemmapCache = KMemCacheCreateWrapper("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0); + if (!g_psMemmapCache) + { + PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__)); + goto error; + } + +#if defined(DEBUG_LINUX_MMAP_AREAS) + g_ProcMMap = CreateProcReadEntrySeq("mmap", NULL, + ProcSeqNextMMapRegistrations, + ProcSeqShowMMapRegistrations, + ProcSeqOff2ElementMMapRegistrations, + ProcSeqStartstopMMapRegistations + ); +#endif /* defined(DEBUG_LINUX_MMAP_AREAS) */ + return; + +error: + PVRMMapCleanup(); + return; +} + + +/*! + ******************************************************************************* + + @Function PVRMMapCleanup + + @Description + + Mmap deinitialisation code + + ******************************************************************************/ +IMG_VOID +PVRMMapCleanup(IMG_VOID) +{ + PVRSRV_ERROR eError; + + if (!list_empty(&g_sMMapAreaList)) + { + LinuxMemArea *psLinuxMemArea, *psTmpMemArea; + + PVR_DPF((PVR_DBG_ERROR, "%s: Memory areas are still registered with MMap", __FUNCTION__)); + + PVR_TRACE(("%s: Unregistering memory areas", __FUNCTION__)); + list_for_each_entry_safe(psLinuxMemArea, psTmpMemArea, &g_sMMapAreaList, sMMapItem) + { + eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: PVRMMapRemoveRegisteredArea failed (%d)", __FUNCTION__, eError)); + } + PVR_ASSERT(eError == PVRSRV_OK); + + LinuxMemAreaDeepFree(psLinuxMemArea); + } + } + PVR_ASSERT(list_empty((&g_sMMapAreaList))); + +#if defined(DEBUG_LINUX_MMAP_AREAS) + RemoveProcEntrySeq(g_ProcMMap); +#endif /* defined(DEBUG_LINUX_MMAP_AREAS) */ + + if(g_psMemmapCache) + { + 
KMemCacheDestroyWrapper(g_psMemmapCache); + g_psMemmapCache = NULL; + } +} diff --git a/pvr-source/services4/srvkm/env/linux/mmap.h b/pvr-source/services4/srvkm/env/linux/mmap.h new file mode 100644 index 0000000..7140c13 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/mmap.h @@ -0,0 +1,240 @@ +/*************************************************************************/ /*! +@Title Linux mmap interface declaration +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if !defined(__MMAP_H__) +#define __MMAP_H__ + +#include <linux/mm.h> +#include <linux/list.h> + +#if defined(VM_MIXEDMAP) +/* + * Mixed maps allow us to avoid using raw PFN mappings (VM_PFNMAP) for + * pages without pages structures ("struct page"), giving us more + * freedom in choosing the mmap offset for mappings. Mixed maps also + * allow both the mmap and the wrap code to be simplified somewhat. + */ +#define PVR_MAKE_ALL_PFNS_SPECIAL +#endif + +#include "perproc.h" +#include "mm.h" + +/* + * This structure represents the relationship between an mmap2 file + * offset and a LinuxMemArea for a given process. + */ +typedef struct KV_OFFSET_STRUCT_TAG +{ + /* + * Mapping count. Incremented when the mapping is created, and + * if the mapping is inherited across a process fork. + */ + IMG_UINT32 ui32Mapped; + + /* + * Offset to be passed to mmap2 to map the associated memory area + * into user space. The offset may represent the page frame number + * of the first page in the area (if the area is physically + * contiguous), or it may represent the secure handle associated + * with the area. 
+ */ + IMG_UINT32 ui32MMapOffset; + + IMG_UINT32 ui32RealByteSize; + + /* Memory area associated with this offset structure */ + LinuxMemArea *psLinuxMemArea; + +#if !defined(PVR_MAKE_ALL_PFNS_SPECIAL) + /* ID of the thread that owns this structure */ + IMG_UINT32 ui32TID; +#endif + + /* ID of the process that owns this structure */ + IMG_UINT32 ui32PID; + + /* + * For offsets that represent actual page frame numbers, this structure + * is temporarily put on a list so that it can be found from the + * driver mmap entry point. This flag indicates the structure is + * on the list. + */ + IMG_BOOL bOnMMapList; + + /* Reference count for this structure */ + IMG_UINT32 ui32RefCount; + + /* + * User mode address of start of mapping. This is not necessarily the + * first user mode address of the memory area. + */ + IMG_UINT32 ui32UserVAddr; + + /* Extra entries to support proc filesystem debug info */ +#if defined(DEBUG_LINUX_MMAP_AREAS) + const IMG_CHAR *pszName; +#endif + + /* List entry field for MMap list */ + struct list_head sMMapItem; + + /* List entry field for per-memory area list */ + struct list_head sAreaItem; +}KV_OFFSET_STRUCT, *PKV_OFFSET_STRUCT; + + + +/*! + ******************************************************************************* + * @Function Mmap initialisation code + ******************************************************************************/ +IMG_VOID PVRMMapInit(IMG_VOID); + + +/*! + ******************************************************************************* + * @Function Mmap de-initialisation code + ******************************************************************************/ +IMG_VOID PVRMMapCleanup(IMG_VOID); + + +/*! 
+ ******************************************************************************* + * @Function Registers a memory area with the mmap code + * + * @Input psLinuxMemArea + * + * @Return PVRSRV_ERROR status + ******************************************************************************/ +PVRSRV_ERROR PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea); + + +/*! + ******************************************************************************* + * @Function Unregisters a memory area from the mmap code + * + * @Input psLinuxMemArea + * + * @Return PVRSRV_ERROR status + ******************************************************************************/ +PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea); + + +/*! + ****************************************************************************** + * @Function When a userspace services client, requests to map a memory + * area to userspace, this function validates the request and + * returns the details that the client must use when calling mmap(2). + * + * @Input psPerProc Per process data. + * @Input hMHandle Handle associated with the memory to map. + * This is a (secure) handle to the OS specific + * memory handle structure (hOSMemHandle), or + * a handle to a structure that contains the + * memory handle. + * @Output pui32MMapOffset The page aligned offset that the client must + * pass to the mmap2 system call. + * @Output pui32ByteOffset The real mapping that will be created for the + * services client may have a different + * size/alignment from it request. This offset + * is returned to the client and should be added + * to virtual address returned from mmap2 to get + * the first address corresponding to its request. + * @Output pui32RealByteOffset The size that the mapping will really be, + * that the client must also pass to mmap/munmap. + * + * @Output pui32UserVAddr Pointer to returned user mode address of + * mapping. 
+ * @Return PVRSRV_ERROR + ******************************************************************************/ +PVRSRV_ERROR PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hMHandle, +#else + IMG_HANDLE hMHandle, +#endif + IMG_UINT32 *pui32MMapOffset, + IMG_UINT32 *pui32ByteOffset, + IMG_UINT32 *pui32RealByteSize, + IMG_UINT32 *pui32UserVAddr); + +/*! + ******************************************************************************* + + @Function Release mmap data. + + @Input psPerProc Per-process data. + @Input hMHandle Memory handle. + + @Output pbMUnmap Flag that indicates whether an munmap is + required. + @Output pui32RealByteSize Location for size of mapping. + @Output pui32UserVAddr User mode address to munmap. + + @Return PVRSRV_ERROR + ******************************************************************************/ +PVRSRV_ERROR +PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc, +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hMHandle, +#else + IMG_HANDLE hMHandle, +#endif + IMG_BOOL *pbMUnmap, + IMG_UINT32 *pui32RealByteSize, + IMG_UINT32 *pui32UserVAddr); + +/*! + ******************************************************************************* + * @Function driver mmap entry point + * + * @Input pFile : user file structure + * + * @Input ps_vma : vm area structure + * + * @Return 0 for success, -errno for failure. + ******************************************************************************/ +int PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma); + + +#endif /* __MMAP_H__ */ + diff --git a/pvr-source/services4/srvkm/env/linux/module.c b/pvr-source/services4/srvkm/env/linux/module.c new file mode 100644 index 0000000..487069d --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/module.c @@ -0,0 +1,1214 @@ +/*************************************************************************/ /*! +@Title Linux module setup +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#endif + +#if defined(SUPPORT_DRI_DRM) && !defined(SUPPORT_DRI_DRM_PLUGIN) +#define PVR_MOD_STATIC +#else + /* + * For LDM drivers, define PVR_LDM_MODULE to indicate generic LDM + * support is required, besides indicating the exact support + * required (e.g. platform, or PCI device). 
+ */ + #if defined(LDM_PLATFORM) + #define PVR_LDM_PLATFORM_MODULE + #define PVR_LDM_DEVICE_CLASS + #define PVR_LDM_MODULE + #else + #if defined(LDM_PCI) + #define PVR_LDM_DEVICE_CLASS + #define PVR_LDM_PCI_MODULE + #define PVR_LDM_MODULE + #else + #if defined(SYS_SHARES_WITH_3PKM) + #define PVR_LDM_DEVICE_CLASS + #endif + #endif + #endif +#define PVR_MOD_STATIC static +#endif + +#if defined(PVR_LDM_PLATFORM_PRE_REGISTERED) +#if !defined(NO_HARDWARE) +#define PVR_USE_PRE_REGISTERED_PLATFORM_DEV +#endif +#endif + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/proc_fs.h> + +#if defined(SUPPORT_DRI_DRM) +#include <drm/drmP.h> +#if defined(PVR_SECURE_DRM_AUTH_EXPORT) +#include "env_perproc.h" +#endif +#endif + +#if defined(PVR_LDM_PLATFORM_MODULE) +#include <linux/platform_device.h> +#endif /* PVR_LDM_PLATFORM_MODULE */ + +#if defined(PVR_LDM_PCI_MODULE) +#include <linux/pci.h> +#endif /* PVR_LDM_PCI_MODULE */ + +#if defined(PVR_LDM_DEVICE_CLASS) +#include <linux/device.h> +#endif /* PVR_LDM_DEVICE_CLASS */ + +#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) +#include <asm/uaccess.h> +#endif + +#include "img_defs.h" +#include "services.h" +#include "kerneldisplay.h" +#include "kernelbuffer.h" +#include "syscommon.h" +#include "pvrmmap.h" +#include "mutils.h" +#include "mm.h" +#include "mmap.h" +#include "mutex.h" +#include "pvr_debug.h" +#include "srvkm.h" +#include "perproc.h" +#include "handle.h" +#include "pvr_bridge_km.h" +#include "proc.h" +#include "pvrmodule.h" +#include "private_data.h" +#include "lock.h" +#include "linkage.h" +#include "buffer_manager.h" + +#if defined(SUPPORT_DRI_DRM) +#include "pvr_drm.h" +#endif +/* + * DRVNAME is the name we use to register our driver. + * DEVNAME is the name we use to register actual device nodes. 
+ */ +#if defined(PVR_LDM_MODULE) +#define DRVNAME PVR_LDM_DRIVER_REGISTRATION_NAME +#endif +#define DEVNAME PVRSRV_MODNAME + +#if defined(SUPPORT_DRI_DRM) +#define PRIVATE_DATA(pFile) ((pFile)->driver_priv) +#else +#define PRIVATE_DATA(pFile) ((pFile)->private_data) +#endif + +/* + * This is all module configuration stuff required by the linux kernel. + */ +MODULE_SUPPORTED_DEVICE(DEVNAME); + +#if defined(PVRSRV_NEED_PVR_DPF) +#include <linux/moduleparam.h> +extern IMG_UINT32 gPVRDebugLevel; +module_param(gPVRDebugLevel, uint, 0644); +MODULE_PARM_DESC(gPVRDebugLevel, "Sets the level of debug output (default 0x7)"); +#endif /* defined(PVRSRV_NEED_PVR_DPF) */ + +#if defined(CONFIG_ION_OMAP) +#include <linux/ion.h> +#include <linux/omap_ion.h> +#include "ion.h" +extern void omap_ion_register_pvr_export(void *); +extern struct ion_device *omap_ion_device; +struct ion_client *gpsIONClient; +EXPORT_SYMBOL(gpsIONClient); +#endif /* defined(CONFIG_ION_OMAP) */ + +/* PRQA S 3207 2 */ /* ignore 'not used' warning */ +EXPORT_SYMBOL(PVRGetDisplayClassJTable); +EXPORT_SYMBOL(PVRGetBufferClassJTable); + +#if defined(PVR_LDM_DEVICE_CLASS) && !defined(SUPPORT_DRI_DRM) +/* + * Device class used for /sys entries (and udev device node creation) + */ +static struct class *psPvrClass; +#endif + +#if !defined(SUPPORT_DRI_DRM) +/* + * This is the major number we use for all nodes in /dev. + */ +static int AssignedMajorNumber; + +/* + * These are the operations that will be associated with the device node + * we create. + * + * With gcc -W, specifying only the non-null members produces "missing + * initializer" warnings. 
+*/ +static int PVRSRVOpen(struct inode* pInode, struct file* pFile); +static int PVRSRVRelease(struct inode* pInode, struct file* pFile); + +static struct file_operations pvrsrv_fops = +{ + .owner=THIS_MODULE, + .unlocked_ioctl = PVRSRV_BridgeDispatchKM, + .open=PVRSRVOpen, + .release=PVRSRVRelease, + .mmap=PVRMMap, +}; +#endif + +PVRSRV_LINUX_MUTEX gPVRSRVLock; + +/* PID of process being released */ +IMG_UINT32 gui32ReleasePID; + +#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) +static IMG_UINT32 gPVRPowerLevel; +#endif + +#if defined(PVR_LDM_MODULE) + +#if defined(PVR_LDM_PLATFORM_MODULE) +#define LDM_DEV struct platform_device +#define LDM_DRV struct platform_driver +#endif /*PVR_LDM_PLATFORM_MODULE */ + +#if defined(PVR_LDM_PCI_MODULE) +#define LDM_DEV struct pci_dev +#define LDM_DRV struct pci_driver +#endif /* PVR_LDM_PCI_MODULE */ +/* + * This is the driver interface we support. + */ +#if defined(PVR_LDM_PLATFORM_MODULE) +static int PVRSRVDriverRemove(LDM_DEV *device); +static int PVRSRVDriverProbe(LDM_DEV *device); +#endif +#if defined(PVR_LDM_PCI_MODULE) +static void PVRSRVDriverRemove(LDM_DEV *device); +static int PVRSRVDriverProbe(LDM_DEV *device, const struct pci_device_id *id); +#endif +static int PVRSRVDriverSuspend(LDM_DEV *device, pm_message_t state); +static void PVRSRVDriverShutdown(LDM_DEV *device); +static int PVRSRVDriverResume(LDM_DEV *device); + +#if defined(PVR_LDM_PCI_MODULE) +/* This structure is used by the Linux module code */ +struct pci_device_id powervr_id_table[] __devinitdata = { + {PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID)}, +#if defined (SYS_SGX_DEV1_DEVICE_ID) + {PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV1_DEVICE_ID)}, +#endif + {0} +}; + +MODULE_DEVICE_TABLE(pci, powervr_id_table); +#endif + +#if defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) +static struct platform_device_id powervr_id_table[] __devinitdata = { + {SYS_SGX_DEV_NAME, 0}, + {} +}; +#endif + +static LDM_DRV powervr_driver = { +#if 
defined(PVR_LDM_PLATFORM_MODULE) + .driver = { + .name = DRVNAME, + }, +#endif +#if defined(PVR_LDM_PCI_MODULE) + .name = DRVNAME, +#endif +#if defined(PVR_LDM_PCI_MODULE) || defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) + .id_table = powervr_id_table, +#endif + .probe = PVRSRVDriverProbe, +#if defined(PVR_LDM_PLATFORM_MODULE) + .remove = PVRSRVDriverRemove, +#endif +#if defined(PVR_LDM_PCI_MODULE) + .remove = __devexit_p(PVRSRVDriverRemove), +#endif + .suspend = PVRSRVDriverSuspend, + .resume = PVRSRVDriverResume, + .shutdown = PVRSRVDriverShutdown, +}; + +LDM_DEV *gpsPVRLDMDev; + +#if defined(MODULE) && defined(PVR_LDM_PLATFORM_MODULE) && \ + !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) +static void PVRSRVDeviceRelease(struct device unref__ *pDevice) +{ +} + +static struct platform_device powervr_device = { + .name = DEVNAME, + .id = -1, + .dev = { + .release = PVRSRVDeviceRelease + } +}; +#endif + +/*! +****************************************************************************** + + @Function PVRSRVDriverProbe + + @Description + + See whether a given device is really one we can drive. The platform bus + handler has already established that we should be able to service this device + because of the name match. We probably don't need to do anything else. + + @input pDevice - the device for which a probe is requested + + @Return 0 for success or <0 for an error. + +*****************************************************************************/ +#if defined(PVR_LDM_PLATFORM_MODULE) +static int PVRSRVDriverProbe(LDM_DEV *pDevice) +#endif +#if defined(PVR_LDM_PCI_MODULE) +static int __devinit PVRSRVDriverProbe(LDM_DEV *pDevice, const struct pci_device_id *id) +#endif +{ + SYS_DATA *psSysData; + + PVR_TRACE(("PVRSRVDriverProbe(pDevice=%p)", pDevice)); + +#if 0 /* INTEGRATION_POINT */ + /* Some systems require device-specific system initialisation. + * E.g. this lets the OS track a device's dependencies on various + * system hardware. 
+ * + * Note: some systems use this to enable HW that SysAcquireData + * will depend on, therefore it must be called first. + */ + if (PerDeviceSysInitialise((IMG_PVOID)pDevice) != PVRSRV_OK) + { + return -EINVAL; + } +#endif + /* SysInitialise only designed to be called once. + */ + psSysData = SysAcquireDataNoCheck(); + if (psSysData == IMG_NULL) + { + gpsPVRLDMDev = pDevice; + if (SysInitialise() != PVRSRV_OK) + { + return -ENODEV; + } + } + +#if defined(CONFIG_ION_OMAP) + gpsIONClient = ion_client_create(omap_ion_device, + 1 << ION_HEAP_TYPE_CARVEOUT | + 1 << OMAP_ION_HEAP_TYPE_TILER | + 1 << ION_HEAP_TYPE_SYSTEM, + "pvr"); + if (IS_ERR_OR_NULL(gpsIONClient)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVDriverProbe: Couldn't create ion client")); + return PTR_ERR(gpsIONClient); + } + omap_ion_register_pvr_export(&PVRSRVExportFDToIONHandles); +#endif /* defined(CONFIG_ION_OMAP) */ + + return 0; +} + + +/*! +****************************************************************************** + + @Function PVRSRVDriverRemove + + @Description + + This call is the opposite of the probe call: it is called when the device is + being removed from the driver's control. See the file $KERNELDIR/drivers/ + base/bus.c:device_release_driver() for the call to this function. + + This is the correct place to clean up anything our driver did while it was + asoociated with the device. + + @input pDevice - the device for which driver detachment is happening + + @Return 0 for success or <0 for an error. 
+ +*****************************************************************************/ +#if defined (PVR_LDM_PLATFORM_MODULE) +static int PVRSRVDriverRemove(LDM_DEV *pDevice) +#endif +#if defined(PVR_LDM_PCI_MODULE) +static void __devexit PVRSRVDriverRemove(LDM_DEV *pDevice) +#endif +{ + SYS_DATA *psSysData; + + PVR_TRACE(("PVRSRVDriverRemove(pDevice=%p)", pDevice)); + +#if defined(CONFIG_ION_OMAP) + ion_client_destroy(gpsIONClient); + gpsIONClient = IMG_NULL; +#endif + + SysAcquireData(&psSysData); + +#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) + if (gPVRPowerLevel != 0) + { + if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK) + { + gPVRPowerLevel = 0; + } + } +#endif + (void) SysDeinitialise(psSysData); + + gpsPVRLDMDev = IMG_NULL; + +#if 0 /* INTEGRATION_POINT */ + /* See previous integration point for details. */ + if (PerDeviceSysDeInitialise((IMG_PVOID)pDevice) != PVRSRV_OK) + { + return -EINVAL; + } +#endif + +#if defined (PVR_LDM_PLATFORM_MODULE) + return 0; +#endif +#if defined (PVR_LDM_PCI_MODULE) + return; +#endif +} +#endif /* defined(PVR_LDM_MODULE) */ + + +#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM) +static PVRSRV_LINUX_MUTEX gsPMMutex; +static IMG_BOOL bDriverIsSuspended; +static IMG_BOOL bDriverIsShutdown; +#endif + +#if defined(PVR_LDM_MODULE) || defined(PVR_DRI_DRM_PLATFORM_DEV) +/*! +****************************************************************************** + + @Function PVRSRVDriverShutdown + + @Description + + Suspend device operation for system shutdown. This is called as part of the + system halt/reboot process. The driver is put into a quiescent state by + setting the power state to D3. 
+ + @input pDevice - the device for which shutdown is requested + + @Return nothing + +*****************************************************************************/ +#if defined(SUPPORT_DRI_DRM) && !defined(PVR_DRI_DRM_PLATFORM_DEV) && \ + !defined(SUPPORT_DRI_DRM_PLUGIN) +void PVRSRVDriverShutdown(struct drm_device *pDevice) +#else +PVR_MOD_STATIC void PVRSRVDriverShutdown(LDM_DEV *pDevice) +#endif +{ + PVR_TRACE(("PVRSRVDriverShutdown(pDevice=%p)", pDevice)); + + LinuxLockMutex(&gsPMMutex); + + if (!bDriverIsShutdown && !bDriverIsSuspended) + { + /* + * Take the bridge mutex, and never release it, to stop + * processes trying to use the driver after it has been + * shutdown. + */ + LinuxLockMutex(&gPVRSRVLock); + + (void) PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3); + } + + bDriverIsShutdown = IMG_TRUE; + + /* The bridge mutex is held on exit */ + LinuxUnLockMutex(&gsPMMutex); +} + +#endif /* defined(PVR_LDM_MODULE) || defined(PVR_DRI_DRM_PLATFORM_DEV) */ + + +#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM) +/*! +****************************************************************************** + + @Function PVRSRVDriverSuspend + + @Description + + For 2.6 kernels: + Suspend device operation. We always get three calls to this regardless of + the state (D1-D3) chosen. The order is SUSPEND_DISABLE, SUSPEND_SAVE_STATE + then SUSPEND_POWER_DOWN. We take action as soon as we get the disable call, + the other states not being handled by us yet. + + For MontaVista 2.4 kernels: + This call gets made once only when someone does something like + + # echo -e -n "suspend powerdown 0" >/sys.devices/legacy/pvrsrv0/power + + The 3rd, numeric parameter (0) in the above has no relevence and is not + passed into us. The state parameter is always zero and the level parameter + is always SUSPEND_POWER_DOWN. Vive la difference! + + @input pDevice - the device for which resume is requested + + @Return 0 for success or <0 for an error. 
+ +*****************************************************************************/ +#if defined(SUPPORT_DRI_DRM) && !defined(PVR_DRI_DRM_PLATFORM_DEV) && \ + !defined(SUPPORT_DRI_DRM_PLUGIN) +#if defined(SUPPORT_DRM_MODESET) +int PVRSRVDriverSuspend(struct pci_dev *pDevice, pm_message_t state) +#else +int PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state) +#endif +#else +PVR_MOD_STATIC int PVRSRVDriverSuspend(LDM_DEV *pDevice, pm_message_t state) +#endif +{ + int res = 0; +#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM)) + PVR_TRACE(( "PVRSRVDriverSuspend(pDevice=%p)", pDevice)); + + LinuxLockMutex(&gsPMMutex); + + if (!bDriverIsSuspended && !bDriverIsShutdown) + { + LinuxLockMutex(&gPVRSRVLock); + + if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) == PVRSRV_OK) + { + /* The bridge mutex will be held until we resume */ + bDriverIsSuspended = IMG_TRUE; + } + else + { + LinuxUnLockMutex(&gPVRSRVLock); + res = -EINVAL; + } + } + + LinuxUnLockMutex(&gsPMMutex); +#endif + return res; +} + + +/*! +****************************************************************************** + + @Function PVRSRVDriverResume + + @Description + + Resume device operation following a lull due to earlier suspension. It is + implicit we're returning to D0 (fully operational) state. We always get three + calls to this using level thus: RESUME_POWER_ON, RESUME_RESTORE_STATE then + RESUME_ENABLE. On 2.6 kernels We don't do anything until we get the enable + call; on the MontaVista set-up we only ever get the RESUME_POWER_ON call. + + @input pDevice - the device for which resume is requested + + @Return 0 for success or <0 for an error. 
+ +*****************************************************************************/ +#if defined(SUPPORT_DRI_DRM) && !defined(PVR_DRI_DRM_PLATFORM_DEV) && \ + !defined(SUPPORT_DRI_DRM_PLUGIN) +#if defined(SUPPORT_DRM_MODESET) +int PVRSRVDriverResume(struct pci_dev *pDevice) +#else +int PVRSRVDriverResume(struct drm_device *pDevice) +#endif +#else +PVR_MOD_STATIC int PVRSRVDriverResume(LDM_DEV *pDevice) +#endif +{ + int res = 0; +#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM)) + PVR_TRACE(("PVRSRVDriverResume(pDevice=%p)", pDevice)); + + LinuxLockMutex(&gsPMMutex); + + if (bDriverIsSuspended && !bDriverIsShutdown) + { + if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK) + { + bDriverIsSuspended = IMG_FALSE; + LinuxUnLockMutex(&gPVRSRVLock); + } + else + { + /* The bridge mutex is not released on failure */ + res = -EINVAL; + } + } + + LinuxUnLockMutex(&gsPMMutex); +#endif + return res; +} +#endif /* defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM) */ + + +#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM) +/* + * If PVR_LDM_PCI_MODULE is defined (and PVR_MANUAL_POWER_CONTROL is *NOT* defined), + * the device can be suspended and resumed without suspending/resuming the + * system, by writing values into the power/state sysfs file for the device. + * To suspend: + * echo -n 2 > power/state + * To Resume: + * echo -n 0 > power/state + * + * The problem with this approach is that the device is usually left + * powered up; it is the responsibility of the bus driver to remove + * the power. + * + * Defining PVR_MANUAL_POWER_CONTROL is intended to make it easier to + * debug power management issues, especially when power is really removed + * from the device. It is easier to debug the driver if it is not being + * suspended/resumed with the rest of the system. 
+ * + * When PVR_MANUAL_POWER_CONTROL is defined, the following proc entry is + * created: + * /proc/pvr/power_control + * The driver suspend/resume entry points defined below no longer suspend or + * resume the device. To suspend the device, type the following: + * echo 2 > /proc/pvr/power_control + * To resume the device, type: + * echo 0 > /proc/pvr/power_control + * + * The following example shows how to suspend/resume the device independently + * of the rest of the system. + * Suspend the device: + * echo 2 > /proc/pvr/power_control + * Suspend the system. Then you should be able to suspend and resume + * as normal. To resume the device type the following: + * echo 0 > /proc/pvr/power_control + */ + +IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data) +{ + IMG_CHAR data_buffer[2]; + IMG_UINT32 PVRPowerLevel; + + if (count != sizeof(data_buffer)) + { + return -EINVAL; + } + else + { + if (copy_from_user(data_buffer, buffer, count)) + return -EINVAL; + if (data_buffer[count - 1] != '\n') + return -EINVAL; + PVRPowerLevel = data_buffer[0] - '0'; + if (PVRPowerLevel != gPVRPowerLevel) + { + if (PVRPowerLevel != 0) + { + if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK) + { + return -EINVAL; + } + } + else + { + if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK) + { + return -EINVAL; + } + } + + gPVRPowerLevel = PVRPowerLevel; + } + } + return (count); +} + +void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el) +{ + seq_printf(sfile, "%lu\n", gPVRPowerLevel); +} + +#endif + +/*! +****************************************************************************** + + @Function PVRSRVOpen + + @Description + + Release access the PVR services node - called when a file is closed, whether + at exit or using close(2) system call. 
+ + @input pInode - the inode for the file being openeded + + @input pFile - the file handle data for the actual file being opened + + @Return 0 for success or <0 for an error. + +*****************************************************************************/ +#if defined(SUPPORT_DRI_DRM) +int PVRSRVOpen(struct drm_device unref__ *dev, struct drm_file *pFile) +#else +static int PVRSRVOpen(struct inode unref__ * pInode, struct file *pFile) +#endif +{ + PVRSRV_FILE_PRIVATE_DATA *psPrivateData; + IMG_HANDLE hBlockAlloc; + int iRet = -ENOMEM; + PVRSRV_ERROR eError; + IMG_UINT32 ui32PID; +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) + PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc; +#endif + + LinuxLockMutex(&gPVRSRVLock); + + ui32PID = OSGetCurrentProcessIDKM(); + + if (PVRSRVProcessConnect(ui32PID, 0) != PVRSRV_OK) + goto err_unlock; + +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) + psEnvPerProc = PVRSRVPerProcessPrivateData(ui32PID); + if (psEnvPerProc == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: No per-process private data", __FUNCTION__)); + goto err_unlock; + } +#endif + + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_FILE_PRIVATE_DATA), + (IMG_PVOID *)&psPrivateData, + &hBlockAlloc, + "File Private Data"); + + if(eError != PVRSRV_OK) + goto err_unlock; + +#if defined (SUPPORT_SID_INTERFACE) + psPrivateData->hKernelMemInfo = 0; +#else + psPrivateData->hKernelMemInfo = NULL; +#endif +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) + psPrivateData->psDRMFile = pFile; + + list_add_tail(&psPrivateData->sDRMAuthListItem, &psEnvPerProc->sDRMAuthListHead); +#endif + psPrivateData->ui32OpenPID = ui32PID; + psPrivateData->hBlockAlloc = hBlockAlloc; + PRIVATE_DATA(pFile) = psPrivateData; + iRet = 0; +err_unlock: + LinuxUnLockMutex(&gPVRSRVLock); + return iRet; +} + + +/*! 
+****************************************************************************** + + @Function PVRSRVRelease + + @Description + + Release access the PVR services node - called when a file is closed, whether + at exit or using close(2) system call. + + @input pInode - the inode for the file being released + + @input pFile - the file handle data for the actual file being released + + @Return 0 for success or <0 for an error. + +*****************************************************************************/ +#if defined(SUPPORT_DRI_DRM) +void PVRSRVRelease(void *pvPrivData) +#else +static int PVRSRVRelease(struct inode unref__ * pInode, struct file *pFile) +#endif +{ + PVRSRV_FILE_PRIVATE_DATA *psPrivateData; + int err = 0; + + LinuxLockMutex(&gPVRSRVLock); + +#if defined(SUPPORT_DRI_DRM) + psPrivateData = (PVRSRV_FILE_PRIVATE_DATA *)pvPrivData; +#else + psPrivateData = PRIVATE_DATA(pFile); +#endif + if (psPrivateData != IMG_NULL) + { +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) + list_del(&psPrivateData->sDRMAuthListItem); +#endif + + if(psPrivateData->hKernelMemInfo) + { + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + /* Look up the meminfo we just exported */ + if(PVRSRVLookupHandle(KERNEL_HANDLE_BASE, + (IMG_PVOID *)&psKernelMemInfo, + psPrivateData->hKernelMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to look up export handle", __FUNCTION__)); + err = -EFAULT; + goto err_unlock; + } + + /* Tell the XProc about the export if required */ + if (psKernelMemInfo->sShareMemWorkaround.bInUse) + { + BM_XProcIndexRelease(psKernelMemInfo->sShareMemWorkaround.ui32ShareIndex); + } + + /* This drops the psMemInfo refcount bumped on export */ + if(FreeMemCallBackCommon(psKernelMemInfo, 0, + PVRSRV_FREE_CALLBACK_ORIGIN_EXTERNAL) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: FreeMemCallBackCommon failed", __FUNCTION__)); + err = -EFAULT; + goto err_unlock; + } + } + + /* Usually this is the same as 
OSGetCurrentProcessIDKM(), + * but not necessarily (e.g. fork(), child closes last..) + */ + gui32ReleasePID = psPrivateData->ui32OpenPID; + PVRSRVProcessDisconnect(psPrivateData->ui32OpenPID); + gui32ReleasePID = 0; + + OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_FILE_PRIVATE_DATA), + psPrivateData, psPrivateData->hBlockAlloc); + +#if !defined(SUPPORT_DRI_DRM) + PRIVATE_DATA(pFile) = IMG_NULL; /*nulling shared pointer*/ +#endif + } + +err_unlock: + LinuxUnLockMutex(&gPVRSRVLock); +#if defined(SUPPORT_DRI_DRM) + return; +#else + return err; +#endif +} + + +/*! +****************************************************************************** + + @Function PVRCore_Init + + @Description + + Insert the driver into the kernel. + + The device major number is allocated by the kernel dynamically. This means + that the device node (nominally /dev/pvrsrv) will need to be re-made at boot + time if the number changes between subsequent loads of the module. While the + number often stays constant between loads this is not guaranteed. The node + is made as root on the shell with: + + mknod /dev/pvrsrv c nnn 0 + + where nnn is the major number found in /proc/devices for DEVNAME and also + reported by the PVR_DPF() - look at the boot log using dmesg' to see this). + + Currently the auto-generated script /etc/init.d/rc.pvr handles creation of + the device. In other environments the device may be created either through + devfs or sysfs. + + Readable proc-filesystem entries under /proc/pvr are created with + CreateProcEntries(). These can be read at runtime to get information about + the device (eg. 'cat /proc/pvr/vm') + + __init places the function in a special memory section that the kernel frees + once the function has been run. Refer also to module_init() macro call below. 
+ + @input none + + @Return none + +*****************************************************************************/ +#if defined(SUPPORT_DRI_DRM) +int PVRCore_Init(void) +#else +static int __init PVRCore_Init(void) +#endif +{ + int error; +#if !defined(PVR_LDM_MODULE) + PVRSRV_ERROR eError; +#endif +#if !defined(SUPPORT_DRI_DRM) && defined(PVR_LDM_DEVICE_CLASS) + struct device *psDev; +#endif + +#if !defined(SUPPORT_DRI_DRM) + /* + * Must come before attempting to print anything via Services. + * For DRM, the initialisation will already have been done. + */ + PVRDPFInit(); +#endif + PVR_TRACE(("PVRCore_Init")); + +#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM) + LinuxInitMutex(&gsPMMutex); +#endif + LinuxInitMutex(&gPVRSRVLock); + + if (CreateProcEntries ()) + { + error = -ENOMEM; + return error; + } + + if (PVROSFuncInit() != PVRSRV_OK) + { + error = -ENOMEM; + goto init_failed; + } + + PVRLinuxMUtilsInit(); + + if(LinuxMMInit() != PVRSRV_OK) + { + error = -ENOMEM; + goto init_failed; + } + + LinuxBridgeInit(); + + PVRMMapInit(); + +#if defined(PVR_LDM_MODULE) + +#if defined(PVR_LDM_PLATFORM_MODULE) || defined(SUPPORT_DRI_DRM_PLUGIN) + if ((error = platform_driver_register(&powervr_driver)) != 0) + { + PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error)); + + goto init_failed; + } + +#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) + if ((error = platform_device_register(&powervr_device)) != 0) + { + platform_driver_unregister(&powervr_driver); + + PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error)); + + goto init_failed; + } +#endif +#endif /* PVR_LDM_PLATFORM_MODULE */ + +#if defined(PVR_LDM_PCI_MODULE) + if ((error = pci_register_driver(&powervr_driver)) != 0) + { + PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register PCI driver (%d)", error)); + + goto init_failed; + } +#endif /* PVR_LDM_PCI_MODULE */ +#endif /* defined(PVR_LDM_MODULE) */ + +#if 
!defined(PVR_LDM_MODULE) + /* + * Drivers using LDM, will call SysInitialise in the probe/attach code + */ + if ((eError = SysInitialise()) != PVRSRV_OK) + { + error = -ENODEV; +#if defined(TCF_REV) && (TCF_REV == 110) + if(eError == PVRSRV_ERROR_NOT_SUPPORTED) + { + printk("\nAtlas wrapper (FPGA image) version mismatch"); + error = -ENODEV; + } +#endif + goto init_failed; + } +#endif /* !defined(PVR_LDM_MODULE) */ + +#if !defined(SUPPORT_DRI_DRM) + AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops); + + if (AssignedMajorNumber <= 0) + { + PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to get major number")); + + error = -EBUSY; + goto sys_deinit; + } + + PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber)); + +#if defined(PVR_LDM_DEVICE_CLASS) + /* + * This code (using GPL symbols) facilitates automatic device + * node creation on platforms with udev (or similar). + */ + psPvrClass = class_create(THIS_MODULE, "pvr"); + + if (IS_ERR(psPvrClass)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create class (%ld)", PTR_ERR(psPvrClass))); + error = -EBUSY; + goto unregister_device; + } + + psDev = device_create(psPvrClass, NULL, MKDEV(AssignedMajorNumber, 0), +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)) + NULL, +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)) */ + DEVNAME); + if (IS_ERR(psDev)) + { + PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create device (%ld)", PTR_ERR(psDev))); + error = -EBUSY; + goto destroy_class; + } +#endif /* defined(PVR_LDM_DEVICE_CLASS) */ +#endif /* !defined(SUPPORT_DRI_DRM) */ + + return 0; + +#if !defined(SUPPORT_DRI_DRM) +#if defined(PVR_LDM_DEVICE_CLASS) +destroy_class: + class_destroy(psPvrClass); +unregister_device: + unregister_chrdev((IMG_UINT)AssignedMajorNumber, DEVNAME); +#endif +sys_deinit: +#endif +#if defined(PVR_LDM_MODULE) +#if defined(PVR_LDM_PCI_MODULE) + pci_unregister_driver(&powervr_driver); +#endif + +#if defined (PVR_LDM_PLATFORM_MODULE) +#if defined(MODULE) && 
!defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) + platform_device_unregister(&powervr_device); +#endif + platform_driver_unregister(&powervr_driver); +#endif + +#else /* defined(PVR_LDM_MODULE) */ + /* LDM drivers call SysDeinitialise during PVRSRVDriverRemove */ + { + SYS_DATA *psSysData; + + psSysData = SysAcquireDataNoCheck(); + if (psSysData != IMG_NULL) + { + (void) SysDeinitialise(psSysData); + } + } +#endif /* defined(PVR_LDM_MODULE) */ +init_failed: + PVRMMapCleanup(); + LinuxMMCleanup(); + LinuxBridgeDeInit(); + PVROSFuncDeInit(); + RemoveProcEntries(); + + return error; + +} /*PVRCore_Init*/ + + +/*! +***************************************************************************** + + @Function PVRCore_Cleanup + + @Description + + Remove the driver from the kernel. + + There's no way we can get out of being unloaded other than panicking; we + just do everything and plough on regardless of error. + + __exit places the function in a special memory section that the kernel frees + once the function has been run. Refer also to module_exit() macro call below. + + Note that the for LDM on MontaVista kernels, the positioning of the driver + de-registration is the opposite way around than would be suggested by the + registration case or the 2,6 kernel case. This is the correct way to do it + and the kernel panics if you change it. You have been warned. 
+ + @input none + + @Return none + +*****************************************************************************/ +#if defined(SUPPORT_DRI_DRM) +void PVRCore_Cleanup(void) +#else +static void __exit PVRCore_Cleanup(void) +#endif +{ +#if !defined(PVR_LDM_MODULE) + SYS_DATA *psSysData; +#endif + PVR_TRACE(("PVRCore_Cleanup")); + +#if !defined(PVR_LDM_MODULE) + SysAcquireData(&psSysData); +#endif + +#if !defined(SUPPORT_DRI_DRM) + +#if defined(PVR_LDM_DEVICE_CLASS) + device_destroy(psPvrClass, MKDEV(AssignedMajorNumber, 0)); + class_destroy(psPvrClass); +#endif + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)) + if ( +#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)) */ + unregister_chrdev((IMG_UINT)AssignedMajorNumber, DEVNAME) +#if !(LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)) + ; +#else /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)) */ + ) + { + PVR_DPF((PVR_DBG_ERROR," can't unregister device major %d", AssignedMajorNumber)); + } +#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)) */ +#endif /* !defined(SUPPORT_DRI_DRM) */ + +#if defined(PVR_LDM_MODULE) + +#if defined(PVR_LDM_PCI_MODULE) + pci_unregister_driver(&powervr_driver); +#endif + +#if defined (PVR_LDM_PLATFORM_MODULE) +#if defined(MODULE) && !defined(PVR_USE_PRE_REGISTERED_PLATFORM_DEV) + platform_device_unregister(&powervr_device); +#endif + platform_driver_unregister(&powervr_driver); +#endif + +#else /* defined(PVR_LDM_MODULE) */ +#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) + if (gPVRPowerLevel != 0) + { + if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK) + { + gPVRPowerLevel = 0; + } + } +#endif + /* LDM drivers call SysDeinitialise during PVRSRVDriverRemove */ + (void) SysDeinitialise(psSysData); +#endif /* defined(PVR_LDM_MODULE) */ + + PVRMMapCleanup(); + + LinuxMMCleanup(); + + LinuxBridgeDeInit(); + + PVROSFuncDeInit(); + + RemoveProcEntries(); + + PVR_TRACE(("PVRCore_Cleanup: unloading")); +} + +/* + * These macro calls define the 
initialisation and removal functions of the + * driver. Although they are prefixed `module_', they apply when compiling + * statically as well; in both cases they define the function the kernel will + * run to start/stop the driver. +*/ +#if !defined(SUPPORT_DRI_DRM) +module_init(PVRCore_Init); +module_exit(PVRCore_Cleanup); +#endif diff --git a/pvr-source/services4/srvkm/env/linux/mutex.c b/pvr-source/services4/srvkm/env/linux/mutex.c new file mode 100644 index 0000000..2cd666f --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/mutex.c @@ -0,0 +1,153 @@ +/*************************************************************************/ /*! +@Title Linux mutex interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include <linux/version.h> +#include <linux/errno.h> +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) +#include <linux/mutex.h> +#else +#include <asm/semaphore.h> +#endif +#include <linux/module.h> + +#include <img_defs.h> +#include <services.h> + +#include "mutex.h" + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) + +IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex) +{ + mutex_init(psPVRSRVMutex); +} + +IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex) +{ + mutex_lock(psPVRSRVMutex); +} + +PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex) +{ + if(mutex_lock_interruptible(psPVRSRVMutex) == -EINTR) + { + return PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR; + } + else + { + return PVRSRV_OK; + } +} + +IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex) +{ + return mutex_trylock(psPVRSRVMutex); +} + +IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex) +{ + mutex_unlock(psPVRSRVMutex); +} + +IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex) +{ + return (mutex_is_locked(psPVRSRVMutex)) ? IMG_TRUE : IMG_FALSE; +} + + +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) */ + + +IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex) +{ + init_MUTEX(&psPVRSRVMutex->sSemaphore); + atomic_set(&psPVRSRVMutex->Count, 0); +} + +IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex) +{ + down(&psPVRSRVMutex->sSemaphore); + atomic_dec(&psPVRSRVMutex->Count); +} + +PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex) +{ + if(down_interruptible(&psPVRSRVMutex->sSemaphore) == -EINTR) + { + /* The process was sent a signal while waiting for the semaphore + * (e.g. 
a kill signal from userspace) + */ + return PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR; + }else{ + atomic_dec(&psPVRSRVMutex->Count); + return PVRSRV_OK; + } +} + +IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex) +{ + IMG_INT32 Status = down_trylock(&psPVRSRVMutex->sSemaphore); + if(Status == 0) + { + atomic_dec(&psPVRSRVMutex->Count); + } + + return Status; +} + +IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex) +{ + atomic_inc(&psPVRSRVMutex->Count); + up(&psPVRSRVMutex->sSemaphore); +} + +IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex) +{ + IMG_INT32 iCount; + + iCount = atomic_read(&psPVRSRVMutex->Count); + + return (IMG_BOOL)iCount; +} + +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) */ + diff --git a/pvr-source/services4/srvkm/env/linux/mutex.h b/pvr-source/services4/srvkm/env/linux/mutex.h new file mode 100644 index 0000000..c590da1 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/mutex.h @@ -0,0 +1,90 @@ +/*************************************************************************/ /*! +@Title Linux mutex interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + + +#ifndef __INCLUDED_LINUX_MUTEX_H_ +#define __INCLUDED_LINUX_MUTEX_H_ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) +#include <linux/mutex.h> +#else +#include <asm/semaphore.h> +#endif + + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) + +typedef struct mutex PVRSRV_LINUX_MUTEX; + +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) */ + + +typedef struct { + struct semaphore sSemaphore; + /* since Linux's struct semaphore is intended to be + * opaque we don't poke inside for the count and + * instead we track it outselves. (So we can implement + * LinuxIsLockedMutex) + */ + atomic_t Count; +}PVRSRV_LINUX_MUTEX; + +#endif + + +extern IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex); + +extern IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex); + +extern PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex); + +extern IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex); + +extern IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex); + +extern IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex); + + +#endif /* __INCLUDED_LINUX_MUTEX_H_ */ + diff --git a/pvr-source/services4/srvkm/env/linux/mutils.c b/pvr-source/services4/srvkm/env/linux/mutils.c new file mode 100644 index 0000000..8e57476 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/mutils.c @@ -0,0 +1,166 @@ +/*************************************************************************/ /*! +@Title Linux memory interface support functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#endif + +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <asm/page.h> +#include <asm/pgtable.h> + +#include "img_defs.h" +#include "pvr_debug.h" +#include "mutils.h" + +#if defined(SUPPORT_LINUX_X86_PAT) +#define PAT_LINUX_X86_WC 1 + +#define PAT_X86_ENTRY_BITS 8 + +#define PAT_X86_BIT_PWT 1U +#define PAT_X86_BIT_PCD 2U +#define PAT_X86_BIT_PAT 4U +#define PAT_X86_BIT_MASK (PAT_X86_BIT_PAT | PAT_X86_BIT_PCD | PAT_X86_BIT_PWT) + +static IMG_BOOL g_write_combining_available = IMG_FALSE; + +#define PROT_TO_PAT_INDEX(v, B) ((v & _PAGE_ ## B) ? PAT_X86_BIT_ ## B : 0) + +static inline IMG_UINT +pvr_pat_index(pgprotval_t prot_val) +{ + IMG_UINT ret = 0; + pgprotval_t val = prot_val & _PAGE_CACHE_MASK; + + ret |= PROT_TO_PAT_INDEX(val, PAT); + ret |= PROT_TO_PAT_INDEX(val, PCD); + ret |= PROT_TO_PAT_INDEX(val, PWT); + + return ret; +} + +static inline IMG_UINT +pvr_pat_entry(u64 pat, IMG_UINT index) +{ + return (IMG_UINT)(pat >> (index * PAT_X86_ENTRY_BITS)) & PAT_X86_BIT_MASK; +} + +static IMG_VOID +PVRLinuxX86PATProbe(IMG_VOID) +{ + /* + * cpu_has_pat indicates whether PAT support is available on the CPU, + * but doesn't indicate if it has been enabled. + */ + if (cpu_has_pat) /* PRQA S 3335 */ /* ignore 'no function declared' */ + { + u64 pat; + IMG_UINT pat_index; + IMG_UINT pat_entry; + + PVR_TRACE(("%s: PAT available", __FUNCTION__)); + /* + * There is no Linux API for finding out if write combining + * is avaialable through the PAT, so we take the direct + * approach, and see if the PAT MSR contains a write combining + * entry. 
+ */ + rdmsrl(MSR_IA32_CR_PAT, pat); + PVR_TRACE(("%s: Top 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat >> 32))); + PVR_TRACE(("%s: Bottom 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat))); + + pat_index = pvr_pat_index(_PAGE_CACHE_WC); + PVR_TRACE(("%s: PAT index for write combining: %u", __FUNCTION__, pat_index)); + + pat_entry = pvr_pat_entry(pat, pat_index); + PVR_TRACE(("%s: PAT entry for write combining: 0x%.2x (should be 0x%.2x)", __FUNCTION__, pat_entry, PAT_LINUX_X86_WC)); + +#if defined(SUPPORT_LINUX_X86_WRITECOMBINE) + g_write_combining_available = (IMG_BOOL)(pat_entry == PAT_LINUX_X86_WC); +#endif + } +#if defined(DEBUG) +#if defined(SUPPORT_LINUX_X86_WRITECOMBINE) + if (g_write_combining_available) + { + PVR_TRACE(("%s: Write combining available via PAT", __FUNCTION__)); + } + else + { + PVR_TRACE(("%s: Write combining not available", __FUNCTION__)); + } +#else /* defined(SUPPORT_LINUX_X86_WRITECOMBINE) */ + PVR_TRACE(("%s: Write combining disabled in driver build", __FUNCTION__)); +#endif /* defined(SUPPORT_LINUX_X86_WRITECOMBINE) */ +#endif /* DEBUG */ +} + +pgprot_t +pvr_pgprot_writecombine(pgprot_t prot) +{ + /* + * It would be worth checking from time to time to see if a + * pgprot_writecombine function (or similar) is introduced on Linux for + * x86 processors. If so, this function, and PVRLinuxX86PATProbe can be + * removed, and a macro used to select between pgprot_writecombine and + * pgprot_noncached, dpending on the value for of + * SUPPORT_LINUX_X86_WRITECOMBINE. + */ + /* PRQA S 0481,0482 2 */ /* scalar expressions */ + return (g_write_combining_available) ? 
+ __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | _PAGE_CACHE_WC) : pgprot_noncached(prot); +} +#endif /* defined(SUPPORT_LINUX_X86_PAT) */ + +IMG_VOID +PVRLinuxMUtilsInit(IMG_VOID) +{ +#if defined(SUPPORT_LINUX_X86_PAT) + PVRLinuxX86PATProbe(); +#endif +} + diff --git a/pvr-source/services4/srvkm/env/linux/mutils.h b/pvr-source/services4/srvkm/env/linux/mutils.h new file mode 100644 index 0000000..891598c --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/mutils.h @@ -0,0 +1,119 @@ +/*************************************************************************/ /*! +@Title Memory management support utils +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Declares various memory management support functions + for Linux. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __IMG_LINUX_MUTILS_H__ +#define __IMG_LINUX_MUTILS_H__ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#endif + +#if !(defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))) +#if defined(SUPPORT_LINUX_X86_PAT) +#undef SUPPORT_LINUX_X86_PAT +#endif +#endif + +#if defined(SUPPORT_LINUX_X86_PAT) + pgprot_t pvr_pgprot_writecombine(pgprot_t prot); + #define PGPROT_WC(pv) pvr_pgprot_writecombine(pv) +#else + #if defined(__arm__) || defined(__sh__) + #define PGPROT_WC(pv) pgprot_writecombine(pv) + #else + #if defined(__i386__) || defined(__mips__) + #define PGPROT_WC(pv) pgprot_noncached(pv) + #else + #define PGPROT_WC(pv) pgprot_noncached(pv) + #error Unsupported architecture! + #endif + #endif +#endif + +#define PGPROT_UC(pv) pgprot_noncached(pv) + +#if defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) + #define IOREMAP(pa, bytes) ioremap_cache(pa, bytes) +#else + #if defined(__arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) + #define IOREMAP(pa, bytes) ioremap_cached(pa, bytes) + #else + #define IOREMAP(pa, bytes) ioremap(pa, bytes) + #endif +#endif + +#if defined(SUPPORT_LINUX_X86_PAT) + #if defined(SUPPORT_LINUX_X86_WRITECOMBINE) + #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes) + #else + #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes) + #endif +#else + #if defined(__arm__) + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) + #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes) + #else + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) + #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes) + #else + #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)) + #define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, L_PTE_BUFFERABLE) + #else + #define IOREMAP_WC(pa, bytes) __ioremap(pa, 
bytes, L_PTE_BUFFERABLE, 1)
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#endif + +#include <asm/io.h> +#include <asm/page.h> +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) +#include <asm/system.h> +#endif +#include <asm/cacheflush.h> +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/hugetlb.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/delay.h> +#include <linux/pci.h> + +#include <linux/string.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <asm/hardirq.h> +#include <linux/timer.h> +#include <linux/capability.h> +#include <asm/uaccess.h> +#include <linux/spinlock.h> +#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || \ + defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) || \ + defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || \ + defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) || \ + defined(PVR_LINUX_USING_WORKQUEUES) +#include <linux/workqueue.h> +#endif + +#include "img_types.h" +#include "services_headers.h" +#include "mm.h" +#include "pvrmmap.h" +#include "mmap.h" +#include "env_data.h" +#include 
"proc.h" +#include "mutex.h" +#include "event.h" +#include "linkage.h" +#include "pvr_uaccess.h" +#include "lock.h" +#include <syslocal.h> + +#if defined (SUPPORT_ION) +#include "ion.h" +#endif + +#if defined (CONFIG_X86_PAE) +#error Physical Address Extension not supported with the driver +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) +#define ON_EACH_CPU(func, info, wait) on_each_cpu(func, info, wait) +#else +#define ON_EACH_CPU(func, info, wait) on_each_cpu(func, info, 0, wait) +#endif + +#if defined(PVR_LINUX_USING_WORKQUEUES) && !defined(CONFIG_PREEMPT) +/* + * Services spins at certain points waiting for events (e.g. swap + * chain destrucion). If those events rely on workqueues running, + * it needs to be possible to preempt the waiting thread. + * Removing the need for CONFIG_PREEMPT will require adding preemption + * points at various points in Services. + */ +#error "A preemptible Linux kernel is required when using workqueues" +#endif + +#if defined(EMULATOR) +#define EVENT_OBJECT_TIMEOUT_MS (2000) +#else +#define EVENT_OBJECT_TIMEOUT_MS (100) +#endif /* EMULATOR */ + +#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc) +#else +PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line) +#endif +{ + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(phBlockAlloc); + + if (ui32Size > PAGE_SIZE) + { + /* Try to allocate the memory using vmalloc */ +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + *ppvCpuVAddr = _VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED, pszFilename, ui32Line); +#else + *ppvCpuVAddr = VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED); +#endif + if (*ppvCpuVAddr) + { + return PVRSRV_OK; + } + } + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + *ppvCpuVAddr = _KMallocWrapper(ui32Size, GFP_KERNEL | 
__GFP_NOWARN, pszFilename, ui32Line); +#else + *ppvCpuVAddr = KMallocWrapper(ui32Size, GFP_KERNEL | __GFP_NOWARN); +#endif + if (!*ppvCpuVAddr) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + return PVRSRV_OK; +} + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)) + +static inline int is_vmalloc_addr(const void *pvCpuVAddr) +{ + unsigned long lAddr = (unsigned long)pvCpuVAddr; + return lAddr >= VMALLOC_START && lAddr < VMALLOC_END; +} + +#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)) */ + +#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) +PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc) +#else +PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line) +#endif +{ + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(hBlockAlloc); + + if (is_vmalloc_addr(pvCpuVAddr)) + { +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + _VFreeWrapper(pvCpuVAddr, pszFilename, ui32Line); +#else + VFreeWrapper(pvCpuVAddr); +#endif + } + else + { +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + _KFreeWrapper(pvCpuVAddr, pszFilename, ui32Line); +#else + KFreeWrapper(pvCpuVAddr); +#endif + } + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +OSAllocPages_Impl(IMG_UINT32 ui32AllocFlags, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PageSize, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_HANDLE hBMHandle, + IMG_VOID **ppvCpuVAddr, + IMG_HANDLE *phOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea; + + PVR_UNREFERENCED_PARAMETER(ui32PageSize); + +#if 0 + /* For debug: force all OSAllocPages allocations to have a kernel + * virtual address */ + if(ui32AllocFlags & PVRSRV_HAP_SINGLE_PROCESS) + { + ui32AllocFlags &= ~PVRSRV_HAP_SINGLE_PROCESS; + ui32AllocFlags |= PVRSRV_HAP_MULTI_PROCESS; + } +#endif + + if(ui32AllocFlags & PVRSRV_MEM_ION) + { + /* We'll only see 
HAP_SINGLE_PROCESS with MEM_ION */ + BUG_ON((ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK) != PVRSRV_HAP_SINGLE_PROCESS); + + psLinuxMemArea = NewIONLinuxMemArea(ui32Size, ui32AllocFlags, + pvPrivData, ui32PrivDataLength); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + PVRMMapRegisterArea(psLinuxMemArea); + goto ExitSkipSwitch; + } + + switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK) + { + case PVRSRV_HAP_KERNEL_ONLY: + { + psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + break; + } + case PVRSRV_HAP_SINGLE_PROCESS: + { + /* Currently PVRSRV_HAP_SINGLE_PROCESS implies that we dont need a + * kernel virtual mapping, but will need a user space virtual mapping */ + + psLinuxMemArea = NewAllocPagesLinuxMemArea(ui32Size, ui32AllocFlags); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + PVRMMapRegisterArea(psLinuxMemArea); + break; + } + + case PVRSRV_HAP_MULTI_PROCESS: + { + /* Currently PVRSRV_HAP_MULTI_PROCESS implies that we need a kernel + * virtual mapping and potentially multiple user space virtual + * mappings: Note: these eat into our limited kernel virtual + * address space. */ + +#if defined(VIVT_CACHE) || defined(__sh__) + /* ARM9 caches are tagged with virtual pages, not physical. As we are going to + * share this memory in different address spaces, we don't want it to be cached. + * ARM11 has physical tagging, so we can cache this memory without fear of virtual + * address aliasing in the TLB, as long as the kernel supports cache colouring for + * VIPT architectures. 
*/ + ui32AllocFlags &= ~PVRSRV_HAP_CACHED; +#endif + psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + PVRMMapRegisterArea(psLinuxMemArea); + break; + } + default: + PVR_DPF((PVR_DBG_ERROR, "OSAllocPages: invalid flags 0x%x\n", ui32AllocFlags)); + *ppvCpuVAddr = NULL; + *phOSMemHandle = (IMG_HANDLE)0; + return PVRSRV_ERROR_INVALID_PARAMS; + } + + /* + In case of sparse mapping we need to handle back to the BM as it + knows the mapping info + */ + if (ui32AllocFlags & PVRSRV_MEM_SPARSE) + { + psLinuxMemArea->hBMHandle = hBMHandle; + } + +ExitSkipSwitch: + *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea); + *phOSMemHandle = psLinuxMemArea; + + LinuxMemAreaRegister(psLinuxMemArea); + + return PVRSRV_OK; +} + + +PVRSRV_ERROR +OSFreePages(IMG_UINT32 ui32AllocFlags, IMG_UINT32 ui32Bytes, IMG_VOID *pvCpuVAddr, IMG_HANDLE hOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(ui32Bytes); + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + + psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK) + { + case PVRSRV_HAP_KERNEL_ONLY: + break; + case PVRSRV_HAP_SINGLE_PROCESS: + case PVRSRV_HAP_MULTI_PROCESS: + eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, + "OSFreePages(ui32AllocFlags=0x%08X, ui32Bytes=%d, " + "pvCpuVAddr=%p, hOSMemHandle=%p) FAILED!", + ui32AllocFlags, ui32Bytes, pvCpuVAddr, hOSMemHandle)); + return eError; + } + break; + default: + PVR_DPF((PVR_DBG_ERROR,"%s: invalid flags 0x%x\n", + __FUNCTION__, ui32AllocFlags)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + + LinuxMemAreaDeepFree(psLinuxMemArea); + + return PVRSRV_OK; +} + +IMG_INT32 +OSGetMemMultiPlaneInfo(IMG_HANDLE hOSMemHandle, IMG_UINT32* pui32AddressOffsets, + IMG_UINT32* ui32NumAddrOffsets) +{ + LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + 
if(!ui32NumAddrOffsets) + return -1; + + if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ION) + return GetIONLinuxMemAreaInfo(psLinuxMemArea, pui32AddressOffsets, ui32NumAddrOffsets); + + if(!pui32AddressOffsets) + return -1; + + *pui32AddressOffsets = 0; + *ui32NumAddrOffsets = 1; + + return psLinuxMemArea->ui32ByteSize; +} + +PVRSRV_ERROR +OSGetSubMemHandle(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE *phOSMemHandleRet) +{ + LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea; + PVRSRV_ERROR eError; + + psParentLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + psLinuxMemArea = NewSubLinuxMemArea(psParentLinuxMemArea, ui32ByteOffset, ui32Bytes); + if(!psLinuxMemArea) + { + *phOSMemHandleRet = NULL; + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + *phOSMemHandleRet = psLinuxMemArea; + + /* KERNEL_ONLY areas are never mmapable. */ + if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY) + { + return PVRSRV_OK; + } + + eError = PVRMMapRegisterArea(psLinuxMemArea); + if(eError != PVRSRV_OK) + { + goto failed_register_area; + } + + return PVRSRV_OK; + +failed_register_area: + *phOSMemHandleRet = NULL; + LinuxMemAreaDeepFree(psLinuxMemArea); + return eError; +} + +PVRSRV_ERROR +OSReleaseSubMemHandle(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32Flags) +{ + LinuxMemArea *psLinuxMemArea; + PVRSRV_ERROR eError; + + psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC); + + if((ui32Flags & PVRSRV_HAP_KERNEL_ONLY) == 0) + { + eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea); + if(eError != PVRSRV_OK) + { + return eError; + } + } + LinuxMemAreaDeepFree(psLinuxMemArea); + + return PVRSRV_OK; +} + + +IMG_CPU_PHYADDR +OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32ByteOffset) +{ + PVR_ASSERT(hOSMemHandle); + + return LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset); +} + + +IMG_BOOL OSMemHandleIsPhysContig(IMG_VOID *hOSMemHandle) +{ + LinuxMemArea 
*psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + PVR_ASSERT(psLinuxMemArea); + + if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV) + return psLinuxMemArea->uData.sExternalKV.bPhysContig; + + return IMG_FALSE; +} + + +/*! +****************************************************************************** + + @Function OSMemCopy + + @Description Copies memory around + + @Input pvDst - pointer to dst + @Output pvSrc - pointer to src + @Input ui32Size - bytes to copy + + @Return none + +******************************************************************************/ +IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size) +{ +#if defined(USE_UNOPTIMISED_MEMCPY) + IMG_UINT8 *Src,*Dst; + IMG_INT i; + + Src=(IMG_UINT8 *)pvSrc; + Dst=(IMG_UINT8 *)pvDst; + for(i=0;i<ui32Size;i++) + { + Dst[i]=Src[i]; + } +#else + memcpy(pvDst, pvSrc, ui32Size); +#endif +} + + +/*! +****************************************************************************** + + @Function OSMemSet + + @Description Function that does the same as the C memset() functions + + @Modified *pvDest : pointer to start of buffer to be set + + @Input ui8Value: value to set each byte to + + @Input ui32Size : number of bytes to set + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size) +{ +#if defined(USE_UNOPTIMISED_MEMSET) + IMG_UINT8 *Buff; + IMG_INT i; + + Buff=(IMG_UINT8 *)pvDest; + for(i=0;i<ui32Size;i++) + { + Buff[i]=ui8Value; + } +#else + memset(pvDest, (IMG_INT) ui8Value, (size_t) ui32Size); +#endif +} + + +/*! +****************************************************************************** + @Function OSStringCopy + @Description strcpy +******************************************************************************/ +IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc) +{ + return (strcpy(pszDest, pszSrc)); +} + +/*! 
+****************************************************************************** + @Function OSSNPrintf + @Description snprintf +******************************************************************************/ +IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_UINT32 ui32Size, const IMG_CHAR *pszFormat, ...) +{ + va_list argList; + IMG_INT32 iCount; + + va_start(argList, pszFormat); + iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList); + va_end(argList); + + return iCount; +} + +/*! +****************************************************************************** + + @Function OSBreakResourceLock + + @Description unlocks an OS dependant resource + + @Input phResource - pointer to OS dependent resource structure + @Input ui32ID - Lock value to look for + + @Return + +******************************************************************************/ +IMG_VOID OSBreakResourceLock (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID) +{ + volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock; + + if(*pui32Access) + { + if(psResource->ui32ID == ui32ID) + { + psResource->ui32ID = 0; + *pui32Access = 0; + } + else + { + PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked for this process.")); + } + } + else + { + PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked")); + } +} + + +/*! +****************************************************************************** + + @Function OSCreateResource + + @Description creates a OS dependant resource object + + @Input phResource - pointer to OS dependent resource + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource) +{ + psResource->ui32ID = 0; + psResource->ui32Lock = 0; + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSDestroyResource + + @Description destroys an OS dependant resource object + + @Input phResource - pointer to OS dependent resource + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSDestroyResource (PVRSRV_RESOURCE *psResource) +{ + OSBreakResourceLock (psResource, psResource->ui32ID); + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function OSInitEnvData + + @Description Allocates space for env specific data + + @Input ppvEnvSpecificData - pointer to pointer in which to return + allocated data. + @Input ui32MMUMode - MMU mode. + + @Return nothing + +******************************************************************************/ +PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData) +{ + ENV_DATA *psEnvData; + PVRSRV_ERROR eError; + + /* allocate env specific data */ + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), (IMG_VOID **)&psEnvData, IMG_NULL, + "Environment Data"); + if (eError != PVRSRV_OK) + { + return eError; + } + + eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, + &psEnvData->pvBridgeData, IMG_NULL, + "Bridge Data"); + if (eError != PVRSRV_OK) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), psEnvData, IMG_NULL); + /*not nulling pointer, out of scope*/ + return eError; + } + + + /* ISR installation flags */ + psEnvData->bMISRInstalled = IMG_FALSE; + psEnvData->bLISRInstalled = IMG_FALSE; + + /* copy structure back */ + *ppvEnvSpecificData = psEnvData; + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSDeInitEnvData + + @Description frees env specific data memory + + @Input pvEnvSpecificData - pointer to private structure + + @Return PVRSRV_OK on success else PVRSRV_ERROR_OUT_OF_MEMORY + +******************************************************************************/ +PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData) +{ + ENV_DATA *psEnvData = (ENV_DATA*)pvEnvSpecificData; + + PVR_ASSERT(!psEnvData->bMISRInstalled); + PVR_ASSERT(!psEnvData->bLISRInstalled); + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, psEnvData->pvBridgeData, IMG_NULL); + psEnvData->pvBridgeData = IMG_NULL; + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), pvEnvSpecificData, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function OSReleaseThreadQuanta + + @Description + Releases thread quanta + + @Return nothing + +******************************************************************************/ +IMG_VOID OSReleaseThreadQuanta(IMG_VOID) +{ + schedule(); +} + + +/*! +****************************************************************************** + + @Function OSClockus + + @Description + This function returns the clock in microseconds + + @Input void + + @Return - clock (us) + +******************************************************************************/ +IMG_UINT32 OSClockus(IMG_VOID) +{ + IMG_UINT32 time, j = jiffies; + + time = j * (1000000 / HZ); + + return time; +} + + +IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus) +{ + udelay(ui32Timeus); +} + + +IMG_VOID OSSleepms(IMG_UINT32 ui32Timems) +{ + msleep(ui32Timems); +} + + +/*! 
+****************************************************************************** + + @Function OSFuncHighResTimerCreate + + @Description + This function creates a high res timer who's handle is returned + + @Input nothing + + @Return handle + +******************************************************************************/ +IMG_HANDLE OSFuncHighResTimerCreate(IMG_VOID) +{ + /* We don't need a handle, but we must return non-NULL */ + return (IMG_HANDLE) 1; +} + +/*! +****************************************************************************** + + @Function OSFuncHighResTimerGetus + + @Description + This function returns the current timestamp in us + + @Input nothing + + @Return handle + +******************************************************************************/ +IMG_UINT32 OSFuncHighResTimerGetus(IMG_HANDLE hTimer) +{ + return (IMG_UINT32) jiffies_to_usecs(jiffies); +} + +/*! +****************************************************************************** + + @Function OSFuncHighResTimerDestroy + + @Description + This function will destroy the high res timer + + @Input nothing + + @Return handle + +******************************************************************************/ +IMG_VOID OSFuncHighResTimerDestroy(IMG_HANDLE hTimer) +{ + PVR_UNREFERENCED_PARAMETER(hTimer); +} + +/*! 
+****************************************************************************** + + @Function OSGetCurrentProcessIDKM + + @Description Returns handle for current process + + @Return ID of current process + +*****************************************************************************/ +IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID) +{ + if (in_interrupt()) + { + return KERNEL_ID; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) + return (IMG_UINT32)current->pgrp; +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) + return (IMG_UINT32)task_tgid_nr(current); +#else + return (IMG_UINT32)current->tgid; +#endif +#endif +} + + +int OSGetProcCmdline(IMG_UINT32 ui32PID, char * buffer, int buff_size) +{ + int res = 0; + unsigned int len; + struct task_struct *task = pid_task(find_vpid(ui32PID), PIDTYPE_PID); + struct mm_struct *mm = task ? get_task_mm(task) : IMG_NULL; + if (!mm) + goto out; + if (!mm->arg_end) + goto out_mm; /* Shh! No looking before we're done */ + + len = mm->arg_end - mm->arg_start; + + if (len > buff_size) + len = buff_size; + + res = pvr_access_process_vm(task, mm->arg_start, buffer, len, 0); + + // If the nul at the end of args has been overwritten, then + // assume application is using setproctitle(3). + if (res > 0 && buffer[res-1] != '\0' && len < buff_size) { + len = strnlen(buffer, res); + if (len < res) { + res = len; + } else { + len = mm->env_end - mm->env_start; + if (len > buff_size - res) + len = buff_size - res; + res += pvr_access_process_vm(task, mm->env_start, buffer+res, len, 0); + res = strnlen(buffer, res); + } + } +out_mm: + mmput(mm); +out: + return res; +} + +const char* OSGetPathBaseName(char * buffer, int buff_size) +{ + const char *base_name = buffer; + while (1) + { + const char *next = strnchr(base_name, buff_size, '/'); + if (!next) + break; + + buff_size -= (next - base_name -1); + base_name = (next + 1); + + } + return base_name; +} + + +/*! 
+****************************************************************************** + + @Function OSGetPageSize + + @Description gets page size + + @Return page size + +******************************************************************************/ +IMG_UINT32 OSGetPageSize(IMG_VOID) +{ +#if defined(__sh__) + IMG_UINT32 ui32ReturnValue = PAGE_SIZE; + + return (ui32ReturnValue); +#else + return PAGE_SIZE; +#endif +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) +/*! +****************************************************************************** + + @Function DeviceISRWrapper + + @Description wrapper for Device ISR function to conform to ISR OS interface + + @Return + +******************************************************************************/ +static irqreturn_t DeviceISRWrapper(int irq, void *dev_id +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + , struct pt_regs *regs +#endif + ) +{ + PVRSRV_DEVICE_NODE *psDeviceNode; + IMG_BOOL bStatus = IMG_FALSE; + + PVR_UNREFERENCED_PARAMETER(irq); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + PVR_UNREFERENCED_PARAMETER(regs); +#endif + psDeviceNode = (PVRSRV_DEVICE_NODE*)dev_id; + if(!psDeviceNode) + { + PVR_DPF((PVR_DBG_ERROR, "DeviceISRWrapper: invalid params\n")); + goto out; + } + + bStatus = PVRSRVDeviceLISR(psDeviceNode); + + if (bStatus) + { + OSScheduleMISR((IMG_VOID *)psDeviceNode->psSysData); + } + +out: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) + return bStatus ? IRQ_HANDLED : IRQ_NONE; +#endif +} + + + +/*! +****************************************************************************** + + @Function SystemISRWrapper + + @Description wrapper for System ISR function to conform to ISR OS interface + + @Input Interrupt - NT interrupt object. 
+ @Input Context - Context parameter + + @Return + +******************************************************************************/ +static irqreturn_t SystemISRWrapper(int irq, void *dev_id +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + , struct pt_regs *regs +#endif + ) +{ + SYS_DATA *psSysData; + IMG_BOOL bStatus = IMG_FALSE; + + PVR_UNREFERENCED_PARAMETER(irq); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + PVR_UNREFERENCED_PARAMETER(regs); +#endif + psSysData = (SYS_DATA *)dev_id; + if(!psSysData) + { + PVR_DPF((PVR_DBG_ERROR, "SystemISRWrapper: invalid params\n")); + goto out; + } + + bStatus = PVRSRVSystemLISR(psSysData); + + if (bStatus) + { + OSScheduleMISR((IMG_VOID *)psSysData); + } + +out: +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) + return bStatus ? IRQ_HANDLED : IRQ_NONE; +#endif +} +/*! +****************************************************************************** + + @Function OSInstallDeviceLISR + + @Description Installs a Device ISR + + @Input pvSysData + @Input ui32Irq - IRQ number + @Input pszISRName - ISR name + @Input pvDeviceNode - device node contains ISR function and data argument + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData, + IMG_UINT32 ui32Irq, + IMG_CHAR *pszISRName, + IMG_VOID *pvDeviceNode) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (psEnvData->bLISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallDeviceLISR: An ISR has already been installed: IRQ %d cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie)); + return PVRSRV_ERROR_ISR_ALREADY_INSTALLED; + } + + PVR_TRACE(("Installing device LISR %s on IRQ %d with cookie %p", pszISRName, ui32Irq, pvDeviceNode)); + + if(request_irq(ui32Irq, DeviceISRWrapper, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)) + SA_SHIRQ +#else + IRQF_SHARED +#endif + , pszISRName, 
pvDeviceNode)) + { + PVR_DPF((PVR_DBG_ERROR,"OSInstallDeviceLISR: Couldn't install device LISR on IRQ %d", ui32Irq)); + + return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + } + + psEnvData->ui32IRQ = ui32Irq; + psEnvData->pvISRCookie = pvDeviceNode; + psEnvData->bLISRInstalled = IMG_TRUE; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function OSUninstallDeviceLISR + + @Description Uninstalls a Device ISR + + @Input pvSysData - sysdata + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (!psEnvData->bLISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSUninstallDeviceLISR: No LISR has been installed")); + return PVRSRV_ERROR_ISR_NOT_INSTALLED; + } + + PVR_TRACE(("Uninstalling device LISR on IRQ %d with cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie)); + + free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie); + + psEnvData->bLISRInstalled = IMG_FALSE; + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSInstallSystemLISR + + @Description Installs a System ISR + + @Input psSysData + @Input ui32Irq - IRQ number + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (psEnvData->bLISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallSystemLISR: An LISR has already been installed: IRQ %d cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie)); + return PVRSRV_ERROR_ISR_ALREADY_INSTALLED; + } + + PVR_TRACE(("Installing system LISR on IRQ %d with cookie %p", ui32Irq, pvSysData)); + + if(request_irq(ui32Irq, SystemISRWrapper, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)) + SA_SHIRQ +#else + IRQF_SHARED +#endif + , PVRSRV_MODNAME, pvSysData)) + { + PVR_DPF((PVR_DBG_ERROR,"OSInstallSystemLISR: Couldn't install system LISR on IRQ %d", ui32Irq)); + + return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; + } + + psEnvData->ui32IRQ = ui32Irq; + psEnvData->pvISRCookie = pvSysData; + psEnvData->bLISRInstalled = IMG_TRUE; + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSUninstallSystemLISR + + @Description Uninstalls a System ISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (!psEnvData->bLISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSUninstallSystemLISR: No LISR has been installed")); + return PVRSRV_ERROR_ISR_NOT_INSTALLED; + } + + PVR_TRACE(("Uninstalling system LISR on IRQ %d with cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie)); + + free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie); + + psEnvData->bLISRInstalled = IMG_FALSE; + + return PVRSRV_OK; +} + +#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) +/*! +****************************************************************************** + + @Function MISRWrapper + + @Description OS dependent MISR wrapper + + @Input psSysData + + @Return error status + +******************************************************************************/ +static void MISRWrapper( +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) + void *data +#else + struct work_struct *data +#endif +) +{ + ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork); + SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData; + + PVRSRVMISR(psSysData); +} + + +/*! 
+****************************************************************************** + + @Function OSInstallMISR + + @Description Installs an OS dependent MISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (psEnvData->bMISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed")); + return PVRSRV_ERROR_ISR_ALREADY_INSTALLED; + } + + PVR_TRACE(("Installing MISR with cookie %p", pvSysData)); + + psEnvData->psWorkQueue = create_singlethread_workqueue("pvr_workqueue"); + + if (psEnvData->psWorkQueue == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethreaded_workqueue failed")); + return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD; + } + + INIT_WORK(&psEnvData->sMISRWork, MISRWrapper +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) + , (void *)&psEnvData->sMISRWork +#endif + ); + + psEnvData->pvMISRData = pvSysData; + psEnvData->bMISRInstalled = IMG_TRUE; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function OSUninstallMISR + + @Description Uninstalls an OS dependent MISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (!psEnvData->bMISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed")); + return PVRSRV_ERROR_ISR_NOT_INSTALLED; + } + + PVR_TRACE(("Uninstalling MISR")); + + destroy_workqueue(psEnvData->psWorkQueue); + + psEnvData->bMISRInstalled = IMG_FALSE; + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSScheduleMISR + + @Description Schedules an OS dependent MISR + + @Input pvSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData; + + if (psEnvData->bMISRInstalled) + { + queue_work(psEnvData->psWorkQueue, &psEnvData->sMISRWork); + } + + return PVRSRV_OK; +} +#else /* defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) */ +#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) +/*! +****************************************************************************** + + @Function MISRWrapper + + @Description OS dependent MISR wrapper + + @Input psSysData + + @Return error status + +******************************************************************************/ +static void MISRWrapper( +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) + void *data +#else + struct work_struct *data +#endif +) +{ + ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork); + SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData; + + PVRSRVMISR(psSysData); +} + + +/*! 
+****************************************************************************** + + @Function OSInstallMISR + + @Description Installs an OS dependent MISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (psEnvData->bMISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed")); + return PVRSRV_ERROR_ISR_ALREADY_INSTALLED; + } + + PVR_TRACE(("Installing MISR with cookie %p", pvSysData)); + + INIT_WORK(&psEnvData->sMISRWork, MISRWrapper +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) + , (void *)&psEnvData->sMISRWork +#endif + ); + + psEnvData->pvMISRData = pvSysData; + psEnvData->bMISRInstalled = IMG_TRUE; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function OSUninstallMISR + + @Description Uninstalls an OS dependent MISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (!psEnvData->bMISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed")); + return PVRSRV_ERROR_ISR_NOT_INSTALLED; + } + + PVR_TRACE(("Uninstalling MISR")); + + flush_scheduled_work(); + + psEnvData->bMISRInstalled = IMG_FALSE; + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSScheduleMISR + + @Description Schedules an OS dependent MISR + + @Input pvSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData; + + if (psEnvData->bMISRInstalled) + { + schedule_work(&psEnvData->sMISRWork); + } + + return PVRSRV_OK; +} + +#else /* #if defined(PVR_LINUX_MISR_USING_WORKQUEUE) */ + + +/*! +****************************************************************************** + + @Function MISRWrapper + + @Description OS dependent MISR wrapper + + @Input psSysData + + @Return error status + +******************************************************************************/ +static void MISRWrapper(unsigned long data) +{ + SYS_DATA *psSysData; + + psSysData = (SYS_DATA *)data; + + PVRSRVMISR(psSysData); +} + + +/*! +****************************************************************************** + + @Function OSInstallMISR + + @Description Installs an OS dependent MISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (psEnvData->bMISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed")); + return PVRSRV_ERROR_ISR_ALREADY_INSTALLED; + } + + PVR_TRACE(("Installing MISR with cookie %p", pvSysData)); + + tasklet_init(&psEnvData->sMISRTasklet, MISRWrapper, (unsigned long)pvSysData); + + psEnvData->bMISRInstalled = IMG_TRUE; + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSUninstallMISR + + @Description Uninstalls an OS dependent MISR + + @Input psSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData; + + if (!psEnvData->bMISRInstalled) + { + PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed")); + return PVRSRV_ERROR_ISR_NOT_INSTALLED; + } + + PVR_TRACE(("Uninstalling MISR")); + + tasklet_kill(&psEnvData->sMISRTasklet); + + psEnvData->bMISRInstalled = IMG_FALSE; + + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function OSScheduleMISR + + @Description Schedules an OS dependent MISR + + @Input pvSysData + + @Return error status + +******************************************************************************/ +PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData) +{ + SYS_DATA *psSysData = (SYS_DATA*)pvSysData; + ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData; + + if (psEnvData->bMISRInstalled) + { + tasklet_schedule(&psEnvData->sMISRTasklet); + } + + return PVRSRV_OK; +} + +#endif /* #if defined(PVR_LINUX_MISR_USING_WORKQUEUE) */ +#endif /* #if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) */ + +#endif /* #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) */ + +IMG_VOID OSPanic(IMG_VOID) +{ + BUG(); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) +#define OS_TAS(p) xchg((p), 1) +#else +#define OS_TAS(p) tas(p) +#endif +/*! +****************************************************************************** + + @Function OSLockResource + + @Description locks an OS dependant Resource + + @Input phResource - pointer to OS dependent Resource + @Input bBlock - do we want to block? 

 @Return error status

******************************************************************************/
PVRSRV_ERROR OSLockResource ( PVRSRV_RESOURCE	*psResource,
								IMG_UINT32			ui32ID)

{
	PVRSRV_ERROR eError = PVRSRV_OK;

	/* Non-blocking: a single atomic test-and-set attempt; on failure the
	   caller gets PVRSRV_ERROR_UNABLE_TO_LOCK_RESOURCE rather than a wait. */
	if(!OS_TAS(&psResource->ui32Lock))
		psResource->ui32ID = ui32ID;
	else
		eError = PVRSRV_ERROR_UNABLE_TO_LOCK_RESOURCE;

	return eError;
}


/*!
******************************************************************************

 @Function OSUnlockResource

 @Description unlocks an OS dependant resource

 @Input phResource - pointer to OS dependent resource structure

 @Return

******************************************************************************/
PVRSRV_ERROR OSUnlockResource (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
{
	volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
	PVRSRV_ERROR eError = PVRSRV_OK;

	if(*pui32Access)
	{
		/* Only the holder (matching ui32ID) may release. */
		if(psResource->ui32ID == ui32ID)
		{
			psResource->ui32ID = 0;
			/* Barrier orders the ID clear before the lock-word release so
			   other CPUs never see the lock free with a stale owner ID. */
			smp_mb();
			*pui32Access = 0;
		}
		else
		{
			PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked with expected value.", psResource));
			PVR_DPF((PVR_DBG_MESSAGE,"Should be %x is actually %x", ui32ID, psResource->ui32ID));
			eError = PVRSRV_ERROR_INVALID_LOCK_ID;
		}
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked", psResource));
		eError = PVRSRV_ERROR_RESOURCE_NOT_LOCKED;
	}

	return eError;
}


/*!
+****************************************************************************** + + @Function OSIsResourceLocked + + @Description tests if resource is locked + + @Input phResource - pointer to OS dependent resource structure + + @Return error status + +******************************************************************************/ +IMG_BOOL OSIsResourceLocked (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID) +{ + volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock; + + return (*(volatile IMG_UINT32 *)pui32Access == 1) && (psResource->ui32ID == ui32ID) + ? IMG_TRUE + : IMG_FALSE; +} + + +#if !defined(SYS_CUSTOM_POWERLOCK_WRAP) +PVRSRV_ERROR OSPowerLockWrap(IMG_BOOL bTryLock) +{ + PVR_UNREFERENCED_PARAMETER(bTryLock); + + return PVRSRV_OK; +} + +IMG_VOID OSPowerLockUnwrap (IMG_VOID) +{ +} +#endif /* SYS_CUSTOM_POWERLOCK_WRAP */ + + +IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_HANDLE hOSMemHandle, + IMG_VOID *pvLinAddr) +{ + IMG_CPU_PHYADDR CpuPAddr; + LinuxMemArea *psLinuxMemArea; + IMG_UINTPTR_T uiByteOffset; + IMG_UINT32 ui32ByteOffset; + + PVR_ASSERT(hOSMemHandle != IMG_NULL); + + psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + uiByteOffset = (IMG_UINTPTR_T)pvLinAddr - (IMG_UINTPTR_T)LinuxMemAreaToCpuVAddr(psLinuxMemArea); + ui32ByteOffset = (IMG_UINT32)uiByteOffset; + + CpuPAddr = LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset); + + return CpuPAddr; +} + + +/*! 
+****************************************************************************** + + @Function OSMapPhysToLin + + @Description Maps the physical memory into linear addr range + + @Input BasePAddr : physical cpu address + + @Input ui32Bytes - bytes to map + + @Input ui32CacheType - cache type + + @Return : Linear addr of mapping on success, else NULL + + ******************************************************************************/ +IMG_VOID * +OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32MappingFlags, + IMG_HANDLE *phOSMemHandle) +{ + if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY) + { + /* + * Provide some backwards compatibility, until all callers + * have been updated to pass a non-null OSMemHandle pointer. + * Such callers must not call OSMapLinToCPUPhys. + */ + if(phOSMemHandle == IMG_NULL) + { + IMG_VOID *pvIORemapCookie; + pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags); + if(pvIORemapCookie == IMG_NULL) + { + return IMG_NULL; + } + return pvIORemapCookie; + } + else + { + LinuxMemArea *psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags); + + if(psLinuxMemArea == IMG_NULL) + { + return IMG_NULL; + } + + *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea; + return LinuxMemAreaToCpuVAddr(psLinuxMemArea); + } + } + + PVR_DPF((PVR_DBG_ERROR, + "OSMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY " + " (Use OSReservePhys otherwise)")); + + return IMG_NULL; +} + +/*! 
+****************************************************************************** + @Function OSUnMapPhysToLin + @Description Unmaps memory that was mapped with OSMapPhysToLin + @Return TRUE on success, else FALSE +******************************************************************************/ +IMG_BOOL +OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE hOSMemHandle) +{ + PVR_UNREFERENCED_PARAMETER(ui32Bytes); + + if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY) + { + if (hOSMemHandle == IMG_NULL) + { + IOUnmapWrapper(pvLinAddr); + } + else + { + LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + PVR_ASSERT(LinuxMemAreaToCpuVAddr(psLinuxMemArea) == pvLinAddr); + + FreeIORemapLinuxMemArea(psLinuxMemArea); + } + + return IMG_TRUE; + } + + PVR_DPF((PVR_DBG_ERROR, + "OSUnMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY " + " (Use OSUnReservePhys otherwise)")); + return IMG_FALSE; +} + +/*! +****************************************************************************** + @Function RegisterExternalMem + @Description Registers external memory for user mode mapping + @Return TRUE on success, else FALSE, MemHandle out +******************************************************************************/ +static PVRSRV_ERROR +RegisterExternalMem(IMG_SYS_PHYADDR *pBasePAddr, + IMG_VOID *pvCPUVAddr, + IMG_UINT32 ui32Bytes, + IMG_BOOL bPhysContig, + IMG_UINT32 ui32MappingFlags, + IMG_HANDLE *phOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea; + + switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) + { + case PVRSRV_HAP_KERNEL_ONLY: + { + psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags); + + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_BAD_MAPPING; + } + break; + } + case PVRSRV_HAP_SINGLE_PROCESS: + { + psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags); + + if(!psLinuxMemArea) + { + return 
PVRSRV_ERROR_BAD_MAPPING; + } + PVRMMapRegisterArea(psLinuxMemArea); + break; + } + case PVRSRV_HAP_MULTI_PROCESS: + { + /* Currently PVRSRV_HAP_MULTI_PROCESS implies that we need a kernel + * virtual mapping and potentially multiple user space virtual mappings. + * Beware that the kernel virtual address space is a limited resource. + */ +#if defined(VIVT_CACHE) || defined(__sh__) + /* + * ARM9 caches are tagged with virtual pages, not physical. As we are going to + * share this memory in different address spaces, we don't want it to be cached. + * ARM11 has physical tagging, so we can cache this memory without fear of virtual + * address aliasing in the TLB, as long as the kernel supports cache colouring for + * VIPT architectures. + */ + ui32MappingFlags &= ~PVRSRV_HAP_CACHED; +#endif + psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags); + + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_BAD_MAPPING; + } + PVRMMapRegisterArea(psLinuxMemArea); + break; + } + default: + PVR_DPF((PVR_DBG_ERROR,"OSRegisterMem : invalid flags 0x%x\n", ui32MappingFlags)); + *phOSMemHandle = (IMG_HANDLE)0; + return PVRSRV_ERROR_INVALID_FLAGS; + } + + *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea; + + LinuxMemAreaRegister(psLinuxMemArea); + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + @Function OSRegisterMem + @Description Registers external memory for user mode mapping + @Output phOSMemHandle - handle to registered memory + @Return TRUE on success, else FALSE +******************************************************************************/ +PVRSRV_ERROR +OSRegisterMem(IMG_CPU_PHYADDR BasePAddr, + IMG_VOID *pvCPUVAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32MappingFlags, + IMG_HANDLE *phOSMemHandle) +{ + IMG_SYS_PHYADDR SysPAddr = SysCpuPAddrToSysPAddr(BasePAddr); + + return RegisterExternalMem(&SysPAddr, pvCPUVAddr, ui32Bytes, IMG_TRUE, ui32MappingFlags, phOSMemHandle); +} + + +PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE *phOSMemHandle) +{ + return RegisterExternalMem(pBasePAddr, pvCPUVAddr, ui32Bytes, IMG_FALSE, ui32MappingFlags, phOSMemHandle); +} + + +/*! +****************************************************************************** + @Function OSUnRegisterMem + @Description UnRegisters external memory for user mode mapping + @Return TRUE on success, else FALSE +******************************************************************************/ +PVRSRV_ERROR +OSUnRegisterMem (IMG_VOID *pvCpuVAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32MappingFlags, + IMG_HANDLE hOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + PVR_UNREFERENCED_PARAMETER(ui32Bytes); + + switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) + { + case PVRSRV_HAP_KERNEL_ONLY: + break; + case PVRSRV_HAP_SINGLE_PROCESS: + case PVRSRV_HAP_MULTI_PROCESS: + { + eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!", + __FUNCTION__, pvCpuVAddr, ui32Bytes, + ui32MappingFlags, hOSMemHandle)); + return eError; + } + 
break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR, "OSUnRegisterMem : invalid flags 0x%x", ui32MappingFlags)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + LinuxMemAreaDeepFree(psLinuxMemArea); + + return PVRSRV_OK; +} + +PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle) +{ + return OSUnRegisterMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle); +} + +/*! +****************************************************************************** + @Function OSReservePhys + @Description Registers physical memory for user mode mapping + @Output ppvCpuVAddr + @Output phOsMemHandle handle to registered memory + @Return TRUE on success, else FALSE +******************************************************************************/ +PVRSRV_ERROR +OSReservePhys(IMG_CPU_PHYADDR BasePAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32MappingFlags, + IMG_HANDLE hBMHandle, + IMG_VOID **ppvCpuVAddr, + IMG_HANDLE *phOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea; + +#if 0 + /* For debug: force all OSReservePhys reservations to have a kernel + * virtual address */ + if(ui32MappingFlags & PVRSRV_HAP_SINGLE_PROCESS) + { + ui32MappingFlags &= ~PVRSRV_HAP_SINGLE_PROCESS; + ui32MappingFlags |= PVRSRV_HAP_MULTI_PROCESS; + } +#endif + + switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) + { + case PVRSRV_HAP_KERNEL_ONLY: + { + /* Currently PVRSRV_HAP_KERNEL_ONLY implies that a kernel virtual + * mapping is required for the allocation and no user virtual + * mappings are allowed: Note these eat into our limited kernel + * virtual address space */ + psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_BAD_MAPPING; + } + break; + } + case PVRSRV_HAP_SINGLE_PROCESS: + { + /* Currently this implies that we dont need a kernel virtual + * mapping, but will need a user space virtual mapping */ + psLinuxMemArea = NewIOLinuxMemArea(BasePAddr, ui32Bytes, 
ui32MappingFlags); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_BAD_MAPPING; + } + PVRMMapRegisterArea(psLinuxMemArea); + break; + } + case PVRSRV_HAP_MULTI_PROCESS: + { + /* Currently PVRSRV_HAP_MULTI_PROCESS implies that we need a kernel + * virtual mapping and potentially multiple user space virtual mappings. + * Beware that the kernel virtual address space is a limited resource. + */ +#if defined(VIVT_CACHE) || defined(__sh__) + /* + * ARM9 caches are tagged with virtual pages, not physical. As we are going to + * share this memory in different address spaces, we don't want it to be cached. + * ARM11 has physical tagging, so we can cache this memory without fear of virtual + * address aliasing in the TLB, as long as the kernel supports cache colouring for + * VIPT architectures. + */ + ui32MappingFlags &= ~PVRSRV_HAP_CACHED; +#endif + psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags); + if(!psLinuxMemArea) + { + return PVRSRV_ERROR_BAD_MAPPING; + } + PVRMMapRegisterArea(psLinuxMemArea); + break; + } + default: + PVR_DPF((PVR_DBG_ERROR,"OSMapPhysToLin : invalid flags 0x%x\n", ui32MappingFlags)); + *ppvCpuVAddr = NULL; + *phOSMemHandle = (IMG_HANDLE)0; + return PVRSRV_ERROR_INVALID_FLAGS; + } + + /* + In case of sparse mapping we need to handle back to the BM as it + knows the mapping info + */ + if (ui32MappingFlags & PVRSRV_MEM_SPARSE) + { + PVR_ASSERT(hBMHandle != IMG_NULL); + psLinuxMemArea->hBMHandle = hBMHandle; + } + + *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea; + *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea); + + LinuxMemAreaRegister(psLinuxMemArea); + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + @Function OSUnReservePhys + @Description UnRegisters physical memory for user mode mapping + @Return TRUE on success, else FALSE +******************************************************************************/ +PVRSRV_ERROR +OSUnReservePhys(IMG_VOID *pvCpuVAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32MappingFlags, + IMG_HANDLE hOSMemHandle) +{ + LinuxMemArea *psLinuxMemArea; + PVRSRV_ERROR eError; + + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + PVR_UNREFERENCED_PARAMETER(ui32Bytes); + + psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + + switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK) + { + case PVRSRV_HAP_KERNEL_ONLY: + break; + case PVRSRV_HAP_SINGLE_PROCESS: + case PVRSRV_HAP_MULTI_PROCESS: + { + eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!", + __FUNCTION__, pvCpuVAddr, ui32Bytes, + ui32MappingFlags, hOSMemHandle)); + return eError; + } + break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR, "OSUnMapPhysToLin : invalid flags 0x%x", ui32MappingFlags)); + return PVRSRV_ERROR_INVALID_PARAMS; + } + } + + LinuxMemAreaDeepFree(psLinuxMemArea); + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + @Function OSBaseAllocContigMemory + @Description Allocate a block of contiguous virtual non-paged memory. 
+ @Input ui32Size - number of bytes to allocate + @Output ppvLinAddr - pointer to variable that will receive the linear address of buffer + @Return PVRSRV_OK if allocation successed else returns PVRSRV_ERROR_OUT_OF_MEMORY + **************************************************************************/ +PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR *pvLinAddr, IMG_CPU_PHYADDR *psPhysAddr) +{ +#if !defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(pvLinAddr); + PVR_UNREFERENCED_PARAMETER(psPhysAddr); + PVR_DPF((PVR_DBG_ERROR, "%s: Not available", __FUNCTION__)); + + return PVRSRV_ERROR_OUT_OF_MEMORY; +#else +/* + * On Linux, the returned virtual address should be used for CPU access, + * and not be remapped into the CPU virtual address using ioremap. The fact + * that the RAM is being managed by the kernel, and already has a virtual + * address, seems to lead to problems when the attributes of the memory are + * changed in the ioremap call (such as from cached to non-cached). + */ + IMG_VOID *pvKernLinAddr; + +#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + pvKernLinAddr = _KMallocWrapper(ui32Size, GFP_KERNEL, __FILE__, __LINE__); +#else + pvKernLinAddr = KMallocWrapper(ui32Size, GFP_KERNEL); +#endif + if (!pvKernLinAddr) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + *pvLinAddr = pvKernLinAddr; + + psPhysAddr->uiAddr = virt_to_phys(pvKernLinAddr); + + return PVRSRV_OK; +#endif /* !defined(NO_HARDWARE) */ +} + + +/*! 
+****************************************************************************** + @Function OSBaseFreeContigMemory + @Description Frees memory allocated with OSBaseAllocContigMemory + @Input LinAddr - pointer to buffer allocated with OSBaseAllocContigMemory + **************************************************************************/ +PVRSRV_ERROR OSBaseFreeContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR pvLinAddr, IMG_CPU_PHYADDR psPhysAddr) +{ +#if !defined(NO_HARDWARE) + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(pvLinAddr); + PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr); + + PVR_DPF((PVR_DBG_WARNING, "%s: Not available", __FUNCTION__)); +#else + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr); + + KFreeWrapper(pvLinAddr); +#endif + return PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function OSWriteHWReg + + @Description + + register access function + + @input pvLinRegBaseAddr : lin addr of register block base + + @input ui32Offset : + + @input ui32Value : + + @Return none + +******************************************************************************/ + +IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset) +{ +#if !defined(NO_HARDWARE) + return (IMG_UINT32) readl((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset); +#else + return *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset); +#endif +} + +IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value) +{ +#if !defined(NO_HARDWARE) + writel(ui32Value, (IMG_PBYTE)pvLinRegBaseAddr+ui32Offset); +#else + *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset) = ui32Value; +#endif +} + +#if defined(CONFIG_PCI) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)) + +/*! +****************************************************************************** + + @Function OSPCISetDev + + @Description + + Set a PCI device for subsequent use. 
+ + @input pvPCICookie : Pointer to OS specific PCI structure/cookie + + @input eFlags : Flags + + @Return Pointer to PCI device handle + +******************************************************************************/ +PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags) +{ + int err; + IMG_UINT32 i; + PVR_PCI_DEV *psPVRPCI; + + PVR_TRACE(("OSPCISetDev")); + + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID **)&psPVRPCI, IMG_NULL, + "PCI Device") != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't allocate PVR PCI structure")); + return IMG_NULL; + } + + psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie; + psPVRPCI->ePCIFlags = eFlags; + + err = pci_enable_device(psPVRPCI->psPCIDev); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't enable device (%d)", err)); + return IMG_NULL; + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_set_master(psPVRPCI->psPCIDev); + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */ + { +#if defined(CONFIG_PCI_MSI) + err = pci_enable_msi(psPVRPCI->psPCIDev); + if (err != 0) + { + PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: Couldn't enable MSI (%d)", err)); + psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI; /* PRQA S 1474,3358,4130 */ /* misuse of enums */ + } +#else + PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: MSI support not enabled in the kernel")); +#endif + } + + /* Initialise the PCI resource tracking array */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; + } + + return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI; +} + +/*! +****************************************************************************** + + @Function OSPCIAcquireDev + + @Description + + Acquire a PCI device for subsequent use. 
+ + @input ui16VendorID : Vendor PCI ID + + @input ui16VendorID : Device PCI ID + + @input eFlags : Flags + + @Return PVESRV_ERROR + +******************************************************************************/ +PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags) +{ + struct pci_dev *psPCIDev; + + psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL); + if (psPCIDev == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device")); + return IMG_NULL; + } + + return OSPCISetDev((IMG_VOID *)psPCIDev, eFlags); +} + +/*! +****************************************************************************** + + @Function OSPCIIRQ + + @Description + + Get the interrupt number for the device. + + @input hPVRPCI : PCI device handle + + @input pui32IRQ : Pointer to where the interrupt number should be returned + + @Return PVESRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + + *pui32IRQ = psPVRPCI->psPCIDev->irq; + + return PVRSRV_OK; +} + +/* Functions supported by OSPCIAddrRangeFunc */ +enum HOST_PCI_ADDR_RANGE_FUNC +{ + HOST_PCI_ADDR_RANGE_FUNC_LEN, + HOST_PCI_ADDR_RANGE_FUNC_START, + HOST_PCI_ADDR_RANGE_FUNC_END, + HOST_PCI_ADDR_RANGE_FUNC_REQUEST, + HOST_PCI_ADDR_RANGE_FUNC_RELEASE +}; + +/*! 
+****************************************************************************** + + @Function OSPCIAddrRangeFunc + + @Description + + Internal support function for various address range related functions + + @input eFunc : Function to perform + + @input hPVRPCI : PCI device handle + + @input ui32Index : Address range index + + @Return function dependent + +******************************************************************************/ +static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc, + PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + + if (ui32Index >= DEVICE_COUNT_RESOURCE) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Index out of range")); + return 0; + + } + + switch (eFunc) + { + case HOST_PCI_ADDR_RANGE_FUNC_LEN: + return pci_resource_len(psPVRPCI->psPCIDev, ui32Index); + case HOST_PCI_ADDR_RANGE_FUNC_START: + return pci_resource_start(psPVRPCI->psPCIDev, ui32Index); + case HOST_PCI_ADDR_RANGE_FUNC_END: + return pci_resource_end(psPVRPCI->psPCIDev, ui32Index); + case HOST_PCI_ADDR_RANGE_FUNC_REQUEST: + { + int err; + + err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err)); + return 0; + } + psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE; + return 1; + } + case HOST_PCI_ADDR_RANGE_FUNC_RELEASE: + if (psPVRPCI->abPCIResourceInUse[ui32Index]) + { + pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index); + psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE; + } + return 1; + default: + PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Unknown function")); + break; + } + + return 0; +} + +/*! 
+****************************************************************************** + + @Function OSPCIAddrRangeLen + + @Description + + Returns length of a given address range length + + @input hPVRPCI : PCI device handle + + @input ui32Index : Address range index + + @Return Length of address range, or 0 if no such range + +******************************************************************************/ +IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index); +} + +/*! +****************************************************************************** + + @Function OSPCIAddrRangeStart + + @Description + + Returns the start of a given address range + + @input hPVRPCI : PCI device handle + + @input ui32Index : Address range index + + @Return Start of address range, or 0 if no such range + +******************************************************************************/ +IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index); +} + +/*! +****************************************************************************** + + @Function OSPCIAddrRangeEnd + + @Description + + Returns the end of a given address range + + @input hPVRPCI : PCI device handle"ayy + + @input ui32Index : Address range index + + @Return End of address range, or 0 if no such range + +******************************************************************************/ +IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index); +} + +/*! 
+****************************************************************************** + + @Function OSPCIRequestAddrRange + + @Description + + Request a given address range index for subsequent use + + @input hPVRPCI : PCI device handle + + @input ui32Index : Address range index + + @Return PVESRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, + IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_PCI_CALL_FAILED : PVRSRV_OK; +} + +/*! +****************************************************************************** + + @Function OSPCIReleaseAddrRange + + @Description + + Release a given address range that is no longer being used + + @input hPVRPCI : PCI device handle + + @input ui32Index : Address range index + + @Return PVESRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) +{ + return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_PCI_CALL_FAILED : PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function OSPCIReleaseDev + + @Description + + Release a PCI device that is no longer being used + + @input hPVRPCI : PCI device handle + + @Return PVESRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int i; + + PVR_TRACE(("OSPCIReleaseDev")); + + /* Release all PCI regions that are currently in use */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i)); + pci_release_region(psPVRPCI->psPCIDev, i); + psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE; + } + } + +#if defined(CONFIG_PCI_MSI) + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_disable_msi(psPVRPCI->psPCIDev); + } +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + { + pci_clear_master(psPVRPCI->psPCIDev); + } +#endif + pci_disable_device(psPVRPCI->psPCIDev); + + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID *)psPVRPCI, IMG_NULL); + /*not nulling pointer, copy on stack*/ + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function OSPCISuspendDev + + @Description + + Prepare PCI device to be turned off by power management + + @input hPVRPCI : PCI device handle + + @Return PVESRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int i; + int err; + + PVR_TRACE(("OSPCISuspendDev")); + + /* Release all PCI regions that are currently in use */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + pci_release_region(psPVRPCI->psPCIDev, i); + } + } + + err = pci_save_state(psPVRPCI->psPCIDev); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_save_state_failed (%d)", err)); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + pci_disable_device(psPVRPCI->psPCIDev); + + err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND)); + switch(err) + { + case 0: + break; + case -EIO: + PVR_DPF((PVR_DBG_WARNING, "OSPCISuspendDev: device doesn't support PCI PM")); + break; + case -EINVAL: + PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: can't enter requested power state")); + break; + default: + PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_set_power_state failed (%d)", err)); + break; + } + + return PVRSRV_OK; +} + +/*! 
+****************************************************************************** + + @Function OSPCIResumeDev + + @Description + + Prepare a PCI device to be resumed by power management + + @input hPVRPCI : PCI device handle + + @input pvPCICookie : Pointer to OS specific PCI structure/cookie + + @input eFlags : Flags + + @Return PVESRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI) +{ + PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; + int err; + int i; + + PVR_TRACE(("OSPCIResumeDev")); + + err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON)); + switch(err) + { + case 0: + break; + case -EIO: + PVR_DPF((PVR_DBG_WARNING, "OSPCIResumeDev: device doesn't support PCI PM")); + break; + case -EINVAL: + PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: can't enter requested power state")); + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + default: + PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_set_power_state failed (%d)", err)); + return PVRSRV_ERROR_UNKNOWN_POWER_STATE; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) + pci_restore_state(psPVRPCI->psPCIDev); +#else + err = pci_restore_state(psPVRPCI->psPCIDev); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_restore_state failed (%d)", err)); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } +#endif + + err = pci_enable_device(psPVRPCI->psPCIDev); + if (err != 0) + { + PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: Couldn't enable device (%d)", err)); + return PVRSRV_ERROR_PCI_CALL_FAILED; + } + + if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */ + pci_set_master(psPVRPCI->psPCIDev); + + /* Restore the PCI resource tracking array */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) + { + if (psPVRPCI->abPCIResourceInUse[i]) + { + err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME); + if (err != 0) + { + 
PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err)); + } + } + + } + + return PVRSRV_OK; +} + +#endif /* #if defined(CONFIG_PCI) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)) */ + +#define OS_MAX_TIMERS 8 + +/* Timer callback strucure used by OSAddTimer */ +typedef struct TIMER_CALLBACK_DATA_TAG +{ + IMG_BOOL bInUse; + PFN_TIMER_FUNC pfnTimerFunc; + IMG_VOID *pvData; + struct timer_list sTimer; + IMG_UINT32 ui32Delay; + IMG_BOOL bActive; +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + struct work_struct sWork; +#endif +}TIMER_CALLBACK_DATA; + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) +static struct workqueue_struct *psTimerWorkQueue; +#endif + +static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS]; + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) +DEFINE_MUTEX(sTimerStructLock); +#else +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)) +/* The lock is used to control access to sTimers */ +/* PRQA S 0671,0685 1 */ /* C99 macro not understood by QAC */ +static spinlock_t sTimerStructLock = SPIN_LOCK_UNLOCKED; +#else +static DEFINE_SPINLOCK(sTimerStructLock); +#endif +#endif + +static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData) +{ + if (!psTimerCBData->bActive) + return; + + /* call timer callback */ + psTimerCBData->pfnTimerFunc(psTimerCBData->pvData); + + /* reset timer */ + mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies); +} + + +/*! 
+****************************************************************************** + + @Function OSTimerCallbackWrapper + + @Description + + OS specific timer callback wrapper function + + @Input ui32Data : timer callback data + + @Return NONE + +******************************************************************************/ +static IMG_VOID OSTimerCallbackWrapper(IMG_UINT32 ui32Data) +{ + TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)ui32Data; + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + int res; + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) + res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork); +#else + res = schedule_work(&psTimerCBData->sWork); +#endif + if (res == 0) + { + PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued")); + } +#else + OSTimerCallbackBody(psTimerCBData); +#endif +} + + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) +static void OSTimerWorkQueueCallBack(struct work_struct *psWork) +{ + TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork); + + OSTimerCallbackBody(psTimerCBData); +} +#endif + +/*! 
+****************************************************************************** + + @Function OSAddTimer + + @Description + + OS specific function to install a timer callback + + @Input pfnTimerFunc : timer callback + + @Input *pvData :callback data + + @Input ui32MsTimeout: callback period + + @Return IMG_HANDLE : valid handle success, NULL failure + +******************************************************************************/ +IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout) +{ + TIMER_CALLBACK_DATA *psTimerCBData; + IMG_UINT32 ui32i; +#if !(defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE)) + unsigned long ulLockFlags; +#endif + + /* check callback */ + if(!pfnTimerFunc) + { + PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback")); + return IMG_NULL; + } + + /* Allocate timer callback data structure */ +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + mutex_lock(&sTimerStructLock); +#else + spin_lock_irqsave(&sTimerStructLock, ulLockFlags); +#endif + for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) + { + psTimerCBData = &sTimers[ui32i]; + if (!psTimerCBData->bInUse) + { + psTimerCBData->bInUse = IMG_TRUE; + break; + } + } +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + mutex_unlock(&sTimerStructLock); +#else + spin_unlock_irqrestore(&sTimerStructLock, ulLockFlags); +#endif + if (ui32i >= OS_MAX_TIMERS) + { + PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use")); + return IMG_NULL; + } + + psTimerCBData->pfnTimerFunc = pfnTimerFunc; + psTimerCBData->pvData = pvData; + psTimerCBData->bActive = IMG_FALSE; + + /* + HZ = ticks per second + ui32MsTimeout = required ms delay + ticks = (Hz * ui32MsTimeout) / 1000 + */ + psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000) + ? 
1 + : ((HZ * ui32MsTimeout) / 1000); + /* initialise object */ + init_timer(&psTimerCBData->sTimer); + + /* setup timer object */ + /* PRQA S 0307,0563 1 */ /* ignore warning about inconpartible ptr casting */ + psTimerCBData->sTimer.function = (IMG_VOID *)OSTimerCallbackWrapper; + psTimerCBData->sTimer.data = (IMG_UINT32)psTimerCBData; + + return (IMG_HANDLE)(ui32i + 1); +} + + +static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer) +{ + IMG_UINT32 ui32i = ((IMG_UINT32)hTimer) - 1; + + PVR_ASSERT(ui32i < OS_MAX_TIMERS); + + return &sTimers[ui32i]; +} + +/*! +****************************************************************************** + + @Function OSRemoveTimer + + @Description + + OS specific function to remove a timer callback + + @Input hTimer : timer handle + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer) +{ + TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); + + PVR_ASSERT(psTimerCBData->bInUse); + PVR_ASSERT(!psTimerCBData->bActive); + + /* free timer callback data struct */ + psTimerCBData->bInUse = IMG_FALSE; + + return PVRSRV_OK; +} + + +/*! +****************************************************************************** + + @Function OSEnableTimer + + @Description + + OS specific function to enable a timer callback + + @Input hTimer : timer handle + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer) +{ + TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); + + PVR_ASSERT(psTimerCBData->bInUse); + PVR_ASSERT(!psTimerCBData->bActive); + + /* Start timer arming */ + psTimerCBData->bActive = IMG_TRUE; + + /* set the expire time */ + psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies; + + /* Add the timer to the list */ + add_timer(&psTimerCBData->sTimer); + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSDisableTimer + + @Description + + OS specific function to disable a timer callback + + @Input hTimer : timer handle + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer) +{ + TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); + + PVR_ASSERT(psTimerCBData->bInUse); + PVR_ASSERT(psTimerCBData->bActive); + + /* Stop timer from arming */ + psTimerCBData->bActive = IMG_FALSE; + smp_mb(); + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) + flush_workqueue(psTimerWorkQueue); +#endif +#if defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + flush_scheduled_work(); +#endif + + /* remove timer */ + del_timer_sync(&psTimerCBData->sTimer); + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) + /* + * This second flush is to catch the case where the timer ran + * before we managed to delete it, in which case, it will have + * queued more work for the workqueue. Since the bActive flag + * has been cleared, this second flush won't result in the + * timer being rearmed. + */ + flush_workqueue(psTimerWorkQueue); +#endif +#if defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + flush_scheduled_work(); +#endif + + return PVRSRV_OK; +} + + +/*! 
+****************************************************************************** + + @Function OSEventObjectCreateKM + + @Description + + OS specific function to create an event object + + @Input pszName : Globally unique event object name (if null name must be autogenerated) + + @Output psEventObject : OS event object info structure + + @Return PVRSRV_ERROR : + +******************************************************************************/ +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR OSEventObjectCreateKM(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT_KM *psEventObject) +#else +PVRSRV_ERROR OSEventObjectCreateKM(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT *psEventObject) +#endif +{ + + PVRSRV_ERROR eError = PVRSRV_OK; + + if(psEventObject) + { + if(pszName) + { + /* copy over the event object name */ + strncpy(psEventObject->szName, pszName, EVENTOBJNAME_MAXLENGTH); + } + else + { + /* autogenerate a name */ + static IMG_UINT16 ui16NameIndex = 0; +#if defined (SUPPORT_SID_INTERFACE) + snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_KM_%d", ui16NameIndex++); +#else + snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++); +#endif + } + + if(LinuxEventObjectListCreate(&psEventObject->hOSEventKM) != PVRSRV_OK) + { + eError = PVRSRV_ERROR_OUT_OF_MEMORY; + } + + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreateKM: psEventObject is not a valid pointer")); + eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT; + } + + return eError; + +} + + +/*! 
+****************************************************************************** + + @Function OSEventObjectDestroyKM + + @Description + + OS specific function to destroy an event object + + @Input psEventObject : OS event object info structure + + @Return PVRSRV_ERROR : + +******************************************************************************/ +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR OSEventObjectDestroyKM(PVRSRV_EVENTOBJECT_KM *psEventObject) +#else +PVRSRV_ERROR OSEventObjectDestroyKM(PVRSRV_EVENTOBJECT *psEventObject) +#endif +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if(psEventObject) + { + if(psEventObject->hOSEventKM) + { + LinuxEventObjectListDestroy(psEventObject->hOSEventKM); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroyKM: hOSEventKM is not a valid pointer")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroyKM: psEventObject is not a valid pointer")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; +} + +/*! +****************************************************************************** + + @Function OSEventObjectWaitKM + + @Description + + OS specific function to wait for an event object. Called from client + + @Input hOSEventKM : OS and kernel specific handle to event object + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSEventObjectWaitKM(IMG_HANDLE hOSEventKM) +{ + PVRSRV_ERROR eError; + + if(hOSEventKM) + { + eError = LinuxEventObjectWait(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWaitKM: hOSEventKM is not a valid handle")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; +} + +/*! +****************************************************************************** + + @Function OSEventObjectOpenKM + + @Description + + OS specific function to open an event object. 
Called from client + + @Input psEventObject : Pointer to an event object + @Output phOSEvent : OS and kernel specific handle to event object + + @Return PVRSRV_ERROR : + +******************************************************************************/ +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR OSEventObjectOpenKM(PVRSRV_EVENTOBJECT_KM *psEventObject, +#else +PVRSRV_ERROR OSEventObjectOpenKM(PVRSRV_EVENTOBJECT *psEventObject, +#endif + IMG_HANDLE *phOSEvent) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if(psEventObject) + { + if(LinuxEventObjectAdd(psEventObject->hOSEventKM, phOSEvent) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreateKM: psEventObject is not a valid pointer")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; +} + +/*! +****************************************************************************** + + @Function OSEventObjectCloseKM + + @Description + + OS specific function to close an event object. Called from client + + @Input psEventObject : Pointer to an event object + @OInput hOSEventKM : OS and kernel specific handle to event object + + + @Return PVRSRV_ERROR : + +******************************************************************************/ +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR OSEventObjectCloseKM(PVRSRV_EVENTOBJECT_KM *psEventObject, +#else +PVRSRV_ERROR OSEventObjectCloseKM(PVRSRV_EVENTOBJECT *psEventObject, +#endif + IMG_HANDLE hOSEventKM) +{ + PVRSRV_ERROR eError = PVRSRV_OK; + + if(psEventObject) + { + if(LinuxEventObjectDelete(psEventObject->hOSEventKM, hOSEventKM) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroyKM: psEventObject is not a valid pointer")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; + +} + +/*! 
+****************************************************************************** + + @Function OSEventObjectSignalKM + + @Description + + OS specific function to 'signal' an event object. Called from L/MISR + + @Input hOSEventKM : OS and kernel specific handle to event object + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSEventObjectSignalKM(IMG_HANDLE hOSEventKM) +{ + PVRSRV_ERROR eError; + + if(hOSEventKM) + { + eError = LinuxEventObjectSignal(hOSEventKM); + } + else + { + PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignalKM: hOSEventKM is not a valid handle")); + eError = PVRSRV_ERROR_INVALID_PARAMS; + } + + return eError; +} + +/*! +****************************************************************************** + + @Function OSProcHasPrivSrvInit + + @Description + + Does the process have sufficient privileges to initialise services? + + @Input none + + @Return IMG_BOOL : + +******************************************************************************/ +IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID) +{ + return (capable(CAP_SYS_MODULE) != 0) ? IMG_TRUE : IMG_FALSE; +} + +/*! +****************************************************************************** + + @Function OSCopyToUser + + @Description + + Copy a block of data into user space + + @Input pvSrc + + @Output pvDest + + @Input ui32Bytes + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, + IMG_VOID *pvDest, + IMG_VOID *pvSrc, + IMG_UINT32 ui32Bytes) +{ + PVR_UNREFERENCED_PARAMETER(pvProcess); + + if(pvr_copy_to_user(pvDest, pvSrc, ui32Bytes)==0) + return PVRSRV_OK; + else + return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY; +} + +/*! 
+****************************************************************************** + + @Function OSCopyFromUser + + @Description + + Copy a block of data from the user space + + @Output pvDest + + @Input pvSrc + + @Input ui32Bytes + + @Return PVRSRV_ERROR : + +******************************************************************************/ +PVRSRV_ERROR OSCopyFromUser( IMG_PVOID pvProcess, + IMG_VOID *pvDest, + IMG_VOID *pvSrc, + IMG_UINT32 ui32Bytes) +{ + PVR_UNREFERENCED_PARAMETER(pvProcess); + + if(pvr_copy_from_user(pvDest, pvSrc, ui32Bytes)==0) + return PVRSRV_OK; + else + return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY; +} + +/*! +****************************************************************************** + + @Function OSAccessOK + + @Description + + Checks if a user space pointer is valide + + @Input eVerification + + @Input pvUserPtr + + @Input ui32Bytes + + @Return IMG_BOOL : + +******************************************************************************/ +IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_UINT32 ui32Bytes) +{ + IMG_INT linuxType; + + if (eVerification == PVR_VERIFY_READ) + { + linuxType = VERIFY_READ; + } + else + { + PVR_ASSERT(eVerification == PVR_VERIFY_WRITE); + linuxType = VERIFY_WRITE; + } + + return access_ok(linuxType, pvUserPtr, ui32Bytes); +} + +typedef enum _eWrapMemType_ +{ + WRAP_TYPE_NULL = 0, + WRAP_TYPE_GET_USER_PAGES, + WRAP_TYPE_FIND_VMA +} eWrapMemType; + +typedef struct _sWrapMemInfo_ +{ + eWrapMemType eType; + IMG_INT iNumPages; + IMG_INT iNumPagesMapped; + struct page **ppsPages; + IMG_SYS_PHYADDR *psPhysAddr; + IMG_INT iPageOffset; +#if defined(DEBUG) + IMG_UINT32 ulStartAddr; + IMG_UINT32 ulBeyondEndAddr; + struct vm_area_struct *psVMArea; +#endif +} sWrapMemInfo; + + +/*! 
+****************************************************************************** + + @Function *CPUVAddrToPFN + + @Description + + Find the PFN associated with a given CPU virtual address, and return + the associated page structure, if it exists. + The page in question must be present (i.e. no fault handling required), + and must be writable. A get_page is done on the returned page structure. + + @Input psVMArea - pointer to VM area structure + ulCPUVAddr - CPU virtual address + pulPFN - Pointer to returned PFN. + ppsPAge - Pointer to returned page structure pointer. + + @Output *pulPFN - Set to PFN + *ppsPage - Pointer to the page structure if present, else NULL. + @Return IMG_TRUE if PFN lookup was succesful. + +******************************************************************************/ +static IMG_BOOL CPUVAddrToPFN(struct vm_area_struct *psVMArea, IMG_UINT32 ulCPUVAddr, IMG_UINT32 *pulPFN, struct page **ppsPage) +{ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)) + pgd_t *psPGD; + pud_t *psPUD; + pmd_t *psPMD; + pte_t *psPTE; + struct mm_struct *psMM = psVMArea->vm_mm; + spinlock_t *psPTLock; + IMG_BOOL bRet = IMG_FALSE; + + *pulPFN = 0; + *ppsPage = NULL; + + psPGD = pgd_offset(psMM, ulCPUVAddr); + if (pgd_none(*psPGD) || pgd_bad(*psPGD)) + return bRet; + + psPUD = pud_offset(psPGD, ulCPUVAddr); + if (pud_none(*psPUD) || pud_bad(*psPUD)) + return bRet; + + psPMD = pmd_offset(psPUD, ulCPUVAddr); + if (pmd_none(*psPMD) || pmd_bad(*psPMD)) + return bRet; + + psPTE = (pte_t *)pte_offset_map_lock(psMM, psPMD, ulCPUVAddr, &psPTLock); + + if ((pte_none(*psPTE) == 0) && (pte_present(*psPTE) != 0) && (pte_write(*psPTE) != 0)) + { + *pulPFN = pte_pfn(*psPTE); + bRet = IMG_TRUE; + + if (pfn_valid(*pulPFN)) + { + *ppsPage = pfn_to_page(*pulPFN); + + get_page(*ppsPage); + } + } + + pte_unmap_unlock(psPTE, psPTLock); + + return bRet; +#else + return IMG_FALSE; +#endif +} + +/*! 
+****************************************************************************** + + @Function OSReleasePhysPageAddr + + @Description + + Release wrapped memory. + + @Input hOSWrapMem : Driver cookie + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem) +{ + sWrapMemInfo *psInfo = (sWrapMemInfo *)hOSWrapMem; + IMG_INT i; + + if (psInfo == IMG_NULL) + { + PVR_DPF((PVR_DBG_WARNING, + "OSReleasePhysPageAddr: called with null wrap handle")); + return PVRSRV_OK; + } + + switch (psInfo->eType) + { + case WRAP_TYPE_NULL: + { + PVR_DPF((PVR_DBG_WARNING, + "OSReleasePhysPageAddr: called with wrap type WRAP_TYPE_NULL")); + break; + } + case WRAP_TYPE_GET_USER_PAGES: + { + for (i = 0; i < psInfo->iNumPagesMapped; i++) + { + struct page *psPage = psInfo->ppsPages[i]; + + PVR_ASSERT(psPage != NULL); + + /* + * If the number of pages mapped is not the same as + * the number of pages in the address range, then + * get_user_pages must have failed, so we are cleaning + * up after failure, and the pages can't be dirty. 
+ */ + if (psInfo->iNumPagesMapped == psInfo->iNumPages) + { + if (!PageReserved(psPage)) + { + SetPageDirty(psPage); + } + } + page_cache_release(psPage); + } + break; + } + case WRAP_TYPE_FIND_VMA: + { + for (i = 0; i < psInfo->iNumPages; i++) + { + if (psInfo->ppsPages[i] != IMG_NULL) + { + put_page(psInfo->ppsPages[i]); + } + } + break; + } + default: + { + PVR_DPF((PVR_DBG_ERROR, + "OSReleasePhysPageAddr: Unknown wrap type (%d)", psInfo->eType)); + return PVRSRV_ERROR_INVALID_WRAP_TYPE; + } + } + + if (psInfo->ppsPages != IMG_NULL) + { + kfree(psInfo->ppsPages); + } + + if (psInfo->psPhysAddr != IMG_NULL) + { + kfree(psInfo->psPhysAddr); + } + + kfree(psInfo); + + return PVRSRV_OK; +} + +#if defined(CONFIG_TI_TILER) || defined(CONFIG_DRM_OMAP_DMM_TILER) + +static IMG_UINT32 CPUAddrToTilerPhy(IMG_UINT32 uiAddr) +{ + IMG_UINT32 ui32PhysAddr = 0; + pte_t *ptep, pte; + pgd_t *pgd; + pmd_t *pmd; + pud_t *pud; + + pgd = pgd_offset(current->mm, uiAddr); + if (pgd_none(*pgd) || pgd_bad(*pgd)) + goto err_out; + + pud = pud_offset(pgd, uiAddr); + if (pud_none(*pud) || pud_bad(*pud)) + goto err_out; + + pmd = pmd_offset(pud, uiAddr); + if (pmd_none(*pmd) || pmd_bad(*pmd)) + goto err_out; + + ptep = pte_offset_map(pmd, uiAddr); + if (!ptep) + goto err_out; + + pte = *ptep; + if (!pte_present(pte)) + goto err_out; + + ui32PhysAddr = (pte & PAGE_MASK) | (~PAGE_MASK & uiAddr); + + /* If the physAddr is not in the TILER physical range + * then we don't proceed. + */ + if (ui32PhysAddr < 0x60000000 && ui32PhysAddr > 0x7fffffff) + { + PVR_DPF((PVR_DBG_ERROR, "CPUAddrToTilerPhy: Not in tiler range")); + ui32PhysAddr = 0; + goto err_out; + } + +err_out: + return ui32PhysAddr; +} + +#endif /* defined(CONFIG_TI_TILER) || defined(CONFIG_DRM_OMAP_DMM_TILER) */ + +/*! 
+****************************************************************************** + + @Function OSAcquirePhysPageAddr + + @Description + + @Return PVRSRV_ERROR + +******************************************************************************/ +PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID *pvCPUVAddr, + IMG_UINT32 ui32Bytes, + IMG_SYS_PHYADDR *psSysPAddr, + IMG_HANDLE *phOSWrapMem) +{ + IMG_UINT32 ulStartAddrOrig = (IMG_UINT32) pvCPUVAddr; + IMG_UINT32 ulAddrRangeOrig = (IMG_UINT32) ui32Bytes; + IMG_UINT32 ulBeyondEndAddrOrig = ulStartAddrOrig + ulAddrRangeOrig; + IMG_UINT32 ulStartAddr; + IMG_UINT32 ulAddrRange; + IMG_UINT32 ulBeyondEndAddr; + IMG_UINT32 ulAddr; + IMG_INT i; + struct vm_area_struct *psVMArea; + sWrapMemInfo *psInfo = NULL; + IMG_BOOL bHavePageStructs = IMG_FALSE; + IMG_BOOL bHaveNoPageStructs = IMG_FALSE; + IMG_BOOL bMMapSemHeld = IMG_FALSE; + PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY; + + /* Align start and end addresses to page boundaries */ + ulStartAddr = ulStartAddrOrig & PAGE_MASK; + ulBeyondEndAddr = PAGE_ALIGN(ulBeyondEndAddrOrig); + ulAddrRange = ulBeyondEndAddr - ulStartAddr; + + /* + * Check for address range calculation overflow, and attempts to wrap + * zero bytes. 
+ */ + if (ulBeyondEndAddr <= ulStartAddr) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Invalid address range (start %x, length %x)", + ulStartAddrOrig, ulAddrRangeOrig)); + goto error; + } + + /* Allocate information structure */ + psInfo = kmalloc(sizeof(*psInfo), GFP_KERNEL); + if (psInfo == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Couldn't allocate information structure")); + goto error; + } + memset(psInfo, 0, sizeof(*psInfo)); + +#if defined(DEBUG) + psInfo->ulStartAddr = ulStartAddrOrig; + psInfo->ulBeyondEndAddr = ulBeyondEndAddrOrig; +#endif + + psInfo->iNumPages = (IMG_INT)(ulAddrRange >> PAGE_SHIFT); + psInfo->iPageOffset = (IMG_INT)(ulStartAddrOrig & ~PAGE_MASK); + + /* Allocate physical address array */ + psInfo->psPhysAddr = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->psPhysAddr), GFP_KERNEL); + if (psInfo->psPhysAddr == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Couldn't allocate page array")); + goto error; + } + memset(psInfo->psPhysAddr, 0, (size_t)psInfo->iNumPages * sizeof(*psInfo->psPhysAddr)); + + /* Allocate page array */ + psInfo->ppsPages = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages), GFP_KERNEL); + if (psInfo->ppsPages == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Couldn't allocate page array")); + goto error; + } + memset(psInfo->ppsPages, 0, (size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages)); + + /* Default error code from now on */ + eError = PVRSRV_ERROR_BAD_MAPPING; + + /* Set the mapping type to aid clean up */ + psInfo->eType = WRAP_TYPE_GET_USER_PAGES; + + /* Lock down user memory */ + down_read(¤t->mm->mmap_sem); + bMMapSemHeld = IMG_TRUE; + + /* Get page list */ + psInfo->iNumPagesMapped = get_user_pages(current, current->mm, ulStartAddr, psInfo->iNumPages, 1, 0, psInfo->ppsPages, NULL); + + if (psInfo->iNumPagesMapped >= 0) + { + /* See if we got all the pages we wanted */ + if (psInfo->iNumPagesMapped != psInfo->iNumPages) + { + 
PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't map all the pages needed (wanted: %d, got %d)", psInfo->iNumPages, psInfo->iNumPagesMapped)); + + goto error; + } + + /* Build list of physical page addresses */ + for (i = 0; i < psInfo->iNumPages; i++) + { + IMG_CPU_PHYADDR CPUPhysAddr; + IMG_UINT32 ulPFN; + + ulPFN = page_to_pfn(psInfo->ppsPages[i]); + CPUPhysAddr.uiAddr = ulPFN << PAGE_SHIFT; + if ((CPUPhysAddr.uiAddr >> PAGE_SHIFT) != ulPFN) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Page frame number out of range (%x)", ulPFN)); + + goto error; + } + psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr); + psSysPAddr[i] = psInfo->psPhysAddr[i]; + + } + + goto exit; + } + + PVR_DPF((PVR_DBG_MESSAGE, "OSAcquirePhysPageAddr: get_user_pages failed (%d), using CPU page table", psInfo->iNumPagesMapped)); + + /* Reset some fields */ + psInfo->eType = WRAP_TYPE_NULL; + psInfo->iNumPagesMapped = 0; + memset(psInfo->ppsPages, 0, (size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages)); + + /* + * get_user_pages didn't work. If this is due to the address range + * representing memory mapped I/O, then we'll look for the pages + * in the appropriate memory region of the process. + */ + + /* Set the mapping type to aid clean up */ + psInfo->eType = WRAP_TYPE_FIND_VMA; + + psVMArea = find_vma(current->mm, ulStartAddrOrig); + if (psVMArea == NULL) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Couldn't find memory region containing start address %x", ulStartAddrOrig)); + + goto error; + } +#if defined(DEBUG) + psInfo->psVMArea = psVMArea; +#endif + + /* + * find_vma locates a region with an end point past a given + * virtual address. So check the address is actually in the region. 
+ */ + if (ulStartAddrOrig < psVMArea->vm_start) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Start address %x is outside of the region returned by find_vma", ulStartAddrOrig)); + goto error; + } + + /* Now check the end address is in range */ + if (ulBeyondEndAddrOrig > psVMArea->vm_end) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: End address %x is outside of the region returned by find_vma", ulBeyondEndAddrOrig)); + goto error; + } + + /* Does the region represent memory mapped I/O? */ + if ((psVMArea->vm_flags & (VM_IO | VM_RESERVED)) != (VM_IO | VM_RESERVED)) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)", psVMArea->vm_flags)); + goto error; + } + + /* We require read and write access */ + if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE)) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: No read/write access to memory region (VMA flags: 0x%lx)", psVMArea->vm_flags)); + goto error; + } + + for (ulAddr = ulStartAddrOrig, i = 0; ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, i++) + { + IMG_CPU_PHYADDR CPUPhysAddr; + IMG_UINT32 ulPFN = 0; + + PVR_ASSERT(i < psInfo->iNumPages); + + if (!CPUVAddrToPFN(psVMArea, ulAddr, &ulPFN, &psInfo->ppsPages[i])) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Invalid CPU virtual address")); + + goto error; + } + if (psInfo->ppsPages[i] == NULL) + { +#if defined(CONFIG_TI_TILER) || defined(CONFIG_DRM_OMAP_DMM_TILER) + /* This could be tiler memory.*/ + IMG_UINT32 ui32TilerAddr = CPUAddrToTilerPhy(ulAddr); + if (ui32TilerAddr) + { + bHavePageStructs = IMG_TRUE; + psInfo->iNumPagesMapped++; + psInfo->psPhysAddr[i].uiAddr = ui32TilerAddr; + psSysPAddr[i].uiAddr = ui32TilerAddr; + continue; + } +#endif /* defined(CONFIG_TI_TILER) || defined(CONFIG_DRM_OMAP_DMM_TILER) */ + + bHaveNoPageStructs = IMG_TRUE; + } + else + { + bHavePageStructs = IMG_TRUE; + + psInfo->iNumPagesMapped++; + + 
PVR_ASSERT(ulPFN == page_to_pfn(psInfo->ppsPages[i])); + } + + CPUPhysAddr.uiAddr = ulPFN << PAGE_SHIFT; + if ((CPUPhysAddr.uiAddr >> PAGE_SHIFT) != ulPFN) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Page frame number out of range (%x)", ulPFN)); + + goto error; + } + + psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr); + psSysPAddr[i] = psInfo->psPhysAddr[i]; + } + PVR_ASSERT(i == psInfo->iNumPages); + +#if defined(VM_MIXEDMAP) + if ((psVMArea->vm_flags & VM_MIXEDMAP) != 0) + { + goto exit; + } +#endif + + if (bHavePageStructs && bHaveNoPageStructs) + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Region is VM_MIXEDMAP, but isn't marked as such")); + goto error; + } + + if (!bHaveNoPageStructs) + { + /* The ideal case; every page has a page structure */ + goto exit; + } + +#if defined(VM_PFNMAP) + if ((psVMArea->vm_flags & VM_PFNMAP) == 0) +#endif + { + PVR_DPF((PVR_DBG_ERROR, + "OSAcquirePhysPageAddr: Region is VM_PFNMAP, but isn't marked as such")); + goto error; + } + +exit: + PVR_ASSERT(bMMapSemHeld); + up_read(¤t->mm->mmap_sem); + + /* Return the cookie */ + *phOSWrapMem = (IMG_HANDLE)psInfo; + + if (bHaveNoPageStructs) + { + PVR_DPF((PVR_DBG_MESSAGE, + "OSAcquirePhysPageAddr: Region contains pages which can't be locked down (no page structures)")); + } + + PVR_ASSERT(psInfo->eType != 0); + + return PVRSRV_OK; + +error: + if (bMMapSemHeld) + { + up_read(¤t->mm->mmap_sem); + } + OSReleasePhysPageAddr((IMG_HANDLE)psInfo); + + PVR_ASSERT(eError != PVRSRV_OK); + + return eError; +} + +typedef void (*InnerCacheOp_t)(const void *pvStart, const void *pvEnd); + +#if defined(__arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) +typedef void (*OuterCacheOp_t)(phys_addr_t uStart, phys_addr_t uEnd); +#else +typedef void (*OuterCacheOp_t)(unsigned long ulStart, unsigned long ulEnd); +#endif + +#if defined(CONFIG_OUTER_CACHE) + +typedef IMG_BOOL (*MemAreaToPhys_t)(LinuxMemArea *psLinuxMemArea, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 
ui32PageNumOffset, + IMG_UINT32 ui32PageNum, + unsigned long *pulStart); + +static IMG_BOOL VMallocAreaToPhys(LinuxMemArea *psLinuxMemArea, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32PageNumOffset, + IMG_UINT32 ui32PageNum, + unsigned long *pulStart) +{ + *pulStart = vmalloc_to_pfn(pvRangeAddrStart + ui32PageNum * PAGE_SIZE) << PAGE_SHIFT; + return IMG_TRUE; +} + +static IMG_BOOL ExternalKVAreaToPhys(LinuxMemArea *psLinuxMemArea, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32PageNumOffset, + IMG_UINT32 ui32PageNum, + unsigned long *pulStart) +{ + IMG_SYS_PHYADDR SysPAddr; + IMG_CPU_PHYADDR CpuPAddr; + SysPAddr = psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr[ui32PageNumOffset + ui32PageNum]; + CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr); + *pulStart = CpuPAddr.uiAddr; + return IMG_TRUE; +} + +static IMG_BOOL AllocPagesAreaToPhys(LinuxMemArea *psLinuxMemArea, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32PageNumOffset, + IMG_UINT32 ui32PageNum, + unsigned long *pulStart) +{ + struct page *pPage; + + pPage = psLinuxMemArea->uData.sPageList.ppsPageList[ui32PageNumOffset + ui32PageNum]; + *pulStart = page_to_pfn(pPage) << PAGE_SHIFT; + return IMG_TRUE; +} + +static IMG_BOOL AllocPagesSparseAreaToPhys(LinuxMemArea *psLinuxMemArea, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32PageNumOffset, + IMG_UINT32 ui32PageNum, + unsigned long *pulStart) +{ + IMG_UINT32 ui32VirtOffset = (ui32PageNumOffset + ui32PageNum) << PAGE_SHIFT; + IMG_UINT32 ui32PhysOffset; + struct page *pPage; + + if (BM_VirtOffsetToPhysical(psLinuxMemArea->hBMHandle, ui32VirtOffset, &ui32PhysOffset)) + { + PVR_ASSERT(ui32PhysOffset <= ui32VirtOffset); + pPage = psLinuxMemArea->uData.sPageList.ppsPageList[ui32PhysOffset >> PAGE_SHIFT]; + *pulStart = page_to_pfn(pPage) << PAGE_SHIFT; + return IMG_TRUE; + } + + return IMG_FALSE; +} + + +static IMG_BOOL IONAreaToPhys(LinuxMemArea *psLinuxMemArea, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32PageNumOffset, + IMG_UINT32 ui32PageNum, + unsigned 
long *pulStart) +{ + IMG_CPU_PHYADDR CpuPAddr; + CpuPAddr = psLinuxMemArea->uData.sIONTilerAlloc.pCPUPhysAddrs[ui32PageNumOffset + ui32PageNum]; + *pulStart = CpuPAddr.uiAddr; + return IMG_TRUE; +} + +#endif /* defined(CONFIG_OUTER_CACHE) */ + +/* g_sMMapMutex must be held while this function is called */ + +static +IMG_VOID *FindMMapBaseVAddr(struct list_head *psMMapOffsetStructList, + IMG_VOID *pvRangeAddrStart, IMG_UINT32 ui32Length) +{ + PKV_OFFSET_STRUCT psOffsetStruct; + IMG_VOID *pvMinVAddr; + + /* There's no kernel-virtual for this type of allocation, so if + * we're flushing it, it must be user-virtual, and therefore + * have a mapping. + */ + list_for_each_entry(psOffsetStruct, psMMapOffsetStructList, sAreaItem) + { + if(OSGetCurrentProcessIDKM() != psOffsetStruct->ui32PID) + continue; + + pvMinVAddr = (IMG_VOID *)psOffsetStruct->ui32UserVAddr; + + /* Within permissible range */ + if(pvRangeAddrStart >= pvMinVAddr && + ui32Length <= psOffsetStruct->ui32RealByteSize) + return pvMinVAddr; + } + + return IMG_NULL; +} + +extern PVRSRV_LINUX_MUTEX g_sMMapMutex; + +static inline void DoInnerCacheOp(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length, + InnerCacheOp_t pfnInnerCacheOp) +{ + LinuxMemArea *psLinuxMemArea = hOSMemHandle; + + if (!psLinuxMemArea->hBMHandle) + { + pfnInnerCacheOp(pvRangeAddrStart, pvRangeAddrStart + ui32Length); + } + else + { + IMG_UINT32 ui32ByteRemain = ui32Length; + IMG_UINT32 ui32BytesToDo = PAGE_SIZE - (((IMG_UINT32) pvRangeAddrStart) & (~PAGE_MASK)); + IMG_UINT8 *pbDo = (IMG_UINT8 *) pvRangeAddrStart; + + while(ui32ByteRemain) + { + if (BM_MapPageAtOffset(psLinuxMemArea->hBMHandle, ui32ByteOffset + (ui32Length - ui32ByteRemain))) + { + pfnInnerCacheOp(pbDo, pbDo + ui32BytesToDo); + } + pbDo += ui32BytesToDo; + ui32ByteRemain -= ui32BytesToDo; + ui32BytesToDo = MIN(ui32ByteRemain, PAGE_SIZE); + } + } +} + +static +IMG_BOOL CheckExecuteCacheOp(IMG_HANDLE hOSMemHandle, + 
IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length, + InnerCacheOp_t pfnInnerCacheOp, + OuterCacheOp_t pfnOuterCacheOp) +{ + LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle; + IMG_UINT32 ui32AreaLength, ui32AreaOffset = 0; + struct list_head *psMMapOffsetStructList; + IMG_VOID *pvMinVAddr; + +#if defined(CONFIG_OUTER_CACHE) + MemAreaToPhys_t pfnMemAreaToPhys = IMG_NULL; + IMG_UINT32 ui32PageNumOffset = 0; +#endif + + PVR_ASSERT(psLinuxMemArea != IMG_NULL); + + LinuxLockMutex(&g_sMMapMutex); + + psMMapOffsetStructList = &psLinuxMemArea->sMMapOffsetStructList; + ui32AreaLength = psLinuxMemArea->ui32ByteSize; + + /* + Don't check the length in the case of sparse mappings as + we only know the physical length not the virtual + */ + if (!psLinuxMemArea->hBMHandle) + { + PVR_ASSERT(ui32Length <= ui32AreaLength); + } + + if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC) + { + ui32AreaOffset = psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset; + psLinuxMemArea = psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea; + } + + /* Recursion surely isn't possible? */ + PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC); + + switch(psLinuxMemArea->eAreaType) + { + case LINUX_MEM_AREA_VMALLOC: + { + if(is_vmalloc_addr(pvRangeAddrStart)) + { + pvMinVAddr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress + ui32AreaOffset; + + /* Outside permissible range */ + if(pvRangeAddrStart < pvMinVAddr) + goto err_blocked; + + DoInnerCacheOp(hOSMemHandle, + ui32ByteOffset, + pvRangeAddrStart, + ui32Length, + pfnInnerCacheOp); + } + else + { + /* If this isn't a vmalloc address, assume we're flushing by + * user-virtual. Compute the mmap base vaddr and use this to + * compute the offset in vmalloc space. 
+ */ + + pvMinVAddr = FindMMapBaseVAddr(psMMapOffsetStructList, + pvRangeAddrStart, ui32Length); + if(!pvMinVAddr) + goto err_blocked; + + DoInnerCacheOp(hOSMemHandle, + ui32ByteOffset, + pvRangeAddrStart, + ui32Length, + pfnInnerCacheOp); + +#if defined(CONFIG_OUTER_CACHE) + /* + * We don't need to worry about cache aliasing here because + * we have already flushed the virtually-indexed caches (L1 + * etc.) by the supplied user-virtual addresses. + * + * The vmalloc address will only be used to determine + * affected physical pages for outer cache flushing. + */ + pvRangeAddrStart = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress + + (ui32AreaOffset & PAGE_MASK) + (pvRangeAddrStart - pvMinVAddr); + } + + pfnMemAreaToPhys = VMallocAreaToPhys; +#else /* defined(CONFIG_OUTER_CACHE) */ + } +#endif /* defined(CONFIG_OUTER_CACHE) */ + break; + } + + case LINUX_MEM_AREA_EXTERNAL_KV: + { + /* We'll only see bPhysContig for frame buffers, and we shouldn't + * be flushing those (they're write combined or uncached). + */ + if (psLinuxMemArea->uData.sExternalKV.bPhysContig == IMG_TRUE) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Attempt to flush contiguous external memory", __func__)); + goto err_blocked; + } + + /* If it has a kernel virtual address, something odd has happened. + * We expect EXTERNAL_KV _only_ from the wrapping of ALLOC_PAGES. 
+ */ + if (psLinuxMemArea->uData.sExternalKV.pvExternalKV != IMG_NULL) + { + PVR_DPF((PVR_DBG_WARNING, "%s: Attempt to flush external memory with a kernel virtual address", __func__)); + goto err_blocked; + } + + pvMinVAddr = FindMMapBaseVAddr(psMMapOffsetStructList, + pvRangeAddrStart, ui32Length); + if(!pvMinVAddr) + goto err_blocked; + + DoInnerCacheOp(hOSMemHandle, + ui32ByteOffset, + pvRangeAddrStart, + ui32Length, + pfnInnerCacheOp); + +#if defined(CONFIG_OUTER_CACHE) + ui32PageNumOffset = ((ui32AreaOffset & PAGE_MASK) + (pvRangeAddrStart - pvMinVAddr)) >> PAGE_SHIFT; + pfnMemAreaToPhys = ExternalKVAreaToPhys; +#endif + break; + } + + case LINUX_MEM_AREA_ION: + { + pvMinVAddr = FindMMapBaseVAddr(psMMapOffsetStructList, + pvRangeAddrStart, ui32Length); + if(!pvMinVAddr) + goto err_blocked; + + DoInnerCacheOp(hOSMemHandle, + ui32ByteOffset, + pvRangeAddrStart, + ui32Length, + pfnInnerCacheOp); + +#if defined(CONFIG_OUTER_CACHE) + ui32PageNumOffset = ((ui32AreaOffset & PAGE_MASK) + (pvRangeAddrStart - pvMinVAddr)) >> PAGE_SHIFT; + pfnMemAreaToPhys = IONAreaToPhys; +#endif + break; + } + + case LINUX_MEM_AREA_ALLOC_PAGES: + { + pvMinVAddr = FindMMapBaseVAddr(psMMapOffsetStructList, + pvRangeAddrStart, ui32Length); + if(!pvMinVAddr) + goto err_blocked; + + DoInnerCacheOp(hOSMemHandle, + ui32ByteOffset, + pvRangeAddrStart, + ui32Length, + pfnInnerCacheOp); + +#if defined(CONFIG_OUTER_CACHE) + ui32PageNumOffset = ((ui32AreaOffset & PAGE_MASK) + (pvRangeAddrStart - pvMinVAddr)) >> PAGE_SHIFT; + if (psLinuxMemArea->hBMHandle) + { + pfnMemAreaToPhys = AllocPagesSparseAreaToPhys; + } + else + { + pfnMemAreaToPhys = AllocPagesAreaToPhys; + } +#endif + break; + } + + default: + PVR_DBG_BREAK; + } + + LinuxUnLockMutex(&g_sMMapMutex); + +#if defined(CONFIG_OUTER_CACHE) + PVR_ASSERT(pfnMemAreaToPhys != IMG_NULL); + + /* Outer caches need some more work, to get a list of physical addresses */ + { + unsigned long ulStart, ulEnd, ulLength, ulStartOffset, ulEndOffset; + 
IMG_UINT32 i, ui32NumPages; + IMG_BOOL bValidPage; + + /* Length and offsets of flush region WRT page alignment */ + ulLength = (unsigned long)ui32Length; + ulStartOffset = ((unsigned long)pvRangeAddrStart) & (PAGE_SIZE - 1); + ulEndOffset = ((unsigned long)pvRangeAddrStart + ulLength) & (PAGE_SIZE - 1); + + /* The affected pages, rounded up */ + ui32NumPages = (ulStartOffset + ulLength + PAGE_SIZE - 1) >> PAGE_SHIFT; + + for(i = 0; i < ui32NumPages; i++) + { + bValidPage = pfnMemAreaToPhys(psLinuxMemArea, pvRangeAddrStart, + ui32PageNumOffset, i, &ulStart); + if (bValidPage) + { + ulEnd = ulStart + PAGE_SIZE; + + if(i == ui32NumPages - 1 && ulEndOffset != 0) + ulEnd = ulStart + ulEndOffset; + + if(i == 0) + ulStart += ulStartOffset; + + pfnOuterCacheOp(ulStart, ulEnd); + } + } + } +#endif + + return IMG_TRUE; + +err_blocked: + PVR_DPF((PVR_DBG_WARNING, "%s: Blocked cache op on virtual range " + "%p-%p (type %d)", __func__, + pvRangeAddrStart, pvRangeAddrStart + ui32Length, + psLinuxMemArea->eAreaType)); + LinuxUnLockMutex(&g_sMMapMutex); + return IMG_FALSE; +} + +#if defined(__i386__) + +#define ROUND_UP(x,a) (((x) + (a) - 1) & ~((a) - 1)) + +static void per_cpu_cache_flush(void *arg) +{ + PVR_UNREFERENCED_PARAMETER(arg); + wbinvd(); +} + +static void x86_flush_cache_range(const void *pvStart, const void *pvEnd) +{ + IMG_BYTE *pbStart = (IMG_BYTE *)pvStart; + IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd; + IMG_BYTE *pbBase; + + pbEnd = (IMG_BYTE *)ROUND_UP((IMG_UINTPTR_T)pbEnd, + boot_cpu_data.x86_clflush_size); + + mb(); + for(pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size) + { + clflush(pbBase); + } + mb(); +} + +IMG_VOID OSCleanCPUCacheKM(IMG_VOID) +{ + /* No clean feature on x86 */ + ON_EACH_CPU(per_cpu_cache_flush, NULL, 1); +} + +IMG_VOID OSFlushCPUCacheKM(IMG_VOID) +{ + ON_EACH_CPU(per_cpu_cache_flush, NULL, 1); +} + +IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + 
IMG_UINT32 ui32Length) +{ + /* Write-back and invalidate */ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, pvRangeAddrStart, ui32Length, + x86_flush_cache_range, IMG_NULL); +} + +IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + /* No clean feature on x86 */ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, pvRangeAddrStart, ui32Length, + x86_flush_cache_range, IMG_NULL); +} + +IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + /* No invalidate-only support */ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, pvRangeAddrStart, ui32Length, + x86_flush_cache_range, IMG_NULL); +} + +#else /* defined(__i386__) */ + +#if defined(__arm__) + +static void per_cpu_cache_flush(void *arg) +{ + PVR_UNREFERENCED_PARAMETER(arg); + flush_cache_all(); +} + +IMG_VOID OSCleanCPUCacheKM(IMG_VOID) +{ + /* No full (inner) cache clean op */ + ON_EACH_CPU(per_cpu_cache_flush, NULL, 1); +#if defined(CONFIG_OUTER_CACHE) + outer_clean_range(0, ULONG_MAX); +#endif +} + +IMG_VOID OSFlushCPUCacheKM(IMG_VOID) +{ + ON_EACH_CPU(per_cpu_cache_flush, NULL, 1); +#if defined(CONFIG_OUTER_CACHE) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) + /* To use the "deferred flush" (not clean) DDK feature you need a kernel + * implementation of outer_flush_all() for ARM CPUs with an outer cache + * controller (e.g. PL310, common with Cortex A9 and later). + * + * Reference DDKs don't require this functionality, as they will only + * clean the cache, never flush (clean+invalidate) it. 
+ */ + outer_flush_all(); +#endif +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)) +static inline size_t pvr_dmac_range_len(const void *pvStart, const void *pvEnd) +{ + return (size_t)((char *)pvEnd - (char *)pvStart); +} +#endif + +static void pvr_dmac_inv_range(const void *pvStart, const void *pvEnd) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) + dmac_inv_range(pvStart, pvEnd); +#else + dmac_map_area(pvStart, pvr_dmac_range_len(pvStart, pvEnd), DMA_FROM_DEVICE); +#endif +} + +static void pvr_dmac_clean_range(const void *pvStart, const void *pvEnd) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) + dmac_clean_range(pvStart, pvEnd); +#else + dmac_map_area(pvStart, pvr_dmac_range_len(pvStart, pvEnd), DMA_TO_DEVICE); +#endif +} + +IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, + pvRangeAddrStart, ui32Length, + dmac_flush_range, outer_flush_range); +} + +IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, + pvRangeAddrStart, ui32Length, + pvr_dmac_clean_range, outer_clean_range); +} + +IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, + pvRangeAddrStart, ui32Length, + pvr_dmac_inv_range, outer_inv_range); +} + +#else /* defined(__arm__) */ + +#if defined(__mips__) +/* + * dmac cache functions are supposed to be used for dma + * memory which comes from dma-able memory. 
However examining + * the implementation of dmac cache functions and experimenting, + * can assert that dmac functions are safe to use for high-mem + * memory as well for our OS{Clean/Flush/Invalidate}Cache functions + * + */ + +IMG_VOID OSCleanCPUCacheKM(IMG_VOID) +{ + /* dmac functions flush full cache if size is larger than + * p-cache size. This is a workaround for the fact that + * __flush_cache_all is not an exported symbol. Please + * replace with custom function if available in latest + * version of linux being used. + * Arbitrary large number (1MB) which should be larger than + * mips p-cache sizes for some time in future. + * */ + dma_cache_wback(0, 0x100000); +} + +IMG_VOID OSFlushCPUCacheKM(IMG_VOID) +{ + /* dmac functions flush full cache if size is larger than + * p-cache size. This is a workaround for the fact that + * __flush_cache_all is not an exported symbol. Please + * replace with custom function if available in latest + * version of linux being used. + * Arbitrary large number (1MB) which should be larger than + * mips p-cache sizes for some time in future. 
+ * */ + dma_cache_wback_inv(0, 0x100000); +} + +static inline IMG_UINT32 pvr_dma_range_len(const void *pvStart, const void *pvEnd) +{ + return (IMG_UINT32)((char *)pvEnd - (char *)pvStart); +} + +static void pvr_dma_cache_wback_inv(const void *pvStart, const void *pvEnd) +{ + dma_cache_wback_inv((IMG_UINTPTR_T)pvStart, pvr_dma_range_len(pvStart, pvEnd)); +} + +static void pvr_dma_cache_wback(const void *pvStart, const void *pvEnd) +{ + dma_cache_wback((IMG_UINTPTR_T)pvStart, pvr_dma_range_len(pvStart, pvEnd)); +} + +static void pvr_dma_cache_inv(const void *pvStart, const void *pvEnd) +{ + dma_cache_inv((IMG_UINTPTR_T)pvStart, pvr_dma_range_len(pvStart, pvEnd)); +} + +IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, + pvRangeAddrStart, ui32Length, + pvr_dma_cache_wback_inv, IMG_NULL); +} + +IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, + pvRangeAddrStart, ui32Length, + pvr_dma_cache_wback, IMG_NULL); +} + +IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + return CheckExecuteCacheOp(hOSMemHandle, ui32ByteOffset, + pvRangeAddrStart, ui32Length, + pvr_dma_cache_inv, IMG_NULL); +} + +#else /* defined(__mips__) */ + +#error "Implement CPU cache flush/clean/invalidate primitives for this CPU!" 
+ +#endif /* defined(__mips__) */ + +#endif /* defined(__arm__) */ + +#endif /* defined(__i386__) */ + +typedef struct _AtomicStruct +{ + atomic_t RefCount; +} AtomicStruct; + +PVRSRV_ERROR OSAtomicAlloc(IMG_PVOID *ppvRefCount) +{ + AtomicStruct *psRefCount; + + psRefCount = kmalloc(sizeof(AtomicStruct), GFP_KERNEL); + if (psRefCount == NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + atomic_set(&psRefCount->RefCount, 0); + + *ppvRefCount = psRefCount; + return PVRSRV_OK; +} + +IMG_VOID OSAtomicFree(IMG_PVOID pvRefCount) +{ + AtomicStruct *psRefCount = pvRefCount; + + PVR_ASSERT(atomic_read(&psRefCount->RefCount) == 0); + kfree(psRefCount); +} + +IMG_VOID OSAtomicInc(IMG_PVOID pvRefCount) +{ + AtomicStruct *psRefCount = pvRefCount; + + atomic_inc(&psRefCount->RefCount); +} + +IMG_BOOL OSAtomicDecAndTest(IMG_PVOID pvRefCount) +{ + AtomicStruct *psRefCount = pvRefCount; + + return atomic_dec_and_test(&psRefCount->RefCount) ? IMG_TRUE:IMG_FALSE; +} + +IMG_UINT32 OSAtomicRead(IMG_PVOID pvRefCount) +{ + AtomicStruct *psRefCount = pvRefCount; + + return (IMG_UINT32) atomic_read(&psRefCount->RefCount); +} + +IMG_VOID OSReleaseBridgeLock(IMG_VOID) +{ + LinuxUnLockMutex(&gPVRSRVLock); +} + +IMG_VOID OSReacquireBridgeLock(IMG_VOID) +{ + LinuxLockMutex(&gPVRSRVLock); +} + +typedef struct _OSTime +{ + unsigned long ulTime; +} OSTime; + +PVRSRV_ERROR OSTimeCreateWithUSOffset(IMG_PVOID *pvRet, IMG_UINT32 ui32USOffset) +{ + OSTime *psOSTime; + + psOSTime = kmalloc(sizeof(OSTime), GFP_KERNEL); + if (psOSTime == IMG_NULL) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + + psOSTime->ulTime = usecs_to_jiffies(jiffies_to_usecs(jiffies) + ui32USOffset); + *pvRet = psOSTime; + return PVRSRV_OK; +} + + +IMG_BOOL OSTimeHasTimePassed(IMG_PVOID pvData) +{ + OSTime *psOSTime = pvData; + + if (time_is_before_jiffies(psOSTime->ulTime)) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +IMG_VOID OSTimeDestroy(IMG_PVOID pvData) +{ + kfree(pvData); +} + +IMG_VOID 
OSGetCurrentProcessNameKM(IMG_CHAR *pszName, IMG_UINT32 ui32Size) +{ + strncpy(pszName, current->comm, MIN(ui32Size,TASK_COMM_LEN)); +} + +/* One time osfunc initialisation */ +PVRSRV_ERROR PVROSFuncInit(IMG_VOID) +{ +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) + { + psTimerWorkQueue = create_workqueue("pvr_timer"); + if (psTimerWorkQueue == NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", __FUNCTION__)); + return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD; + + } + } +#endif + +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || defined(PVR_LINUX_TIMERS_USING_SHARED_WORKQUEUE) + { + IMG_UINT32 ui32i; + + for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) + { + TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i]; + + INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack); + } + } +#endif + +#if defined (SUPPORT_ION) + { + PVRSRV_ERROR eError; + + eError = IonInit(); + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: IonInit failed", __FUNCTION__)); + } + } +#endif + return PVRSRV_OK; +} + +/* + * Osfunc deinitialisation. + * Note that PVROSFuncInit may not have been called + */ +IMG_VOID PVROSFuncDeInit(IMG_VOID) +{ +#if defined (SUPPORT_ION) + IonDeinit(); +#endif +#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) + if (psTimerWorkQueue != NULL) + { + destroy_workqueue(psTimerWorkQueue); + } +#endif +} diff --git a/pvr-source/services4/srvkm/env/linux/osperproc.c b/pvr-source/services4/srvkm/env/linux/osperproc.c new file mode 100644 index 0000000..a22b461 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/osperproc.c @@ -0,0 +1,146 @@ +/*************************************************************************/ /*! +@Title Linux specific per process data functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include "services_headers.h" +#include "osperproc.h" + +#include "env_perproc.h" +#include "proc.h" +#if defined (SUPPORT_ION) +#include "linux/ion.h" + +extern struct ion_device *psIonDev; +#endif +extern IMG_UINT32 gui32ReleasePID; + +PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData) +{ + PVRSRV_ERROR eError; + IMG_HANDLE hBlockAlloc; + PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc; + + eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_ENV_PER_PROCESS_DATA), + phOsPrivateData, + &hBlockAlloc, + "Environment per Process Data"); + + if (eError != PVRSRV_OK) + { + *phOsPrivateData = IMG_NULL; + + PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed (%d)", __FUNCTION__, eError)); + return eError; + } + + psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)*phOsPrivateData; + OSMemSet(psEnvPerProc, 0, sizeof(*psEnvPerProc)); + + psEnvPerProc->hBlockAlloc = hBlockAlloc; + + /* Linux specific mmap processing */ + LinuxMMapPerProcessConnect(psEnvPerProc); + +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) + /* Linked list of PVRSRV_FILE_PRIVATE_DATA structures */ + INIT_LIST_HEAD(&psEnvPerProc->sDRMAuthListHead); +#endif +#if defined(SUPPORT_ION) + OSSNPrintf(psEnvPerProc->azIonClientName, ION_CLIENT_NAME_SIZE, "pvr_ion_client-%d", OSGetCurrentProcessIDKM()); + psEnvPerProc->psIONClient = + ion_client_create(psIonDev, + 1 << ION_HEAP_TYPE_SYSTEM_CONTIG | + 1 << ION_HEAP_TYPE_SYSTEM, + psEnvPerProc->azIonClientName); + + if (IS_ERR_OR_NULL(psEnvPerProc->psIONClient)) + { + PVR_DPF((PVR_DBG_ERROR, "OSPerProcessPrivateDataInit: Couldn't create " + "ion client for per process data")); + return PVRSRV_ERROR_OUT_OF_MEMORY; + } +#endif /* SUPPORT_ION */ + return PVRSRV_OK; +} + +PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData) +{ + PVRSRV_ERROR eError; + PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc; + + if (hOsPrivateData == IMG_NULL) + { 
+ return PVRSRV_OK; + } + + psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)hOsPrivateData; + + /* Linux specific mmap processing */ + LinuxMMapPerProcessDisconnect(psEnvPerProc); + + /* Remove per process /proc entries */ + RemovePerProcessProcDir(psEnvPerProc); + + eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, + sizeof(PVRSRV_ENV_PER_PROCESS_DATA), + hOsPrivateData, + psEnvPerProc->hBlockAlloc); + /*not nulling pointer, copy on stack*/ + + if (eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: OSFreeMem failed (%d)", __FUNCTION__, eError)); + } + + return PVRSRV_OK; +} + +PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase) +{ + return LinuxMMapPerProcessHandleOptions(psHandleBase); +} + +IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID) +{ + if(!gui32ReleasePID) + return NULL; + return PVRSRVPerProcessPrivateData(gui32ReleasePID); +} diff --git a/pvr-source/services4/srvkm/env/linux/pdump.c b/pvr-source/services4/srvkm/env/linux/pdump.c new file mode 100644 index 0000000..0124737 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/pdump.c @@ -0,0 +1,804 @@ +/*************************************************************************/ /*! +@Title Parameter dump macro target routines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#if defined (SUPPORT_SGX) || defined (SUPPORT_VGX) +#if defined (PDUMP) + +#include <asm/atomic.h> +#include <stdarg.h> +#if defined (SUPPORT_SGX) +#include "sgxdefs.h" /* Is this still needed? */ +#endif +#include "services_headers.h" + +#include "pvrversion.h" +#include "pvr_debug.h" + +#include "dbgdrvif.h" +#if defined (SUPPORT_SGX) +#include "sgxmmu.h"/* Is this still needed? 
*/ +#endif +#include "mm.h" +#include "pdump_km.h" +#include "pdump_int.h" + +#include <linux/kernel.h> // sprintf +#include <linux/string.h> // strncpy, strlen + +static IMG_BOOL PDumpWriteString2 (IMG_CHAR * pszString, IMG_UINT32 ui32Flags); +static IMG_BOOL PDumpWriteILock (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags); +static IMG_VOID DbgSetFrame (PDBG_STREAM psStream, IMG_UINT32 ui32Frame); +static IMG_VOID DbgSetMarker (PDBG_STREAM psStream, IMG_UINT32 ui32Marker); + +#define PDUMP_DATAMASTER_PIXEL (1) +#define PDUMP_DATAMASTER_EDM (3) + +/* + Maximum file size to split output files +*/ +#define MAX_FILE_SIZE 0x40000000 + +static atomic_t gsPDumpSuspended = ATOMIC_INIT(0); + +static PDBGKM_SERVICE_TABLE gpfnDbgDrv = IMG_NULL; + + + +IMG_CHAR *pszStreamName[PDUMP_NUM_STREAMS] = { "ParamStream2", + "ScriptStream2", + "DriverInfoStream"}; +typedef struct PDBG_PDUMP_STATE_TAG +{ + PDBG_STREAM psStream[PDUMP_NUM_STREAMS]; + IMG_UINT32 ui32ParamFileNum; + + IMG_CHAR *pszMsg; + IMG_CHAR *pszScript; + IMG_CHAR *pszFile; + +} PDBG_PDUMP_STATE; + +static PDBG_PDUMP_STATE gsDBGPdumpState = {{IMG_NULL}, 0, IMG_NULL, IMG_NULL, IMG_NULL}; + +#define SZ_MSG_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1 +#define SZ_SCRIPT_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1 +#define SZ_FILENAME_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1 + + + + +static inline IMG_BOOL PDumpSuspended(IMG_VOID) +{ + return (atomic_read(&gsPDumpSuspended) != 0) ? IMG_TRUE : IMG_FALSE; +} + +/*! + * \name PDumpOSGetScriptString + */ +PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript, + IMG_UINT32 *pui32MaxLen) +{ + *phScript = (IMG_HANDLE)gsDBGPdumpState.pszScript; + *pui32MaxLen = SZ_SCRIPT_SIZE_MAX; + if ((!*phScript) || PDumpSuspended()) + { + return PVRSRV_ERROR_PDUMP_NOT_ACTIVE; + } + return PVRSRV_OK; +} + +/*! 
+ * \name PDumpOSGetMessageString + */ +PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg, + IMG_UINT32 *pui32MaxLen) +{ + *ppszMsg = gsDBGPdumpState.pszMsg; + *pui32MaxLen = SZ_MSG_SIZE_MAX; + if ((!*ppszMsg) || PDumpSuspended()) + { + return PVRSRV_ERROR_PDUMP_NOT_ACTIVE; + } + return PVRSRV_OK; +} + +/*! + * \name PDumpOSGetFilenameString + */ +PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile, + IMG_UINT32 *pui32MaxLen) +{ + *ppszFile = gsDBGPdumpState.pszFile; + *pui32MaxLen = SZ_FILENAME_SIZE_MAX; + if ((!*ppszFile) || PDumpSuspended()) + { + return PVRSRV_ERROR_PDUMP_NOT_ACTIVE; + } + return PVRSRV_OK; +} + +/*! + * \name PDumpOSWriteString2 + */ +IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags) +{ + return PDumpWriteString2(hScript, ui32Flags); +} + +/*! + * \name PDumpOSBufprintf + */ +PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...) +{ + IMG_CHAR* pszBuf = hBuf; + IMG_INT32 n; + va_list vaArgs; + + va_start(vaArgs, pszFormat); + + n = vsnprintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs); + + va_end(vaArgs); + + if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1) /* glibc >= 2.1 or glibc 2.0 */ + { + PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete.")); + + return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW; + } + +#if defined(PDUMP_DEBUG_OUTFILES) + g_ui32EveryLineCounter++; +#endif + return PVRSRV_OK; +} + +/*! + * \name PDumpOSVSprintf + */ +PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs) +{ + IMG_INT32 n; + + n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs); + + if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1) /* glibc >= 2.1 or glibc 2.0 */ + { + PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete.")); + + return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW; + } + + return PVRSRV_OK; +} + +/*! 
+ * \name PDumpOSDebugPrintf + */ +IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...) +{ + PVR_UNREFERENCED_PARAMETER(pszFormat); + + /* FIXME: Implement using services PVR_DBG or otherwise with kprintf */ +} + +/*! + * \name PDumpOSSprintf + */ +PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...) +{ + IMG_INT32 n; + va_list vaArgs; + + va_start(vaArgs, pszFormat); + + n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs); + + va_end(vaArgs); + + if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1) /* glibc >= 2.1 or glibc 2.0 */ + { + PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete.")); + + return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW; + } + + return PVRSRV_OK; +} + +/*! + * \name PDumpOSBuflen + */ +IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax) +{ + IMG_CHAR* pszBuf = hBuffer; + IMG_UINT32 ui32Count = 0; + + while ((pszBuf[ui32Count]!=0) && (ui32Count<ui32BufferSizeMax) ) + { + ui32Count++; + } + return(ui32Count); +} + +/*! + * \name PDumpOSVerifyLineEnding + */ +IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax) +{ + IMG_UINT32 ui32Count; + IMG_CHAR* pszBuf = hBuffer; + + /* strlen */ + ui32Count = PDumpOSBuflen(hBuffer, ui32BufferSizeMax); + + /* Put \r \n sequence at the end if it isn't already there */ + if ((ui32Count >= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count<ui32BufferSizeMax)) + { + pszBuf[ui32Count] = '\n'; + ui32Count++; + pszBuf[ui32Count] = '\0'; + } + if ((ui32Count >= 2) && (pszBuf[ui32Count-2] != '\r') && (ui32Count<ui32BufferSizeMax)) + { + pszBuf[ui32Count-1] = '\r'; + pszBuf[ui32Count] = '\n'; + ui32Count++; + pszBuf[ui32Count] = '\0'; + } +} + +/*! + * \name PDumpOSGetStream + */ +IMG_HANDLE PDumpOSGetStream(IMG_UINT32 ePDumpStream) +{ + return (IMG_HANDLE)gsDBGPdumpState.psStream[ePDumpStream]; +} + +/*! 
+ * \name PDumpOSGetStreamOffset + */ +IMG_UINT32 PDumpOSGetStreamOffset(IMG_UINT32 ePDumpStream) +{ + PDBG_STREAM psStream = gsDBGPdumpState.psStream[ePDumpStream]; + return gpfnDbgDrv->pfnGetStreamOffset(psStream); +} + +/*! + * \name PDumpOSGetParamFileNum + */ +IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID) +{ + return gsDBGPdumpState.ui32ParamFileNum; +} + +/*! + * \name PDumpOSWriteString + */ +IMG_BOOL PDumpOSWriteString(IMG_HANDLE hStream, + IMG_UINT8 *psui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32Flags) +{ + PDBG_STREAM psStream = (PDBG_STREAM)hStream; + return PDumpWriteILock(psStream, + psui8Data, + ui32Size, + ui32Flags); +} + +/*! + * \name PDumpOSCheckForSplitting + */ +IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags) +{ + /* File size limit not implemented for this OS. + */ + PVR_UNREFERENCED_PARAMETER(hStream); + PVR_UNREFERENCED_PARAMETER(ui32Size); + PVR_UNREFERENCED_PARAMETER(ui32Flags); +} + +/*! + * \name PDumpOSJTInitialised + */ +IMG_BOOL PDumpOSJTInitialised(IMG_VOID) +{ + if(gpfnDbgDrv) + { + return IMG_TRUE; + } + return IMG_FALSE; +} + +/*! + * \name PDumpOSIsSuspended + */ +inline IMG_BOOL PDumpOSIsSuspended(IMG_VOID) +{ + return (atomic_read(&gsPDumpSuspended) != 0) ? IMG_TRUE : IMG_FALSE; +} + +/*! 
+ * \name PDumpOSCPUVAddrToDevPAddr + */ +IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32Offset, + IMG_UINT8 *pui8LinAddr, + IMG_UINT32 ui32PageSize, + IMG_DEV_PHYADDR *psDevPAddr) +{ + IMG_CPU_PHYADDR sCpuPAddr; + + PVR_UNREFERENCED_PARAMETER(pui8LinAddr); + PVR_UNREFERENCED_PARAMETER(ui32PageSize); /* for when no assert */ + + /* Caller must now alway supply hOSMemHandle, even though we only (presently) + use it here in the linux implementation */ + + PVR_ASSERT (hOSMemHandle != IMG_NULL); + + sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset); + PVR_ASSERT((sCpuPAddr.uiAddr & (ui32PageSize - 1)) == 0); + + /* convert CPU physical addr to device physical */ + *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr); +} + +/*! + * \name PDumpOSCPUVAddrToPhysPages + */ +IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32Offset, + IMG_PUINT8 pui8LinAddr, + IMG_UINT32 ui32DataPageMask, + IMG_UINT32 *pui32PageOffset) +{ + if(hOSMemHandle) + { + /* + * If a Services memory handle is provided then use it. + */ + IMG_CPU_PHYADDR sCpuPAddr; + + PVR_UNREFERENCED_PARAMETER(pui8LinAddr); + + sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset); + *pui32PageOffset = sCpuPAddr.uiAddr & ui32DataPageMask; + } + else + { + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + PVR_UNREFERENCED_PARAMETER(ui32Offset); + + *pui32PageOffset = ((IMG_UINT32)pui8LinAddr & ui32DataPageMask); + } +} + +/*! 
+ * \name PDumpOSDebugDriverWrite + */ +IMG_UINT32 PDumpOSDebugDriverWrite( PDBG_STREAM psStream, + PDUMP_DDWMODE eDbgDrvWriteMode, + IMG_UINT8 *pui8Data, + IMG_UINT32 ui32BCount, + IMG_UINT32 ui32Level, + IMG_UINT32 ui32DbgDrvFlags) +{ + switch(eDbgDrvWriteMode) + { + case PDUMP_WRITE_MODE_CONTINUOUS: + PVR_UNREFERENCED_PARAMETER(ui32DbgDrvFlags); + return gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount, ui32Level); + case PDUMP_WRITE_MODE_LASTFRAME: + return gpfnDbgDrv->pfnWriteLF(psStream, pui8Data, ui32BCount, ui32Level, ui32DbgDrvFlags); + case PDUMP_WRITE_MODE_BINCM: + PVR_UNREFERENCED_PARAMETER(ui32DbgDrvFlags); + return gpfnDbgDrv->pfnWriteBINCM(psStream, pui8Data, ui32BCount, ui32Level); + case PDUMP_WRITE_MODE_PERSISTENT: + PVR_UNREFERENCED_PARAMETER(ui32DbgDrvFlags); + return gpfnDbgDrv->pfnWritePersist(psStream, pui8Data, ui32BCount, ui32Level); + default: + PVR_UNREFERENCED_PARAMETER(ui32DbgDrvFlags); + break; + } + return 0xFFFFFFFFU; +} + +/*! + * \name PDumpOSReleaseExecution + */ +IMG_VOID PDumpOSReleaseExecution(IMG_VOID) +{ + OSReleaseThreadQuanta(); +} + +/************************************************************************** + * Function Name : PDumpInit + * Outputs : None + * Returns : + * Description : Reset connection to vldbgdrv + * Then try to connect to PDUMP streams +**************************************************************************/ +IMG_VOID PDumpInit(IMG_VOID) +{ + IMG_UINT32 i; + DBGKM_CONNECT_NOTIFIER sConnectNotifier; + + /* If we tried this earlier, then we might have connected to the driver + * But if pdump.exe was running then the stream connected would fail + */ + if (!gpfnDbgDrv) + { + DBGDrvGetServiceTable(&gpfnDbgDrv); + + + // If something failed then no point in trying to connect streams + if (gpfnDbgDrv == IMG_NULL) + { + return; + } + + /* + * Pass the connection notify callback + */ + sConnectNotifier.pfnConnectNotifier = &PDumpConnectionNotify; + 
gpfnDbgDrv->pfnSetConnectNotifier(sConnectNotifier); + + if(!gsDBGPdumpState.pszFile) + { + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszFile, 0, + "Filename string") != PVRSRV_OK) + { + goto init_failed; + } + } + + if(!gsDBGPdumpState.pszMsg) + { + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszMsg, 0, + "Message string") != PVRSRV_OK) + { + goto init_failed; + } + } + + if(!gsDBGPdumpState.pszScript) + { + if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszScript, 0, + "Script string") != PVRSRV_OK) + { + goto init_failed; + } + } + + for(i=0; i < PDUMP_NUM_STREAMS; i++) + { + gsDBGPdumpState.psStream[i] = gpfnDbgDrv->pfnCreateStream(pszStreamName[i], + DEBUG_CAPMODE_FRAMED, + DEBUG_OUTMODE_STREAMENABLE, + 0, + 10); + + gpfnDbgDrv->pfnSetCaptureMode(gsDBGPdumpState.psStream[i],DEBUG_CAPMODE_FRAMED,0xFFFFFFFF, 0xFFFFFFFF, 1); + gpfnDbgDrv->pfnSetFrame(gsDBGPdumpState.psStream[i],0); + } + + PDUMPCOMMENT("Driver Product Name: %s", VS_PRODUCT_NAME); + PDUMPCOMMENT("Driver Product Version: %s (%s)", PVRVERSION_STRING, PVRVERSION_FAMILY); + PDUMPCOMMENT("Start of Init Phase"); + } + + return; + +init_failed: + + if(gsDBGPdumpState.pszFile) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0); + gsDBGPdumpState.pszFile = IMG_NULL; + } + + if(gsDBGPdumpState.pszScript) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0); + gsDBGPdumpState.pszScript = IMG_NULL; + } + + if(gsDBGPdumpState.pszMsg) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0); + gsDBGPdumpState.pszMsg = IMG_NULL; + } + + /* + * Remove the connection notify callback + */ + sConnectNotifier.pfnConnectNotifier = 0; + gpfnDbgDrv->pfnSetConnectNotifier(sConnectNotifier); + + gpfnDbgDrv = IMG_NULL; +} + + +IMG_VOID 
PDumpDeInit(IMG_VOID) +{ + IMG_UINT32 i; + DBGKM_CONNECT_NOTIFIER sConnectNotifier; + + for(i=0; i < PDUMP_NUM_STREAMS; i++) + { + gpfnDbgDrv->pfnDestroyStream(gsDBGPdumpState.psStream[i]); + } + + if(gsDBGPdumpState.pszFile) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0); + gsDBGPdumpState.pszFile = IMG_NULL; + } + + if(gsDBGPdumpState.pszScript) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0); + gsDBGPdumpState.pszScript = IMG_NULL; + } + + if(gsDBGPdumpState.pszMsg) + { + OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0); + gsDBGPdumpState.pszMsg = IMG_NULL; + } + + /* + * Remove the connection notify callback + */ + sConnectNotifier.pfnConnectNotifier = 0; + gpfnDbgDrv->pfnSetConnectNotifier(sConnectNotifier); + + gpfnDbgDrv = IMG_NULL; +} + +/************************************************************************** + * Function Name : PDumpStartInitPhaseKM + * Inputs : None + * Outputs : None + * Returns : None + * Description : Resume init phase state +**************************************************************************/ +PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID) +{ + IMG_UINT32 i; + + if (gpfnDbgDrv) + { + PDUMPCOMMENT("Start Init Phase"); + for(i=0; i < PDUMP_NUM_STREAMS; i++) + { + gpfnDbgDrv->pfnStartInitPhase(gsDBGPdumpState.psStream[i]); + } + } + return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpStopInitPhaseKM + * Inputs : None + * Outputs : None + * Returns : None + * Description : End init phase state +**************************************************************************/ +PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID) +{ + IMG_UINT32 i; + + if (gpfnDbgDrv) + { + PDUMPCOMMENT("Stop Init Phase"); + + for(i=0; i < PDUMP_NUM_STREAMS; i++) + { + gpfnDbgDrv->pfnStopInitPhase(gsDBGPdumpState.psStream[i]); + } + } + 
return PVRSRV_OK; +} + +/************************************************************************** + * Function Name : PDumpIsLastCaptureFrameKM + * Inputs : None + * Outputs : None + * Returns : True or false + * Description : Tests whether the current frame is being pdumped +**************************************************************************/ +IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID) +{ + return gpfnDbgDrv->pfnIsLastCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]); +} + + +/************************************************************************** + * Function Name : PDumpIsCaptureFrameKM + * Inputs : None + * Outputs : None + * Returns : True or false + * Description : Tests whether the current frame is being pdumped +**************************************************************************/ +IMG_BOOL PDumpOSIsCaptureFrameKM(IMG_VOID) +{ + if (PDumpSuspended()) + { + return IMG_FALSE; + } + return gpfnDbgDrv->pfnIsCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], IMG_FALSE); +} + +/************************************************************************** + * Function Name : PDumpSetFrameKM + * Inputs : None + * Outputs : None + * Returns : None + * Description : Sets a frame +**************************************************************************/ +PVRSRV_ERROR PDumpOSSetFrameKM(IMG_UINT32 ui32Frame) +{ + IMG_UINT32 ui32Stream; + + for (ui32Stream = 0; ui32Stream < PDUMP_NUM_STREAMS; ui32Stream++) + { + if (gsDBGPdumpState.psStream[ui32Stream]) + { + DbgSetFrame(gsDBGPdumpState.psStream[ui32Stream], ui32Frame); + } + } + + return PVRSRV_OK; +} + + +/***************************************************************************** + FUNCTION : PDumpWriteString2 + + PURPOSE : + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_BOOL PDumpWriteString2(IMG_CHAR * pszString, IMG_UINT32 ui32Flags) +{ + return 
PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], (IMG_UINT8 *) pszString, strlen(pszString), ui32Flags); +} + + +/***************************************************************************** + FUNCTION : PDumpWriteILock + + PURPOSE : Writes, making sure it all goes... + + PARAMETERS : + + RETURNS : +*****************************************************************************/ +static IMG_BOOL PDumpWriteILock(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags) +{ + IMG_UINT32 ui32Written = 0; + if ((psStream == IMG_NULL) || PDumpSuspended() || ((ui32Flags & PDUMP_FLAGS_NEVER) != 0)) + { + PVR_DPF((PVR_DBG_MESSAGE, "PDumpWriteILock: Failed to write 0x%x bytes to stream 0x%x", ui32Count, (IMG_UINT32)psStream)); + return IMG_TRUE; + } + + + /* + Set the stream marker to split output files + */ + + if (psStream == gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]) + { + IMG_UINT32 ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]); + + if (ui32ParamOutPos + ui32Count > MAX_FILE_SIZE) + { + if ((gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2] && PDumpWriteString2("\r\n-- Splitting pdump output file\r\n\r\n", ui32Flags))) + { + DbgSetMarker(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], ui32ParamOutPos); + gsDBGPdumpState.ui32ParamFileNum++; + } + } + } + + ui32Written = DbgWrite(psStream, pui8Data, ui32Count, ui32Flags); + + if (ui32Written == 0xFFFFFFFF) + { + return IMG_FALSE; + } + + return IMG_TRUE; +} + +/***************************************************************************** + FUNCTION : DbgSetFrame + + PURPOSE : Sets the frame in the stream + + PARAMETERS : psStream - Stream pointer + ui32Frame - Frame number to set + + RETURNS : None +*****************************************************************************/ +static IMG_VOID DbgSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame) +{ + gpfnDbgDrv->pfnSetFrame(psStream, ui32Frame); +} + 
+/***************************************************************************** + FUNCTION : DbgSetMarker + + PURPOSE : Sets the marker of the stream to split output files + + PARAMETERS : psStream - Stream pointer + ui32Marker - Marker number to set + + RETURNS : None +*****************************************************************************/ +static IMG_VOID DbgSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker) +{ + gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker); +} + +IMG_VOID PDumpSuspendKM(IMG_VOID) +{ + atomic_inc(&gsPDumpSuspended); +} + +IMG_VOID PDumpResumeKM(IMG_VOID) +{ + atomic_dec(&gsPDumpSuspended); +} + +#endif /* #if defined (PDUMP) */ +#endif /* #if defined (SUPPORT_SGX) */ +/***************************************************************************** + End of file (PDUMP.C) +*****************************************************************************/ diff --git a/pvr-source/services4/srvkm/env/linux/private_data.h b/pvr-source/services4/srvkm/env/linux/private_data.h new file mode 100644 index 0000000..6b09705 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/private_data.h @@ -0,0 +1,95 @@ +/*************************************************************************/ /*! +@Title Linux private data structure +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __INCLUDED_PRIVATE_DATA_H_ +#define __INCLUDED_PRIVATE_DATA_H_ + +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) +#include <linux/list.h> +#include <drm/drmP.h> +#endif + +/* This structure is required in the rare case that a process creates + * a connection to services, but before closing the file descriptor, + * does a fork(). This fork() will duplicate the file descriptor in the + * child process. If the parent process dies before the child, this can + * cause the PVRSRVRelease() method to be called in a different process + * context than the original PVRSRVOpen(). This is bad because we need + * to update the per-process data reference count and/or free the + * per-process data. So we must keep a record of which PID's per-process + * data to inspect during ->release(). + */ + +typedef struct +{ + /* PID that created this services connection */ + IMG_UINT32 ui32OpenPID; + + /* Global kernel MemInfo handle */ +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hKernelMemInfo; +#else + IMG_HANDLE hKernelMemInfo; +#endif + +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) + /* The private data is on a list in the per-process data structure */ + struct list_head sDRMAuthListItem; + + struct drm_file *psDRMFile; +#endif + +#if defined(SUPPORT_MEMINFO_IDS) + /* Globally unique "stamp" for kernel MemInfo */ + IMG_UINT64 ui64Stamp; +#endif /* defined(SUPPORT_MEMINFO_IDS) */ + + /* Accounting for OSAllocMem */ + IMG_HANDLE hBlockAlloc; + +#if defined(SUPPORT_DRI_DRM_EXT) + IMG_PVOID pPriv; /*private data for extending this struct*/ +#endif +} +PVRSRV_FILE_PRIVATE_DATA; + +#endif /* __INCLUDED_PRIVATE_DATA_H_ */ + diff --git a/pvr-source/services4/srvkm/env/linux/proc.c b/pvr-source/services4/srvkm/env/linux/proc.c new file mode 100644 index 0000000..7307257 --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/proc.c @@ -0,0 +1,1414 @@ 
+/*************************************************************************/ /*! +@Title Proc files implementation. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Functions for creating and reading proc filesystem entries. + Proc filesystem support must be built into the kernel for + these functions to be any use. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#endif + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/fs.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/sched.h> + +#include "services_headers.h" + +#include "queue.h" +#include "resman.h" +#include "pvrmmap.h" +#include "pvr_debug.h" +#include "pvrversion.h" +#include "proc.h" +#include "perproc.h" +#include "env_perproc.h" +#include "linkage.h" + +#include "lists.h" + +// The proc entry for our /proc/pvr directory +static struct proc_dir_entry * dir; + +static const IMG_CHAR PVRProcDirRoot[] = "pvr"; + +static IMG_INT pvr_proc_open(struct inode *inode,struct file *file); +static void *pvr_proc_seq_start (struct seq_file *m, loff_t *pos); +static void pvr_proc_seq_stop (struct seq_file *m, void *v); +static void *pvr_proc_seq_next (struct seq_file *m, void *v, loff_t *pos); +static int pvr_proc_seq_show (struct seq_file *m, void *v); +static ssize_t pvr_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos); + +static struct file_operations pvr_proc_operations = +{ + .open = pvr_proc_open, + .read = seq_read, + .write = pvr_proc_write, + .llseek = seq_lseek, + .release = seq_release, +}; + +static struct 
seq_operations pvr_proc_seq_operations = +{ + .start = pvr_proc_seq_start, + .next = pvr_proc_seq_next, + .stop = pvr_proc_seq_stop, + .show = pvr_proc_seq_show, +}; + +static struct proc_dir_entry* g_pProcQueue; +static struct proc_dir_entry* g_pProcVersion; +static struct proc_dir_entry* g_pProcSysNodes; + +#ifdef DEBUG +static struct proc_dir_entry* g_pProcDebugLevel; +#endif + +#ifdef PVR_MANUAL_POWER_CONTROL +static struct proc_dir_entry* g_pProcPowerLevel; +#endif + + +static void ProcSeqShowVersion(struct seq_file *sfile,void* el); + +static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el); +static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off); + +/*! +****************************************************************************** + + @Function : printAppend + + @Description + + Print into the supplied buffer at the specified offset remaining within + the specified total buffer size. + + @Input size : the total size of the buffer + + @Input off : the offset into the buffer to start printing + + @Input format : the printf format string + + @Input ... : format args + + @Return : The number of chars now in the buffer (original value of 'off' + plus number of chars added); 'size' if full. + +*****************************************************************************/ +off_t printAppend(IMG_CHAR * buffer, size_t size, off_t off, const IMG_CHAR * format, ...) +{ + IMG_INT n; + size_t space = size - (size_t)off; + va_list ap; + + va_start (ap, format); + + n = vsnprintf (buffer+off, space, format, ap); + + va_end (ap); + /* According to POSIX, n is greater than or equal to the size available if + * the print would have overflowed the buffer. Other platforms may + * return -1 if printing was truncated. + */ + if (n >= (IMG_INT)space || n < 0) + { + /* Ensure final string is terminated */ + buffer[size - 1] = 0; + return (off_t)(size - 1); + } + else + { + return (off + (off_t)n); + } +} + + +/*! 
+****************************************************************************** + + @Function : ProcSeq1ElementOff2Element + + @Description + + Heleper Offset -> Element function for /proc files with only one entry + without header. + + @Input sfile : seq_file object related to /proc/ file + + @Input off : the offset into the buffer (id of object) + + @Return : Pointer to element to be shown. + +*****************************************************************************/ +void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off) +{ + PVR_UNREFERENCED_PARAMETER(sfile); + // Return anything that is not PVR_RPOC_SEQ_START_TOKEN and NULL + if(!off) + return (void*)2; + return NULL; +} + + +/*! +****************************************************************************** + + @Function : ProcSeq1ElementHeaderOff2Element + + @Description + + Heleper Offset -> Element function for /proc files with only one entry + with header. + + @Input sfile : seq_file object related to /proc/ file + + @Input off : the offset into the buffer (id of object) + + @Return : Pointer to element to be shown. + +*****************************************************************************/ +void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off) +{ + PVR_UNREFERENCED_PARAMETER(sfile); + + if(!off) + { + return PVR_PROC_SEQ_START_TOKEN; + } + + // Return anything that is not PVR_RPOC_SEQ_START_TOKEN and NULL + if(off == 1) + return (void*)2; + + return NULL; +} + + +/*! +****************************************************************************** + + @Function : pvr_proc_open + + @Description + File opening function passed to proc_dir_entry->proc_fops for /proc entries + created by CreateProcReadEntrySeq. 
+ + @Input inode : inode entry of opened /proc file + + @Input file : file entry of opened /proc file + + @Return : 0 if no errors + +*****************************************************************************/ +static IMG_INT pvr_proc_open(struct inode *inode,struct file *file) +{ + IMG_INT ret = seq_open(file, &pvr_proc_seq_operations); + + struct seq_file *seq = (struct seq_file*)file->private_data; + struct proc_dir_entry* pvr_proc_entry = PDE(inode); + + /* Add pointer to handlers to seq_file structure */ + seq->private = pvr_proc_entry->data; + return ret; +} + +/*! +****************************************************************************** + + @Function : pvr_proc_write + + @Description + File writing function passed to proc_dir_entry->proc_fops for /proc files. + It's exacly the same function that is used as default one (->fs/proc/generic.c), + it calls proc_dir_entry->write_proc for writing procedure. + +*****************************************************************************/ +static ssize_t pvr_proc_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct inode *inode = file->f_path.dentry->d_inode; + struct proc_dir_entry * dp; + + PVR_UNREFERENCED_PARAMETER(ppos); + dp = PDE(inode); + + if (!dp->write_proc) + return -EIO; + + return dp->write_proc(file, buffer, count, dp->data); +} + + +/*! +****************************************************************************** + + @Function : pvr_proc_seq_start + + @Description + Seq_file start function. Detailed description of seq_file workflow can + be found here: http://tldp.org/LDP/lkmpg/2.6/html/x861.html. + This function ises off2element handler. 
+ + @Input proc_seq_file : sequence file entry + + @Input pos : offset within file (id of entry) + + @Return : Pointer to element from we start enumeration (0 ends it) + +*****************************************************************************/ +static void *pvr_proc_seq_start (struct seq_file *proc_seq_file, loff_t *pos) +{ + PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private; + if(handlers->startstop != NULL) + handlers->startstop(proc_seq_file, IMG_TRUE); + return handlers->off2element(proc_seq_file, *pos); +} + +/*! +****************************************************************************** + + @Function : pvr_proc_seq_stop + + @Description + Seq_file stop function. Detailed description of seq_file workflow can + be found here: http://tldp.org/LDP/lkmpg/2.6/html/x861.html. + + @Input proc_seq_file : sequence file entry + + @Input v : current element pointer + +*****************************************************************************/ +static void pvr_proc_seq_stop (struct seq_file *proc_seq_file, void *v) +{ + PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private; + PVR_UNREFERENCED_PARAMETER(v); + + if(handlers->startstop != NULL) + handlers->startstop(proc_seq_file, IMG_FALSE); +} + +/*! +****************************************************************************** + + @Function : pvr_proc_seq_next + + @Description + Seq_file next element function. Detailed description of seq_file workflow can + be found here: http://tldp.org/LDP/lkmpg/2.6/html/x861.html. 
+ It uses supplied 'next' handler for fetching next element (or 0 if there is no one) + + @Input proc_seq_file : sequence file entry + + @Input pos : offset within file (id of entry) + + @Input v : current element pointer + + @Return : next element pointer (or 0 if end) + +*****************************************************************************/ +static void *pvr_proc_seq_next (struct seq_file *proc_seq_file, void *v, loff_t *pos) +{ + PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private; + (*pos)++; + if( handlers->next != NULL) + return handlers->next( proc_seq_file, v, *pos ); + return handlers->off2element(proc_seq_file, *pos); +} + +/*! +****************************************************************************** + + @Function : pvr_proc_seq_show + + @Description + Seq_file show element function. Detailed description of seq_file workflow can + be found here: http://tldp.org/LDP/lkmpg/2.6/html/x861.html. + It call proper 'show' handler to show (dump) current element using seq_* functions + + @Input proc_seq_file : sequence file entry + + @Input v : current element pointer + + @Return : 0 if everything is OK + +*****************************************************************************/ +static int pvr_proc_seq_show (struct seq_file *proc_seq_file, void *v) +{ + PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private; + handlers->show( proc_seq_file,v ); + return 0; +} + + + +/*! +****************************************************************************** + + @Function : CreateProcEntryInDirSeq + + @Description + + Create a file under the given directory. These dynamic files can be used at + runtime to get or set information about the device. 
Whis version uses seq_file + interface + + @Input pdir : parent directory + + @Input name : the name of the file to create + + @Input data : aditional data that will be passed to handlers + + @Input next_handler : the function to call to provide the next element. OPTIONAL, if not + supplied, then off2element function is used instead + + @Input show_handler : the function to call to show element + + @Input off2element_handler : the function to call when it is needed to translate offest to element + + @Input startstop_handler : the function to call when output memory page starts or stops. OPTIONAL. + + @Input whandler : the function to interpret writes from the user + + @Return Ptr to proc entry , 0 for failure + + +*****************************************************************************/ +static struct proc_dir_entry* CreateProcEntryInDirSeq( + struct proc_dir_entry *pdir, + const IMG_CHAR * name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler, + write_proc_t whandler + ) +{ + + struct proc_dir_entry * file; + mode_t mode; + + if (!dir) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name)); + return NULL; + } + + mode = S_IFREG; + + if (show_handler) + { + mode |= S_IRUGO; + } + + if (whandler) + { + mode |= S_IWUSR; + } + + file=create_proc_entry(name, mode, pdir); + + if (file) + { + PVR_PROC_SEQ_HANDLERS *seq_handlers; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) + file->owner = THIS_MODULE; +#endif + + file->proc_fops = &pvr_proc_operations; + file->write_proc = whandler; + + /* Pass the handlers */ + file->data = kmalloc(sizeof(PVR_PROC_SEQ_HANDLERS), GFP_KERNEL); + if(file->data) + { + seq_handlers = (PVR_PROC_SEQ_HANDLERS*)file->data; + seq_handlers->next = next_handler; + seq_handlers->show = show_handler; + seq_handlers->off2element = 
off2element_handler; + seq_handlers->startstop = startstop_handler; + seq_handlers->data = data; + + return file; + } + } + + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name)); + return NULL; +} + + +/*! +****************************************************************************** + + @Function : CreateProcReadEntrySeq + + @Description + + Create a file under /proc/pvr. These dynamic files can be used at runtime + to get information about the device. Creation WILL fail if proc support is + not compiled into the kernel. That said, the Linux kernel is not even happy + to build without /proc support these days. This version uses seq_file structure + for handling content generation. + + @Input name : the name of the file to create + + @Input data : aditional data that will be passed to handlers + + @Input next_handler : the function to call to provide the next element. OPTIONAL, if not + supplied, then off2element function is used instead + + @Input show_handler : the function to call to show element + + @Input off2element_handler : the function to call when it is needed to translate offest to element + + @Input startstop_handler : the function to call when output memory page starts or stops. OPTIONAL. + + @Return Ptr to proc entry , 0 for failure + +*****************************************************************************/ +struct proc_dir_entry* CreateProcReadEntrySeq ( + const IMG_CHAR * name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler + ) +{ + return CreateProcEntrySeq(name, + data, + next_handler, + show_handler, + off2element_handler, + startstop_handler, + NULL); +} + +/*! 
+****************************************************************************** + + @Function : CreateProcEntrySeq + + @Description + + @Description + + Create a file under /proc/pvr. These dynamic files can be used at runtime + to get information about the device. Creation WILL fail if proc support is + not compiled into the kernel. That said, the Linux kernel is not even happy + to build without /proc support these days. This version uses seq_file structure + for handling content generation and is fuller than CreateProcReadEntrySeq (it + supports write access); + + @Input name : the name of the file to create + + @Input data : aditional data that will be passed to handlers + + @Input next_handler : the function to call to provide the next element. OPTIONAL, if not + supplied, then off2element function is used instead + + @Input show_handler : the function to call to show element + + @Input off2element_handler : the function to call when it is needed to translate offest to element + + @Input startstop_handler : the function to call when output memory page starts or stops. OPTIONAL. + + @Input whandler : the function to interpret writes from the user + + @Return Ptr to proc entry , 0 for failure + +*****************************************************************************/ +struct proc_dir_entry* CreateProcEntrySeq ( + const IMG_CHAR * name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler, + write_proc_t whandler + ) +{ + return CreateProcEntryInDirSeq( + dir, + name, + data, + next_handler, + show_handler, + off2element_handler, + startstop_handler, + whandler + ); +} + + + +/*! +****************************************************************************** + + @Function : CreatePerProcessProcEntrySeq + + @Description + + Create a file under /proc/pvr/<current process ID>. 
Apart from the + directory where the file is created, this works the same way as + CreateProcEntry. It's seq_file version. + + + + @Input name : the name of the file to create + + @Input data : aditional data that will be passed to handlers + + @Input next_handler : the function to call to provide the next element. OPTIONAL, if not + supplied, then off2element function is used instead + + @Input show_handler : the function to call to show element + + @Input off2element_handler : the function to call when it is needed to translate offest to element + + @Input startstop_handler : the function to call when output memory page starts or stops. OPTIONAL. + + @Input whandler : the function to interpret writes from the user + + @Return Ptr to proc entry , 0 for failure + +*****************************************************************************/ +struct proc_dir_entry* CreatePerProcessProcEntrySeq ( + const IMG_CHAR * name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler, + write_proc_t whandler + ) +{ + PVRSRV_ENV_PER_PROCESS_DATA *psPerProc; + IMG_UINT32 ui32PID; + + if (!dir) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: /proc/%s doesn't exist", PVRProcDirRoot)); + return NULL; + } + + ui32PID = OSGetCurrentProcessIDKM(); + + psPerProc = PVRSRVPerProcessPrivateData(ui32PID); + if (!psPerProc) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: no per process data")); + + return NULL; + } + + if (!psPerProc->psProcDir) + { + IMG_CHAR dirname_buffer[256]; + IMG_CHAR dirname[256]; + IMG_INT ret; + const IMG_CHAR *proc_basename = dirname_buffer; + dirname_buffer[255] = dirname[255] = '\0'; + + OSGetProcCmdline(ui32PID, dirname_buffer, sizeof(dirname_buffer)); + PVR_DPF((PVR_DBG_MESSAGE, "Command Line of the process with ID %u is %s", ui32PID, dirname_buffer)); + + proc_basename = 
OSGetPathBaseName(dirname_buffer, sizeof(dirname_buffer)); + PVR_DPF((PVR_DBG_MESSAGE, "Base Name of the process with ID %u is %s\n", ui32PID, proc_basename)); + + ret = snprintf(dirname, sizeof(dirname), "%u-%s", ui32PID, proc_basename); + PVR_DPF((PVR_DBG_MESSAGE, "Creating a new process entry for %s with ID %u\n", proc_basename, ui32PID)); + + if (ret <=0 || ret >= (IMG_INT)sizeof(dirname)) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"", ui32PID)); + return NULL; + } + else + { + psPerProc->psProcDir = proc_mkdir(dirname, dir); + if (!psPerProc->psProcDir) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u", + PVRProcDirRoot, ui32PID)); + return NULL; + } + } + } + + return CreateProcEntryInDirSeq(psPerProc->psProcDir, name, data, next_handler, + show_handler,off2element_handler,startstop_handler,whandler); +} + + +/*! +****************************************************************************** + + @Function : RemoveProcEntrySeq + + @Description + + Remove a single node (created using *Seq function) under /proc/pvr. + + @Input proc_entry : structure returned by Create function. + + @Return nothing + +*****************************************************************************/ +IMG_VOID RemoveProcEntrySeq( struct proc_dir_entry* proc_entry ) +{ + if (dir) + { + void* data = proc_entry->data ; + PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, proc_entry->name)); + + remove_proc_entry(proc_entry->name, dir); + if( data) + kfree( data ); + + } +} + +/*! +****************************************************************************** + + @Function : RemovePerProcessProcEntry Seq + + @Description + + Remove a single node under the per process proc directory (created by *Seq function). + + Remove a single node (created using *Seq function) under /proc/pvr. 
+ + @Input proc_entry : structure returned by Create function. + + @Return nothing + +*****************************************************************************/ +IMG_VOID RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry) +{ + PVRSRV_ENV_PER_PROCESS_DATA *psPerProc; + + psPerProc = LinuxTerminatingProcessPrivateData(); + if (!psPerProc) + { + psPerProc = PVRSRVFindPerProcessPrivateData(); + if (!psPerProc) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: can't " + "remove %s, no per process data", proc_entry->name)); + return; + } + } + + if (psPerProc->psProcDir) + { + void* data = proc_entry->data ; + PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s", proc_entry->name, psPerProc->psProcDir->name)); + + remove_proc_entry(proc_entry->name, psPerProc->psProcDir); + if(data) + kfree( data ); + } +} + +/*! +****************************************************************************** + + @Function : pvr_read_proc_vm + + @Description + + When the user accesses the proc filesystem entry for the device, we are + called here to create the content for the 'file'. We can print anything we + want here. If the info we want to return is too big for one page ('count' + chars), we return successive chunks on each call. For a number of ways of + achieving this, refer to proc_file_read() in linux/fs/proc/generic.c. + + Here, as we are accessing lists of information, we output '1' in '*start' to + instruct proc to advance 'off' by 1 on each call. The number of chars placed + in the buffer is returned. Multiple calls are made here by the proc + filesystem until we set *eof. We can return zero without setting eof to + instruct proc to flush 'page' (causing it to be printed) if there is not + enough space left (eg for a complete line). + + @Input page : where to write the output + + @Input start : memory location into which should be written next offset + to read from. 
+ + @Input off : the offset into the /proc file being read + + @Input count : the size of the buffer 'page' + + @Input eof : memory location into which 1 should be written when at EOF + + @Input data : data specific to this /proc file entry + + @Return : length of string written to page + +*****************************************************************************/ +static IMG_INT pvr_read_proc(IMG_CHAR *page, IMG_CHAR **start, off_t off, + IMG_INT count, IMG_INT *eof, IMG_VOID *data) +{ + /* PRQA S 0307 1 */ /* ignore warning about casting to different pointer type */ + pvr_read_proc_t *pprn = (pvr_read_proc_t *)data; + + off_t len = pprn (page, (size_t)count, off); + + if (len == END_OF_FILE) + { + len = 0; + *eof = 1; + } + else if (!len) /* not enough space in the buffer */ + { + *start = (IMG_CHAR *) 0; /* don't advance the offset */ + } + else + { + *start = (IMG_CHAR *) 1; + } + + return len; +} + + +/*! +****************************************************************************** + + @Function : CreateProcEntryInDir + + @Description + + Create a file under the given directory. These dynamic files can be used at + runtime to get or set information about the device. + + @Input pdir : parent directory + + @Input name : the name of the file to create + + @Input rhandler : the function to supply the content + + @Input whandler : the function to interpret writes from the user + + @Return success code : 0 or -errno. 
+ +*****************************************************************************/ +static IMG_INT CreateProcEntryInDir(struct proc_dir_entry *pdir, const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data) +{ + struct proc_dir_entry * file; + mode_t mode; + + if (!pdir) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDir: parent directory doesn't exist")); + + return -ENOMEM; + } + + mode = S_IFREG; + + if (rhandler) + { + mode |= S_IRUGO; + } + + if (whandler) + { + mode |= S_IWUSR; + } + + file = create_proc_entry(name, mode, pdir); + + if (file) + { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) + file->owner = THIS_MODULE; +#endif + file->read_proc = rhandler; + file->write_proc = whandler; + file->data = data; + + PVR_DPF((PVR_DBG_MESSAGE, "Created proc entry %s in %s", name, pdir->name)); + + return 0; + } + + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntry: cannot create proc entry %s in %s", name, pdir->name)); + + return -ENOMEM; +} + + +/*! +****************************************************************************** + + @Function : CreateProcEntry + + @Description + + Create a file under /proc/pvr. These dynamic files can be used at runtime + to get or set information about the device. + + This interface is fuller than CreateProcReadEntry, and supports write access; + it is really just a wrapper for the native linux functions. + + @Input name : the name of the file to create under /proc/pvr + + @Input rhandler : the function to supply the content + + @Input whandler : the function to interpret writes from the user + + @Return success code : 0 or -errno. + +*****************************************************************************/ +IMG_INT CreateProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data) +{ + return CreateProcEntryInDir(dir, name, rhandler, whandler, data); +} + + +/*! 
+****************************************************************************** + + @Function : CreatePerProcessProcEntry + + @Description + + Create a file under /proc/pvr/<current process ID>. Apart from the + directory where the file is created, this works the same way as + CreateProcEntry. + + @Input name : the name of the file to create under the per process /proc directory + + @Input rhandler : the function to supply the content + + @Input whandler : the function to interpret writes from the user + + @Return success code : 0 or -errno. + +*****************************************************************************/ +IMG_INT CreatePerProcessProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data) +{ + PVRSRV_ENV_PER_PROCESS_DATA *psPerProc; + IMG_UINT32 ui32PID; + + if (!dir) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: /proc/%s doesn't exist", PVRProcDirRoot)); + + return -ENOMEM; + } + + ui32PID = OSGetCurrentProcessIDKM(); + + psPerProc = PVRSRVPerProcessPrivateData(ui32PID); + if (!psPerProc) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: no per process data")); + + return -ENOMEM; + } + + if (!psPerProc->psProcDir) + { + IMG_CHAR dirname[16]; + IMG_INT ret; + + ret = snprintf(dirname, sizeof(dirname), "%u", ui32PID); + + if (ret <=0 || ret >= (IMG_INT)sizeof(dirname)) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"", ui32PID)); + + return -ENOMEM; + } + else + { + psPerProc->psProcDir = proc_mkdir(dirname, dir); + if (!psPerProc->psProcDir) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u", PVRProcDirRoot, ui32PID)); + + return -ENOMEM; + } + } + } + + return CreateProcEntryInDir(psPerProc->psProcDir, name, rhandler, whandler, data); +} + + +/*! 
+****************************************************************************** + + @Function : CreateProcReadEntry + + @Description + + Create a file under /proc/pvr. These dynamic files can be used at runtime + to get information about the device. Creation WILL fail if proc support is + not compiled into the kernel. That said, the Linux kernel is not even happy + to build without /proc support these days. + + @Input name : the name of the file to create + + @Input handler : the function to call to provide the content + + @Return 0 for success, -errno for failure + +*****************************************************************************/ +IMG_INT CreateProcReadEntry(const IMG_CHAR * name, pvr_read_proc_t handler) +{ + struct proc_dir_entry * file; + + if (!dir) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name)); + + return -ENOMEM; + } + + /* PRQA S 0307 1 */ /* ignore warning about casting to different pointer type */ + file = create_proc_read_entry (name, S_IFREG | S_IRUGO, dir, pvr_read_proc, (IMG_VOID *)handler); + + if (file) + { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) + file->owner = THIS_MODULE; +#endif + return 0; + } + + PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name)); + + return -ENOMEM; +} + + +/*! +****************************************************************************** + + @Function : CreateProcEntries + + @Description + + Create a directory /proc/pvr and the necessary entries within it. These + dynamic files can be used at runtime to get information about the device. 
+ Creation might fail if proc support is not compiled into the kernel or if + there is no memory + + @Input none + + @Return nothing + +*****************************************************************************/ +IMG_INT CreateProcEntries(IMG_VOID) +{ + dir = proc_mkdir (PVRProcDirRoot, NULL); + + if (!dir) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: cannot make /proc/%s directory", PVRProcDirRoot)); + + return -ENOMEM; + } + + g_pProcQueue = CreateProcReadEntrySeq("queue", NULL, NULL, ProcSeqShowQueue, ProcSeqOff2ElementQueue, NULL); + g_pProcVersion = CreateProcReadEntrySeq("version", NULL, NULL, ProcSeqShowVersion, ProcSeq1ElementHeaderOff2Element, NULL); + g_pProcSysNodes = CreateProcReadEntrySeq("nodes", NULL, NULL, ProcSeqShowSysNodes, ProcSeqOff2ElementSysNodes, NULL); + + if(!g_pProcQueue || !g_pProcVersion || !g_pProcSysNodes) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s files", PVRProcDirRoot)); + + return -ENOMEM; + } + + +#ifdef DEBUG + + g_pProcDebugLevel = CreateProcEntrySeq("debug_level", NULL, NULL, + ProcSeqShowDebugLevel, + ProcSeq1ElementOff2Element, NULL, + (IMG_VOID*)PVRDebugProcSetLevel); + if(!g_pProcDebugLevel) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/debug_level", PVRProcDirRoot)); + + return -ENOMEM; + } + +#ifdef PVR_MANUAL_POWER_CONTROL + g_pProcPowerLevel = CreateProcEntrySeq("power_control", NULL, NULL, + ProcSeqShowPowerLevel, + ProcSeq1ElementOff2Element, NULL, + PVRProcSetPowerLevel); + if(!g_pProcPowerLevel) + { + PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/power_control", PVRProcDirRoot)); + + return -ENOMEM; + } +#endif +#endif + + return 0; +} + + +/*! +****************************************************************************** + + @Function : RemoveProcEntry + + @Description + + Remove a single node under /proc/pvr. 
+ + @Input name : the name of the node to remove + + @Return nothing + +*****************************************************************************/ +IMG_VOID RemoveProcEntry(const IMG_CHAR * name) +{ + if (dir) + { + remove_proc_entry(name, dir); + PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, name)); + } +} + + +/*! +****************************************************************************** + + @Function : RemovePerProcessProcEntry + + @Description + + Remove a single node under the per process proc directory. + + @Input name : the name of the node to remove + + @Return nothing + +*****************************************************************************/ +IMG_VOID RemovePerProcessProcEntry(const IMG_CHAR *name) +{ + PVRSRV_ENV_PER_PROCESS_DATA *psPerProc; + + psPerProc = LinuxTerminatingProcessPrivateData(); + if (!psPerProc) + { + psPerProc = PVRSRVFindPerProcessPrivateData(); + if (!psPerProc) + { + PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: can't " + "remove %s, no per process data", name)); + return; + } + } + + if (psPerProc->psProcDir) + { + remove_proc_entry(name, psPerProc->psProcDir); + + PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s", name, psPerProc->psProcDir->name)); + } +} + + +/*! +****************************************************************************** + + @Function : RemovePerProcessProcDir + + @Description + + Remove the per process directorty under /proc/pvr. 
+ + @Input psPerProc : environment specific per process data + + @Return nothing + +*****************************************************************************/ +IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psPerProc) +{ + if (psPerProc->psProcDir) + { + while (psPerProc->psProcDir->subdir) + { + PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s/%s", PVRProcDirRoot, psPerProc->psProcDir->name, psPerProc->psProcDir->subdir->name)); + + RemoveProcEntry(psPerProc->psProcDir->subdir->name); + } + RemoveProcEntry(psPerProc->psProcDir->name); + } +} + +/*! +****************************************************************************** + + @Function : RemoveProcEntries + + Description + + Proc filesystem entry deletion - Remove all proc filesystem entries for + the driver. + + @Input none + + @Return nothing + +*****************************************************************************/ +IMG_VOID RemoveProcEntries(IMG_VOID) +{ +#ifdef DEBUG + RemoveProcEntrySeq( g_pProcDebugLevel ); +#ifdef PVR_MANUAL_POWER_CONTROL + RemoveProcEntrySeq( g_pProcPowerLevel ); +#endif /* PVR_MANUAL_POWER_CONTROL */ +#endif + + RemoveProcEntrySeq(g_pProcQueue); + RemoveProcEntrySeq(g_pProcVersion); + RemoveProcEntrySeq(g_pProcSysNodes); + + while (dir->subdir) + { + PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s", PVRProcDirRoot, dir->subdir->name)); + + RemoveProcEntry(dir->subdir->name); + } + + remove_proc_entry(PVRProcDirRoot, NULL); +} + +/***************************************************************************** + FUNCTION : ProcSeqShowVersion + + PURPOSE : Print the content of version to /proc file + + PARAMETERS : sfile - /proc seq_file + el - Element to print +*****************************************************************************/ +static void ProcSeqShowVersion(struct seq_file *sfile,void* el) +{ + SYS_DATA *psSysData; + IMG_CHAR *pszSystemVersionString = "None"; + + if(el == PVR_PROC_SEQ_START_TOKEN) + { + seq_printf(sfile, + 
"Version %s (%s) %s\n", + PVRVERSION_STRING, + PVR_BUILD_TYPE, PVR_BUILD_DIR); + return; + } + + psSysData = SysAcquireDataNoCheck(); + if(psSysData != IMG_NULL && psSysData->pszVersionString != IMG_NULL) + { + pszSystemVersionString = psSysData->pszVersionString; + } + + seq_printf( sfile, "System Version String: %s\n", pszSystemVersionString); +} + +/*! +****************************************************************************** + + @Function procDumpSysNodes (plus deviceTypeToString and deviceClassToString) + + @Description + + Format the contents of /proc/pvr/nodes + + @Input buf : where to place format contents data. + + @Input size : the size of the buffer into which to place data + + @Input off : how far into the file we are. + + @Return amount of data placed in buffer, 0, or END_OF_FILE : + +******************************************************************************/ +static const IMG_CHAR *deviceTypeToString(PVRSRV_DEVICE_TYPE deviceType) +{ + switch (deviceType) + { + default: + { + static IMG_CHAR text[10]; + + sprintf(text, "?%x", (IMG_UINT)deviceType); + + return text; + } + } +} + + +static const IMG_CHAR *deviceClassToString(PVRSRV_DEVICE_CLASS deviceClass) +{ + switch (deviceClass) + { + case PVRSRV_DEVICE_CLASS_3D: + { + return "3D"; + } + case PVRSRV_DEVICE_CLASS_DISPLAY: + { + return "display"; + } + case PVRSRV_DEVICE_CLASS_BUFFER: + { + return "buffer"; + } + default: + { + static IMG_CHAR text[10]; + + sprintf(text, "?%x", (IMG_UINT)deviceClass); + return text; + } + } +} + +static IMG_VOID* DecOffPsDev_AnyVaCb(PVRSRV_DEVICE_NODE *psNode, va_list va) +{ + off_t *pOff = va_arg(va, off_t*); + if (--(*pOff)) + { + return IMG_NULL; + } + else + { + return psNode; + } +} + +/***************************************************************************** + FUNCTION : ProcSeqShowSysNodes + + PURPOSE : Print the content of version to /proc file + + PARAMETERS : sfile - /proc seq_file + el - Element to print 
+*****************************************************************************/ +static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el) +{ + PVRSRV_DEVICE_NODE *psDevNode; + + if(el == PVR_PROC_SEQ_START_TOKEN) + { + seq_printf( sfile, + "Registered nodes\n" + "Addr Type Class Index Ref pvDev Size Res\n"); + return; + } + + psDevNode = (PVRSRV_DEVICE_NODE*)el; + + seq_printf( sfile, + "%p %-8s %-8s %4d %2u %p %3u %p\n", + psDevNode, + deviceTypeToString(psDevNode->sDevId.eDeviceType), + deviceClassToString(psDevNode->sDevId.eDeviceClass), + psDevNode->sDevId.eDeviceClass, + psDevNode->ui32RefCount, + psDevNode->pvDevice, + psDevNode->ui32pvDeviceSize, + psDevNode->hResManContext); +} + +/***************************************************************************** + FUNCTION : ProcSeqOff2ElementSysNodes + + PURPOSE : Transale offset to element (/proc stuff) + + PARAMETERS : sfile - /proc seq_file + off - the offset into the buffer + + RETURNS : element to print +*****************************************************************************/ +static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off) +{ + SYS_DATA *psSysData; + PVRSRV_DEVICE_NODE*psDevNode = IMG_NULL; + + PVR_UNREFERENCED_PARAMETER(sfile); + + if(!off) + { + return PVR_PROC_SEQ_START_TOKEN; + } + + psSysData = SysAcquireDataNoCheck(); + if (psSysData != IMG_NULL) + { + /* Find Dev Node */ + psDevNode = (PVRSRV_DEVICE_NODE*) + List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList, + DecOffPsDev_AnyVaCb, + &off); + } + + /* Return anything that is not PVR_RPOC_SEQ_START_TOKEN and NULL */ + return (void*)psDevNode; +} + +/***************************************************************************** + End of file (proc.c) +*****************************************************************************/ diff --git a/pvr-source/services4/srvkm/env/linux/proc.h b/pvr-source/services4/srvkm/env/linux/proc.h new file mode 100644 index 0000000..bc2a554 --- /dev/null +++ 
b/pvr-source/services4/srvkm/env/linux/proc.h @@ -0,0 +1,127 @@ +/*************************************************************************/ /*! +@Title Proc interface definition. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Functions for creating and reading proc filesystem entries. + Refer to proc.c +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __SERVICES_PROC_H__ +#define __SERVICES_PROC_H__ + +#include <asm/system.h> // va_list etc +#include <linux/proc_fs.h> // read_proc_t etc +#include <linux/seq_file.h> // seq_file + +#define END_OF_FILE (off_t) -1 + +typedef off_t (pvr_read_proc_t)(IMG_CHAR *, size_t, off_t); + + +#define PVR_PROC_SEQ_START_TOKEN (void*)1 +typedef void* (pvr_next_proc_seq_t)(struct seq_file *,void*,loff_t); +typedef void* (pvr_off2element_proc_seq_t)(struct seq_file *, loff_t); +typedef void (pvr_show_proc_seq_t)(struct seq_file *,void*); +typedef void (pvr_startstop_proc_seq_t)(struct seq_file *, IMG_BOOL start); + +typedef struct _PVR_PROC_SEQ_HANDLERS_ { + pvr_next_proc_seq_t *next; + pvr_show_proc_seq_t *show; + pvr_off2element_proc_seq_t *off2element; + pvr_startstop_proc_seq_t *startstop; + IMG_VOID *data; +} PVR_PROC_SEQ_HANDLERS; + + +/** off2element function for elements with only ONE element (no header) */ +void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off); + +/** off2element function for elements with only ONE element (+ header) */ +void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off); + +off_t printAppend(IMG_CHAR * buffer, size_t size, off_t off, const IMG_CHAR * format, ...) 
+ __attribute__((format(printf, 4, 5))); + +IMG_INT CreateProcEntries(IMG_VOID); + +IMG_INT CreateProcReadEntry (const IMG_CHAR * name, pvr_read_proc_t handler); + +IMG_INT CreateProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data); + +IMG_INT CreatePerProcessProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data); + +IMG_VOID RemoveProcEntry(const IMG_CHAR * name); + +IMG_VOID RemovePerProcessProcEntry(const IMG_CHAR * name); + +IMG_VOID RemoveProcEntries(IMG_VOID); + +struct proc_dir_entry* CreateProcReadEntrySeq ( + const IMG_CHAR* name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler + ); + +struct proc_dir_entry* CreateProcEntrySeq ( + const IMG_CHAR* name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler, + write_proc_t whandler + ); + +struct proc_dir_entry* CreatePerProcessProcEntrySeq ( + const IMG_CHAR* name, + IMG_VOID* data, + pvr_next_proc_seq_t next_handler, + pvr_show_proc_seq_t show_handler, + pvr_off2element_proc_seq_t off2element_handler, + pvr_startstop_proc_seq_t startstop_handler, + write_proc_t whandler + ); + + +IMG_VOID RemoveProcEntrySeq(struct proc_dir_entry* proc_entry); +IMG_VOID RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry); + +#endif diff --git a/pvr-source/services4/srvkm/env/linux/pvr_bridge_k.c b/pvr-source/services4/srvkm/env/linux/pvr_bridge_k.c new file mode 100644 index 0000000..5d1ad8c --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/pvr_bridge_k.c @@ -0,0 +1,524 @@ +/*************************************************************************/ /*! +@Title PVR Bridge Module (kernel side) +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Receives calls from the user portion of services and + despatches them to functions in the kernel portion. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include "img_defs.h" +#include "services.h" +#include "pvr_bridge.h" +#include "perproc.h" +#include "mutex.h" +#include "syscommon.h" +#include "pvr_debug.h" +#include "proc.h" +#include "private_data.h" +#include "linkage.h" +#include "pvr_bridge_km.h" +#include "pvr_uaccess.h" +#include "refcount.h" +#include "buffer_manager.h" + +#if defined(SUPPORT_DRI_DRM) +#include <drm/drmP.h> +#include "pvr_drm.h" +#if defined(PVR_SECURE_DRM_AUTH_EXPORT) +#include "env_perproc.h" +#endif +#endif + +/* VGX: */ +#if defined(SUPPORT_VGX) +#include "vgx_bridge.h" +#endif + +/* SGX: */ +#if defined(SUPPORT_SGX) +#include "sgx_bridge.h" +#endif + +#include "bridged_pvr_bridge.h" + +#if defined(SUPPORT_DRI_DRM) +#define PRIVATE_DATA(pFile) ((pFile)->driver_priv) +#else +#define PRIVATE_DATA(pFile) ((pFile)->private_data) +#endif + +#if defined(DEBUG_BRIDGE_KM) + +static struct proc_dir_entry *g_ProcBridgeStats =0; +static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off); +static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el); +static void* ProcSeqOff2ElementBridgeStats(struct seq_file * sfile, loff_t off); +static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start); + +#endif + +extern PVRSRV_LINUX_MUTEX gPVRSRVLock; + +#if defined(SUPPORT_MEMINFO_IDS) +static IMG_UINT64 ui64Stamp; +#endif /* 
defined(SUPPORT_MEMINFO_IDS) */ + +PVRSRV_ERROR +LinuxBridgeInit(IMG_VOID) +{ +#if defined(DEBUG_BRIDGE_KM) + { + g_ProcBridgeStats = CreateProcReadEntrySeq( + "bridge_stats", + NULL, + ProcSeqNextBridgeStats, + ProcSeqShowBridgeStats, + ProcSeqOff2ElementBridgeStats, + ProcSeqStartstopBridgeStats + ); + if(!g_ProcBridgeStats) + { + return PVRSRV_ERROR_OUT_OF_MEMORY; + } + } +#endif + return CommonBridgeInit(); +} + +IMG_VOID +LinuxBridgeDeInit(IMG_VOID) +{ +#if defined(DEBUG_BRIDGE_KM) + RemoveProcEntrySeq(g_ProcBridgeStats); +#endif +} + +#if defined(DEBUG_BRIDGE_KM) + +/* + * Lock MMap regions list (called on page start/stop while reading /proc/mmap) + * + * sfile : seq_file that handles /proc file + * start : TRUE if it's start, FALSE if it's stop + * + */ +static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start) +{ + if(start) + { + LinuxLockMutex(&gPVRSRVLock); + } + else + { + LinuxUnLockMutex(&gPVRSRVLock); + } +} + + +/* + * Convert offset (index from KVOffsetTable) to element + * (called when reading /proc/mmap file) + + * sfile : seq_file that handles /proc file + * off : index into the KVOffsetTable from which to print + * + * returns void* : Pointer to element that will be dumped + * +*/ +static void* ProcSeqOff2ElementBridgeStats(struct seq_file *sfile, loff_t off) +{ + if(!off) + { + return PVR_PROC_SEQ_START_TOKEN; + } + + if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) + { + return (void*)0; + } + + + return (void*)&g_BridgeDispatchTable[off-1]; +} + +/* + * Gets next MMap element to show. 
(called when reading /proc/mmap file) + + * sfile : seq_file that handles /proc file + * el : actual element + * off : index into the KVOffsetTable from which to print + * + * returns void* : Pointer to element to show (0 ends iteration) +*/ +static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off) +{ + return ProcSeqOff2ElementBridgeStats(sfile,off); +} + + +/* + * Show MMap element (called when reading /proc/mmap file) + + * sfile : seq_file that handles /proc file + * el : actual element + * +*/ +static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el) +{ + PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = ( PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY*)el; + + if(el == PVR_PROC_SEQ_START_TOKEN) + { + seq_printf(sfile, + "Total ioctl call count = %u\n" + "Total number of bytes copied via copy_from_user = %u\n" + "Total number of bytes copied via copy_to_user = %u\n" + "Total number of bytes copied via copy_*_user = %u\n\n" + "%-45s | %-40s | %10s | %20s | %10s\n", + g_BridgeGlobalStats.ui32IOCTLCount, + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes, + g_BridgeGlobalStats.ui32TotalCopyToUserBytes, + g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes, + "Bridge Name", + "Wrapper Function", + "Call Count", + "copy_from_user Bytes", + "copy_to_user Bytes" + ); + return; + } + + seq_printf(sfile, + "%-45s %-40s %-10u %-20u %-10u\n", + psEntry->pszIOCName, + psEntry->pszFunctionName, + psEntry->ui32CallCount, + psEntry->ui32CopyFromUserTotalBytes, + psEntry->ui32CopyToUserTotalBytes); +} + +#endif /* DEBUG_BRIDGE_KM */ + + +#if defined(SUPPORT_DRI_DRM) +int +PVRSRV_BridgeDispatchKM(struct drm_device unref__ *dev, void *arg, struct drm_file *pFile) +#else +long +PVRSRV_BridgeDispatchKM(struct file *pFile, unsigned int unref__ ioctlCmd, unsigned long arg) +#endif +{ + IMG_UINT32 cmd; +#if !defined(SUPPORT_DRI_DRM) + PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = (PVRSRV_BRIDGE_PACKAGE *)arg; + 
PVRSRV_BRIDGE_PACKAGE sBridgePackageKM; +#endif + PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM; + IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); + PVRSRV_PER_PROCESS_DATA *psPerProc; + IMG_INT err = -EFAULT; + + LinuxLockMutex(&gPVRSRVLock); + +#if defined(SUPPORT_DRI_DRM) + psBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE *)arg; + PVR_ASSERT(psBridgePackageKM != IMG_NULL); +#else + psBridgePackageKM = &sBridgePackageKM; + + if(!OSAccessOK(PVR_VERIFY_WRITE, + psBridgePackageUM, + sizeof(PVRSRV_BRIDGE_PACKAGE))) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments", + __FUNCTION__)); + + goto unlock_and_return; + } + + /* FIXME - Currently the CopyFromUserWrapper which collects stats about + * how much data is shifted to/from userspace isn't available to us + * here. */ + if(OSCopyFromUser(IMG_NULL, + psBridgePackageKM, + psBridgePackageUM, + sizeof(PVRSRV_BRIDGE_PACKAGE)) + != PVRSRV_OK) + { + goto unlock_and_return; + } +#endif + + cmd = psBridgePackageKM->ui32BridgeID; + + if(cmd != PVRSRV_BRIDGE_CONNECT_SERVICES) + { + PVRSRV_ERROR eError; + + eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE, + (IMG_PVOID *)&psPerProc, + psBridgePackageKM->hKernelServices, + PVRSRV_HANDLE_TYPE_PERPROC_DATA); + if(eError != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Invalid kernel services handle (%d)", + __FUNCTION__, eError)); + goto unlock_and_return; + } + + if(psPerProc->ui32PID != ui32PID) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Process %d tried to access data " + "belonging to process %d", __FUNCTION__, ui32PID, + psPerProc->ui32PID)); + goto unlock_and_return; + } + } + else + { + /* lookup per-process data for this process */ + psPerProc = PVRSRVPerProcessData(ui32PID); + if(psPerProc == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: " + "Couldn't create per-process data area")); + goto unlock_and_return; + } + } + + psBridgePackageKM->ui32BridgeID = PVRSRV_GET_BRIDGE_ID(psBridgePackageKM->ui32BridgeID); + + switch(cmd) + { + case 
PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2: + { + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + + if(psPrivateData->hKernelMemInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Can only export one MemInfo " + "per file descriptor", __FUNCTION__)); + err = -EINVAL; + goto unlock_and_return; + } + break; + } + + case PVRSRV_BRIDGE_MAP_DEV_MEMORY_2: + { + PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN = + (PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamIn; + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + + if(!psPrivateData->hKernelMemInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: File descriptor has no " + "associated MemInfo handle", __FUNCTION__)); + err = -EINVAL; + goto unlock_and_return; + } + + if (pvr_put_user(psPrivateData->hKernelMemInfo, &psMapDevMemIN->hKernelMemInfo) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } + break; + } + + default: + { + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + + if(psPrivateData->hKernelMemInfo) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Import/Export handle tried " + "to use privileged service", __FUNCTION__)); + goto unlock_and_return; + } + break; + } + } + +#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) + switch(cmd) + { + case PVRSRV_BRIDGE_MAP_DEV_MEMORY: + case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY: + { + PVRSRV_FILE_PRIVATE_DATA *psPrivateData; + int authenticated = pFile->authenticated; + PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc; + + if (authenticated) + { + break; + } + + /* + * The DRM file structure we are using for Services + * is not one that DRI authentication was done on. + * Look for an authenticated file structure for + * this process, making sure the DRM master is the + * same as ours. 
+ */ + psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)PVRSRVProcessPrivateData(psPerProc); + if (psEnvPerProc == IMG_NULL) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Process private data not allocated", __FUNCTION__)); + err = -EFAULT; + goto unlock_and_return; + } + + list_for_each_entry(psPrivateData, &psEnvPerProc->sDRMAuthListHead, sDRMAuthListItem) + { + struct drm_file *psDRMFile = psPrivateData->psDRMFile; + + if (pFile->master == psDRMFile->master) + { + authenticated |= psDRMFile->authenticated; + if (authenticated) + { + break; + } + } + } + + if (!authenticated) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Not authenticated for mapping device or device class memory", __FUNCTION__)); + err = -EPERM; + goto unlock_and_return; + } + break; + } + default: + break; + } +#endif /* defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT) */ + + err = BridgedDispatchKM(psPerProc, psBridgePackageKM); + if(err != PVRSRV_OK) + goto unlock_and_return; + + switch(cmd) + { + case PVRSRV_BRIDGE_EXPORT_DEVICEMEM_2: + { + PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT = + (PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *)psBridgePackageKM->pvParamOut; + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + IMG_HANDLE hMemInfo; + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; + + if (pvr_get_user(hMemInfo, &psExportDeviceMemOUT->hMemInfo) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } + + /* Look up the meminfo we just exported */ + if(PVRSRVLookupHandle(KERNEL_HANDLE_BASE, + (IMG_PVOID *)&psKernelMemInfo, + hMemInfo, + PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK) + { + PVR_DPF((PVR_DBG_ERROR, "%s: Failed to look up export handle", __FUNCTION__)); + err = -EFAULT; + goto unlock_and_return; + } + + /* Bump the refcount; decremented on release of the fd */ + PVRSRVKernelMemInfoIncRef(psKernelMemInfo); + + /* Tell the XProc about the export if required */ + if (psKernelMemInfo->sShareMemWorkaround.bInUse) + { + 
BM_XProcIndexAcquire(psKernelMemInfo->sShareMemWorkaround.ui32ShareIndex); + } + + psPrivateData->hKernelMemInfo = hMemInfo; +#if defined(SUPPORT_MEMINFO_IDS) + psPrivateData->ui64Stamp = ++ui64Stamp; + + psKernelMemInfo->ui64Stamp = psPrivateData->ui64Stamp; + if (pvr_put_user(psPrivateData->ui64Stamp, &psExportDeviceMemOUT->ui64Stamp) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } +#endif + break; + } + +#if defined(SUPPORT_MEMINFO_IDS) + case PVRSRV_BRIDGE_MAP_DEV_MEMORY: + case PVRSRV_BRIDGE_MAP_DEV_MEMORY_2: + { + PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDeviceMemoryOUT = + (PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamOut; + PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); + if (pvr_put_user(psPrivateData->ui64Stamp, &psMapDeviceMemoryOUT->sDstClientMemInfo.ui64Stamp) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } + break; + } + + case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY: + { + PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psDeviceClassMemoryOUT = + (PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *)psBridgePackageKM->pvParamOut; + if (pvr_put_user(++ui64Stamp, &psDeviceClassMemoryOUT->sClientMemInfo.ui64Stamp) != 0) + { + err = -EFAULT; + goto unlock_and_return; + } + break; + } +#endif /* defined(SUPPORT_MEMINFO_IDS) */ + + default: + break; + } + +unlock_and_return: + LinuxUnLockMutex(&gPVRSRVLock); + return err; +} diff --git a/pvr-source/services4/srvkm/env/linux/pvr_debug.c b/pvr-source/services4/srvkm/env/linux/pvr_debug.c new file mode 100644 index 0000000..04e42ad --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/pvr_debug.c @@ -0,0 +1,506 @@ +/*************************************************************************/ /*! +@Title Debug Functionality +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides kernel side Debug Functionality +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#endif + +#include <asm/io.h> +#include <asm/uaccess.h> +#include <linux/kernel.h> +#include <linux/hardirq.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/string.h> // strncpy, strlen +#include <stdarg.h> +#include "img_types.h" +#include "servicesext.h" +#include "pvr_debug.h" +#include "srvkm.h" +#include "proc.h" +#include "mutex.h" +#include "linkage.h" +#include "pvr_uaccess.h" + +#if !defined(CONFIG_PREEMPT) +#define PVR_DEBUG_ALWAYS_USE_SPINLOCK +#endif + +static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, + const IMG_CHAR* pszFormat, va_list VArgs) + IMG_FORMAT_PRINTF(3, 0); + + +#if defined(PVRSRV_NEED_PVR_DPF) + +#define PVR_MAX_FILEPATH_LEN 256 + +static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, + const IMG_CHAR *pszFormat, ...) + IMG_FORMAT_PRINTF(3, 4); + +/* NOTE: Must NOT be static! Used in module.c.. 
*/ +IMG_UINT32 gPVRDebugLevel = + (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING); + +#endif /* defined(PVRSRV_NEED_PVR_DPF) || defined(PVRSRV_NEED_PVR_TRACE) */ + +#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN + +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) +/* Message buffer for non-IRQ messages */ +static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1]; +#endif + +/* Message buffer for IRQ messages */ +static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1]; + +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) +/* The lock is used to control access to gszBufferNonIRQ */ +static PVRSRV_LINUX_MUTEX gsDebugMutexNonIRQ; +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)) +/* The lock is used to control access to gszBufferIRQ */ +/* PRQA S 0671,0685 1 */ /* ignore warnings about C99 style initialisation */ +static spinlock_t gsDebugLockIRQ = SPIN_LOCK_UNLOCKED; +#else +static DEFINE_SPINLOCK(gsDebugLockIRQ); +#endif + +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) +#if !defined (USE_SPIN_LOCK) /* to keep QAC happy */ +#define USE_SPIN_LOCK (in_interrupt() || !preemptible()) +#endif +#endif + +static inline void GetBufferLock(unsigned long *pulLockFlags) +{ +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + if (USE_SPIN_LOCK) +#endif + { + spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags); + } +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + else + { + LinuxLockMutex(&gsDebugMutexNonIRQ); + } +#endif +} + +static inline void ReleaseBufferLock(unsigned long ulLockFlags) +{ +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + if (USE_SPIN_LOCK) +#endif + { + spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags); + } +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + else + { + LinuxUnLockMutex(&gsDebugMutexNonIRQ); + } +#endif +} + +static inline void SelectBuffer(IMG_CHAR **ppszBuf, IMG_UINT32 *pui32BufSiz) +{ +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + if (USE_SPIN_LOCK) +#endif + { + *ppszBuf = gszBufferIRQ; + *pui32BufSiz = sizeof(gszBufferIRQ); + } +#if 
!defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + else + { + *ppszBuf = gszBufferNonIRQ; + *pui32BufSiz = sizeof(gszBufferNonIRQ); + } +#endif +} + +/* + * Append a string to a buffer using formatted conversion. + * The function takes a variable number of arguments, pointed + * to by the var args list. + */ +static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR* pszFormat, va_list VArgs) +{ + IMG_UINT32 ui32Used; + IMG_UINT32 ui32Space; + IMG_INT32 i32Len; + + ui32Used = strlen(pszBuf); + BUG_ON(ui32Used >= ui32BufSiz); + ui32Space = ui32BufSiz - ui32Used; + + i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs); + pszBuf[ui32BufSiz - 1] = 0; + + /* Return true if string was truncated */ + return (i32Len < 0 || i32Len >= (IMG_INT32)ui32Space) ? IMG_TRUE : IMG_FALSE; +} + +/* Actually required for ReleasePrintf too */ + +IMG_VOID PVRDPFInit(IMG_VOID) +{ +#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK) + LinuxInitMutex(&gsDebugMutexNonIRQ); +#endif +} + +/*! +****************************************************************************** + @Function PVRSRVReleasePrintf + @Description To output an important message to the user in release builds + @Input pszFormat - The message format string + @Input ... - Zero or more arguments for use by the format string + @Return None + ******************************************************************************/ +IMG_VOID PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) +{ + va_list vaArgs; + unsigned long ulLockFlags = 0; + IMG_CHAR *pszBuf; + IMG_UINT32 ui32BufSiz; + + SelectBuffer(&pszBuf, &ui32BufSiz); + + va_start(vaArgs, pszFormat); + + GetBufferLock(&ulLockFlags); + strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1)); + + if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) + { + printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf); + } + else + { + printk(KERN_INFO "%s\n", pszBuf); + } + + ReleaseBufferLock(ulLockFlags); + va_end(vaArgs); + +} + +#if defined(PVRSRV_NEED_PVR_TRACE) + +/*! 
+****************************************************************************** + @Function PVRTrace + @Description To output a debug message to the user + @Input pszFormat - The message format string + @Input ... - Zero or more arguments for use by the format string + @Return None + ******************************************************************************/ +IMG_VOID PVRSRVTrace(const IMG_CHAR* pszFormat, ...) +{ + va_list VArgs; + unsigned long ulLockFlags = 0; + IMG_CHAR *pszBuf; + IMG_UINT32 ui32BufSiz; + + SelectBuffer(&pszBuf, &ui32BufSiz); + + va_start(VArgs, pszFormat); + + GetBufferLock(&ulLockFlags); + + strncpy(pszBuf, "PVR: ", (ui32BufSiz -1)); + + if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs)) + { + printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf); + } + else + { + printk(KERN_INFO "%s\n", pszBuf); + } + + ReleaseBufferLock(ulLockFlags); + + va_end(VArgs); +} + +#endif /* defined(PVRSRV_NEED_PVR_TRACE) */ + +#if defined(PVRSRV_NEED_PVR_DPF) + +/* + * Append a string to a buffer using formatted conversion. + * The function takes a variable number of arguments, calling + * VBAppend to do the actual work. + */ +static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...) +{ + va_list VArgs; + IMG_BOOL bTrunc; + + va_start (VArgs, pszFormat); + + bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs); + + va_end (VArgs); + + return bTrunc; +} + +/*! +****************************************************************************** + @Function PVRSRVDebugPrintf + @Description To output a debug message to the user + @Input uDebugLevel - The current debug level + @Input pszFile - The source file generating the message + @Input uLine - The line of the source file + @Input pszFormat - The message format string + @Input ... 
- Zero or more arguments for use by the format string + @Return None + ******************************************************************************/ +IMG_VOID PVRSRVDebugPrintf ( + IMG_UINT32 ui32DebugLevel, + const IMG_CHAR* pszFullFileName, + IMG_UINT32 ui32Line, + const IMG_CHAR* pszFormat, + ... + ) +{ + IMG_BOOL bTrace; + const IMG_CHAR *pszFileName = pszFullFileName; + IMG_CHAR *pszLeafName; + + + bTrace = (IMG_BOOL)(ui32DebugLevel & DBGPRIV_CALLTRACE) ? IMG_TRUE : IMG_FALSE; + + if (gPVRDebugLevel & ui32DebugLevel) + { + va_list vaArgs; + unsigned long ulLockFlags = 0; + IMG_CHAR *pszBuf; + IMG_UINT32 ui32BufSiz; + + SelectBuffer(&pszBuf, &ui32BufSiz); + + va_start(vaArgs, pszFormat); + + GetBufferLock(&ulLockFlags); + + /* Add in the level of warning */ + if (bTrace == IMG_FALSE) + { + switch(ui32DebugLevel) + { + case DBGPRIV_FATAL: + { + strncpy (pszBuf, "PVR_K:(Fatal): ", (ui32BufSiz -1)); + break; + } + case DBGPRIV_ERROR: + { + strncpy (pszBuf, "PVR_K:(Error): ", (ui32BufSiz -1)); + break; + } + case DBGPRIV_WARNING: + { + strncpy (pszBuf, "PVR_K:(Warning): ", (ui32BufSiz -1)); + break; + } + case DBGPRIV_MESSAGE: + { + strncpy (pszBuf, "PVR_K:(Message): ", (ui32BufSiz -1)); + break; + } + case DBGPRIV_VERBOSE: + { + strncpy (pszBuf, "PVR_K:(Verbose): ", (ui32BufSiz -1)); + break; + } + default: + { + strncpy (pszBuf, "PVR_K:(Unknown message level)", (ui32BufSiz -1)); + break; + } + } + } + else + { + strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1)); + } + + if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) + { + printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf); + } + else + { + /* Traces don't need a location */ + if (bTrace == IMG_FALSE) + { +#ifdef DEBUG_LOG_PATH_TRUNCATE + /* Buffer for rewriting filepath in log messages */ + static IMG_CHAR szFileNameRewrite[PVR_MAX_FILEPATH_LEN]; + + IMG_CHAR* pszTruncIter; + IMG_CHAR* pszTruncBackInter; + + /* Truncate path (DEBUG_LOG_PATH_TRUNCATE shoud be set to EURASIA env var)*/ + if 
(strlen(pszFullFileName) > strlen(DEBUG_LOG_PATH_TRUNCATE)+1) + pszFileName = pszFullFileName + strlen(DEBUG_LOG_PATH_TRUNCATE)+1; + + /* Try to find '/../' entries and remove it together with + previous entry. Repeat unit all removed */ + strncpy(szFileNameRewrite, pszFileName,PVR_MAX_FILEPATH_LEN); + + if(strlen(szFileNameRewrite) == PVR_MAX_FILEPATH_LEN-1) { + IMG_CHAR szTruncateMassage[] = "FILENAME TRUNCATED"; + strcpy(szFileNameRewrite + (PVR_MAX_FILEPATH_LEN - 1 - strlen(szTruncateMassage)), szTruncateMassage); + } + + pszTruncIter = szFileNameRewrite; + while(*pszTruncIter++ != 0) + { + IMG_CHAR* pszNextStartPoint; + /* Find '/../' pattern */ + if( + !( ( *pszTruncIter == '/' && (pszTruncIter-4 >= szFileNameRewrite) ) && + ( *(pszTruncIter-1) == '.') && + ( *(pszTruncIter-2) == '.') && + ( *(pszTruncIter-3) == '/') ) + ) continue; + + /* Find previous '/' */ + pszTruncBackInter = pszTruncIter - 3; + while(*(--pszTruncBackInter) != '/') + { + if(pszTruncBackInter <= szFileNameRewrite) break; + } + pszNextStartPoint = pszTruncBackInter; + + /* Remove found region */ + while(*pszTruncIter != 0) + { + *pszTruncBackInter++ = *pszTruncIter++; + } + *pszTruncBackInter = 0; + + /* Start again */ + pszTruncIter = pszNextStartPoint; + } + + pszFileName = szFileNameRewrite; + /* Remove first '/' if exist (it's always relative path */ + if(*pszFileName == '/') pszFileName++; +#endif + +#if !defined(__sh__) + pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '\\'); + + if (pszLeafName) + { + pszFileName = pszLeafName; + } +#endif /* __sh__ */ + + if (BAppend(pszBuf, ui32BufSiz, " [%u, %s]", ui32Line, pszFileName)) + { + printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf); + } + else + { + printk(KERN_INFO "%s\n", pszBuf); + } + } + else + { + printk(KERN_INFO "%s\n", pszBuf); + } + } + + ReleaseBufferLock(ulLockFlags); + + va_end (vaArgs); + } +} + +#endif /* PVRSRV_NEED_PVR_DPF */ + +#if defined(DEBUG) + +IMG_INT PVRDebugProcSetLevel(struct file *file, const 
IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data) +{ +#define _PROC_SET_BUFFER_SZ 6 + IMG_CHAR data_buffer[_PROC_SET_BUFFER_SZ]; + + if (count > _PROC_SET_BUFFER_SZ) + { + return -EINVAL; + } + else + { + if (pvr_copy_from_user(data_buffer, buffer, count)) + return -EINVAL; + if (data_buffer[count - 1] != '\n') + return -EINVAL; + if (sscanf(data_buffer, "%i", &gPVRDebugLevel) == 0) + return -EINVAL; + gPVRDebugLevel &= (1 << DBGPRIV_DBGLEVEL_COUNT) - 1; + } + return (count); +} + +void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el) +{ + seq_printf(sfile, "%u\n", gPVRDebugLevel); +} + +#endif /* defined(DEBUG) */ diff --git a/pvr-source/services4/srvkm/env/linux/pvr_uaccess.h b/pvr-source/services4/srvkm/env/linux/pvr_uaccess.h new file mode 100644 index 0000000..7583d7e --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/pvr_uaccess.h @@ -0,0 +1,88 @@ +/*************************************************************************/ /*! +@Title Utility functions for user space access +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __PVR_UACCESS_H__ +#define __PVR_UACCESS_H__ + +#include <linux/version.h> + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#endif + +#include <asm/uaccess.h> + +static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) + if (access_ok(VERIFY_WRITE, pvTo, ulBytes)) + { + return __copy_to_user(pvTo, pvFrom, ulBytes); + } + return ulBytes; +#else + return copy_to_user(pvTo, pvFrom, ulBytes); +#endif +} + +static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) + /* + * The compile time correctness checking introduced for copy_from_user in + * Linux 2.6.33 isn't fully comaptible with our usage of the function. + */ + if (access_ok(VERIFY_READ, pvFrom, ulBytes)) + { + return __copy_from_user(pvTo, pvFrom, ulBytes); + } + return ulBytes; +#else + return copy_from_user(pvTo, pvFrom, ulBytes); +#endif +} + +#define pvr_put_user put_user +#define pvr_get_user get_user + +#endif /* __PVR_UACCESS_H__ */ + diff --git a/pvr-source/services4/srvkm/env/linux/sysfs.c b/pvr-source/services4/srvkm/env/linux/sysfs.c new file mode 100644 index 0000000..63066ad --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/sysfs.c @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2012 Texas Instruments, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/kobject.h> +#include <linux/sysfs.h> +#include <linux/stat.h> +#include <asm/page.h> +#include <linux/slab.h> +#include "services_headers.h" +#include "pdump_km.h" +#include "sysfs.h" + +/* sysfs structures */ +struct pvrsrv_attribute { + struct attribute attr; + int sgx_version; + int sgx_revision; +}; + +static struct pvrsrv_attribute PVRSRVAttr = { + .attr.name = "egl.cfg", + .attr.mode = S_IRUGO, + .sgx_version = SGXCORE, + .sgx_revision = SGX_CORE_REV, +}; + +/* sysfs read function */ +static ssize_t PVRSRVEglCfgShow(struct kobject *kobj, struct attribute *attr, + char *buffer) { + struct pvrsrv_attribute *pvrsrv_attr; + + pvrsrv_attr = container_of(attr, struct pvrsrv_attribute, attr); + return snprintf(buffer, PAGE_SIZE, "0 0 android\n0 1 POWERVR_SGX%d_%d", + pvrsrv_attr->sgx_version, pvrsrv_attr->sgx_revision); +} + +/* sysfs write function unsupported*/ +static ssize_t PVRSRVEglCfgStore(struct kobject *kobj, struct attribute *attr, + const char *buffer, size_t size) { + PVR_DPF((PVR_DBG_ERROR, "PVRSRVEglCfgStore not implemented")); + return 0; +} + +static struct attribute *pvrsrv_sysfs_attrs[] = { + &PVRSRVAttr.attr, + NULL +}; + +static const struct sysfs_ops pvrsrv_sysfs_ops = { + .show = PVRSRVEglCfgShow, + .store = PVRSRVEglCfgStore, +}; + +static struct kobj_type pvrsrv_ktype = { + .sysfs_ops = &pvrsrv_sysfs_ops, + .default_attrs = pvrsrv_sysfs_attrs, +}; + +/* create sysfs entry /sys/egl/egl.cfg to determine + which gfx libraries to load */ + +int PVRSRVCreateSysfsEntry(void) +{ + struct kobject *egl_cfg_kobject; + int r; + + egl_cfg_kobject = kzalloc(sizeof(*egl_cfg_kobject), GFP_KERNEL); + r = kobject_init_and_add(egl_cfg_kobject, &pvrsrv_ktype, NULL, "egl"); + + if (r) { + PVR_DPF((PVR_DBG_ERROR, + "Failed to create 
egl.cfg sysfs entry")); + return PVRSRV_ERROR_INIT_FAILURE; + } + + return PVRSRV_OK; +} diff --git a/pvr-source/services4/srvkm/env/linux/sysfs.h b/pvr-source/services4/srvkm/env/linux/sysfs.h new file mode 100644 index 0000000..fb8d20f --- /dev/null +++ b/pvr-source/services4/srvkm/env/linux/sysfs.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2012 Texas Instruments, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __SYSFS_H +#define __SYSFS_H + +int PVRSRVCreateSysfsEntry(void); + +#endif diff --git a/pvr-source/services4/srvkm/hwdefs/mnemedefs.h b/pvr-source/services4/srvkm/hwdefs/mnemedefs.h new file mode 100644 index 0000000..83a65f5 --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/mnemedefs.h @@ -0,0 +1,117 @@ +/*************************************************************************/ /*! +@Title Hardware defs for MNEME. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _MNEMEDEFS_KM_H_ +#define _MNEMEDEFS_KM_H_ + +/* Register MNE_CR_CTRL */ +#define MNE_CR_CTRL 0x0D00 +#define MNE_CR_CTRL_BYP_CC_N_MASK 0x00010000U +#define MNE_CR_CTRL_BYP_CC_N_SHIFT 16 +#define MNE_CR_CTRL_BYP_CC_N_SIGNED 0 +#define MNE_CR_CTRL_BYP_CC_MASK 0x00008000U +#define MNE_CR_CTRL_BYP_CC_SHIFT 15 +#define MNE_CR_CTRL_BYP_CC_SIGNED 0 +#define MNE_CR_CTRL_USE_INVAL_REQ_MASK 0x00007800U +#define MNE_CR_CTRL_USE_INVAL_REQ_SHIFT 11 +#define MNE_CR_CTRL_USE_INVAL_REQ_SIGNED 0 +#define MNE_CR_CTRL_BYPASS_ALL_MASK 0x00000400U +#define MNE_CR_CTRL_BYPASS_ALL_SHIFT 10 +#define MNE_CR_CTRL_BYPASS_ALL_SIGNED 0 +#define MNE_CR_CTRL_BYPASS_MASK 0x000003E0U +#define MNE_CR_CTRL_BYPASS_SHIFT 5 +#define MNE_CR_CTRL_BYPASS_SIGNED 0 +#define MNE_CR_CTRL_PAUSE_MASK 0x00000010U +#define MNE_CR_CTRL_PAUSE_SHIFT 4 +#define MNE_CR_CTRL_PAUSE_SIGNED 0 +/* Register MNE_CR_USE_INVAL */ +#define MNE_CR_USE_INVAL 0x0D04 +#define MNE_CR_USE_INVAL_ADDR_MASK 0xFFFFFFFFU +#define MNE_CR_USE_INVAL_ADDR_SHIFT 0 +#define MNE_CR_USE_INVAL_ADDR_SIGNED 0 +/* Register MNE_CR_STAT */ +#define MNE_CR_STAT 0x0D08 +#define MNE_CR_STAT_PAUSED_MASK 0x00000400U +#define MNE_CR_STAT_PAUSED_SHIFT 10 +#define MNE_CR_STAT_PAUSED_SIGNED 0 +#define MNE_CR_STAT_READS_MASK 0x000003FFU +#define MNE_CR_STAT_READS_SHIFT 0 +#define MNE_CR_STAT_READS_SIGNED 0 +/* Register MNE_CR_STAT_STATS */ +#define MNE_CR_STAT_STATS 0x0D0C +#define MNE_CR_STAT_STATS_RST_MASK 0x000FFFF0U +#define MNE_CR_STAT_STATS_RST_SHIFT 4 +#define MNE_CR_STAT_STATS_RST_SIGNED 0 +#define MNE_CR_STAT_STATS_SEL_MASK 0x0000000FU +#define MNE_CR_STAT_STATS_SEL_SHIFT 0 +#define MNE_CR_STAT_STATS_SEL_SIGNED 0 +/* Register MNE_CR_STAT_STATS_OUT */ +#define MNE_CR_STAT_STATS_OUT 0x0D10 +#define MNE_CR_STAT_STATS_OUT_VALUE_MASK 0xFFFFFFFFU +#define MNE_CR_STAT_STATS_OUT_VALUE_SHIFT 0 +#define MNE_CR_STAT_STATS_OUT_VALUE_SIGNED 0 +/* Register MNE_CR_EVENT_STATUS 
*/ +#define MNE_CR_EVENT_STATUS 0x0D14 +#define MNE_CR_EVENT_STATUS_INVAL_MASK 0x00000001U +#define MNE_CR_EVENT_STATUS_INVAL_SHIFT 0 +#define MNE_CR_EVENT_STATUS_INVAL_SIGNED 0 +/* Register MNE_CR_EVENT_CLEAR */ +#define MNE_CR_EVENT_CLEAR 0x0D18 +#define MNE_CR_EVENT_CLEAR_INVAL_MASK 0x00000001U +#define MNE_CR_EVENT_CLEAR_INVAL_SHIFT 0 +#define MNE_CR_EVENT_CLEAR_INVAL_SIGNED 0 +/* Register MNE_CR_CTRL_INVAL */ +#define MNE_CR_CTRL_INVAL 0x0D20 +#define MNE_CR_CTRL_INVAL_PREQ_PDS_MASK 0x00000008U +#define MNE_CR_CTRL_INVAL_PREQ_PDS_SHIFT 3 +#define MNE_CR_CTRL_INVAL_PREQ_PDS_SIGNED 0 +#define MNE_CR_CTRL_INVAL_PREQ_USEC_MASK 0x00000004U +#define MNE_CR_CTRL_INVAL_PREQ_USEC_SHIFT 2 +#define MNE_CR_CTRL_INVAL_PREQ_USEC_SIGNED 0 +#define MNE_CR_CTRL_INVAL_PREQ_CACHE_MASK 0x00000002U +#define MNE_CR_CTRL_INVAL_PREQ_CACHE_SHIFT 1 +#define MNE_CR_CTRL_INVAL_PREQ_CACHE_SIGNED 0 +#define MNE_CR_CTRL_INVAL_ALL_MASK 0x00000001U +#define MNE_CR_CTRL_INVAL_ALL_SHIFT 0 +#define MNE_CR_CTRL_INVAL_ALL_SIGNED 0 + +#endif /* _MNEMEDEFS_KM_H_ */ + diff --git a/pvr-source/services4/srvkm/hwdefs/ocpdefs.h b/pvr-source/services4/srvkm/hwdefs/ocpdefs.h new file mode 100644 index 0000000..07a6412 --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/ocpdefs.h @@ -0,0 +1,308 @@ +/*************************************************************************/ /*! +@Title OCP HW definitions. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _OCPDEFS_H_ +#define _OCPDEFS_H_ + +/* Register EUR_CR_OCP_REVISION */ +#define EUR_CR_OCP_REVISION 0xFE00 +#define EUR_CR_OCP_REVISION_REV_MASK 0xFFFFFFFFUL +#define EUR_CR_OCP_REVISION_REV_SHIFT 0 +#define EUR_CR_OCP_REVISION_REV_SIGNED 0 + +/* Register EUR_CR_OCP_HWINFO */ +#define EUR_CR_OCP_HWINFO 0xFE04 +#define EUR_CR_OCP_HWINFO_SYS_BUS_WIDTH_MASK 0x00000003UL +#define EUR_CR_OCP_HWINFO_SYS_BUS_WIDTH_SHIFT 0 +#define EUR_CR_OCP_HWINFO_SYS_BUS_WIDTH_SIGNED 0 + +#define EUR_CR_OCP_HWINFO_MEM_BUS_WIDTH_MASK 0x00000004UL +#define EUR_CR_OCP_HWINFO_MEM_BUS_WIDTH_SHIFT 2 +#define EUR_CR_OCP_HWINFO_MEM_BUS_WIDTH_SIGNED 0 + +/* Register EUR_CR_OCP_SYSCONFIG */ +#define EUR_CR_OCP_SYSCONFIG 0xFE10 +#define EUR_CR_OCP_SYSCONFIG_IDLE_MODE_MASK 0x0000000CUL +#define EUR_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT 2 +#define EUR_CR_OCP_SYSCONFIG_IDLE_MODE_SIGNED 0 + +#define EUR_CR_OCP_SYSCONFIG_STANDBY_MODE_MASK 0x00000030UL +#define EUR_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT 4 +#define EUR_CR_OCP_SYSCONFIG_STANDBY_MODE_SIGNED 0 + +/* Register EUR_CR_OCP_IRQSTATUS_RAW_0 */ +#define EUR_CR_OCP_IRQSTATUS_RAW_0 0xFE24 +#define EUR_CR_OCP_IRQSTATUS_RAW_0_INIT_MASK 0x00000001UL +#define EUR_CR_OCP_IRQSTATUS_RAW_0_INIT_SHIFT 0 +#define EUR_CR_OCP_IRQSTATUS_RAW_0_INIT_SIGNED 0 + +/* Register EUR_CR_OCP_IRQSTATUS_RAW_1 */ +#define EUR_CR_OCP_IRQSTATUS_RAW_1 0xFE28 +#define EUR_CR_OCP_IRQSTATUS_RAW_1_TARGET_MASK 0x00000001UL +#define EUR_CR_OCP_IRQSTATUS_RAW_1_TARGET_SHIFT 0 +#define EUR_CR_OCP_IRQSTATUS_RAW_1_TARGET_SIGNED 0 + +/* Register EUR_CR_OCP_IRQSTATUS_RAW_2 */ +#define EUR_CR_OCP_IRQSTATUS_RAW_2 0xFE2C +#define EUR_CR_OCP_IRQSTATUS_RAW_2_SGXCORE_MASK 0x00000001UL +#define EUR_CR_OCP_IRQSTATUS_RAW_2_SGXCORE_SHIFT 0 +#define EUR_CR_OCP_IRQSTATUS_RAW_2_SGXCORE_SIGNED 0 + +/* Register EUR_CR_OCP_IRQSTATUS_0 */ +#define EUR_CR_OCP_IRQSTATUS_0 0xFE30 +#define EUR_CR_OCP_IRQSTATUS_0_INIT_MASK 
0x00000001UL +#define EUR_CR_OCP_IRQSTATUS_0_INIT_SHIFT 0 +#define EUR_CR_OCP_IRQSTATUS_0_INIT_SIGNED 0 + +/* Register EUR_CR_OCP_IRQSTATUS_1 */ +#define EUR_CR_OCP_IRQSTATUS_1 0xFE34 +#define EUR_CR_OCP_IRQSTATUS_1_TARGET_MASK 0x00000001UL +#define EUR_CR_OCP_IRQSTATUS_1_TARGET_SHIFT 0 +#define EUR_CR_OCP_IRQSTATUS_1_TARGET_SIGNED 0 + +/* Register EUR_CR_OCP_IRQSTATUS_2 */ +#define EUR_CR_OCP_IRQSTATUS_2 0xFE38 +#define EUR_CR_OCP_IRQSTATUS_2_SGXCORE_MASK 0x00000001UL +#define EUR_CR_OCP_IRQSTATUS_2_SGXCORE_SHIFT 0 +#define EUR_CR_OCP_IRQSTATUS_2_SGXCORE_SIGNED 0 + +/* Register EUR_CR_OCP_IRQENABLE_SET_0 */ +#define EUR_CR_OCP_IRQENABLE_SET_0 0xFE3C +#define EUR_CR_OCP_IRQENABLE_SET_0_INIT_MASK 0x00000001UL +#define EUR_CR_OCP_IRQENABLE_SET_0_INIT_SHIFT 0 +#define EUR_CR_OCP_IRQENABLE_SET_0_INIT_SIGNED 0 + +/* Register EUR_CR_OCP_IRQENABLE_SET_1 */ +#define EUR_CR_OCP_IRQENABLE_SET_1 0xFE40 +#define EUR_CR_OCP_IRQENABLE_SET_1_TARGET_MASK 0x00000001UL +#define EUR_CR_OCP_IRQENABLE_SET_1_TARGET_SHIFT 0 +#define EUR_CR_OCP_IRQENABLE_SET_1_TARGET_SIGNED 0 + +/* Register EUR_CR_OCP_IRQENABLE_SET_2 */ +#define EUR_CR_OCP_IRQENABLE_SET_2 0xFE44 +#define EUR_CR_OCP_IRQENABLE_SET_2_SGXCORE_MASK 0x00000001UL +#define EUR_CR_OCP_IRQENABLE_SET_2_SGXCORE_SHIFT 0 +#define EUR_CR_OCP_IRQENABLE_SET_2_SGXCORE_SIGNED 0 + +/* Register EUR_CR_OCP_IRQENABLE_CLR_0 */ +#define EUR_CR_OCP_IRQENABLE_CLR_0 0xFE48 +#define EUR_CR_OCP_IRQENABLE_CLR_0_INIT_MASK 0x00000001UL +#define EUR_CR_OCP_IRQENABLE_CLR_0_INIT_SHIFT 0 +#define EUR_CR_OCP_IRQENABLE_CLR_0_INIT_SIGNED 0 + +/* Register EUR_CR_OCP_IRQENABLE_CLR_1 */ +#define EUR_CR_OCP_IRQENABLE_CLR_1 0xFE4C +#define EUR_CR_OCP_IRQENABLE_CLR_1_TARGET_MASK 0x00000001UL +#define EUR_CR_OCP_IRQENABLE_CLR_1_TARGET_SHIFT 0 +#define EUR_CR_OCP_IRQENABLE_CLR_1_TARGET_SIGNED 0 + +/* Register EUR_CR_OCP_IRQENABLE_CLR_2 */ +#define EUR_CR_OCP_IRQENABLE_CLR_2 0xFE50 +#define EUR_CR_OCP_IRQENABLE_CLR_2_SGXCORE_MASK 0x00000001UL +#define 
EUR_CR_OCP_IRQENABLE_CLR_2_SGXCORE_SHIFT 0 +#define EUR_CR_OCP_IRQENABLE_CLR_2_SGXCORE_SIGNED 0 + +/* Register EUR_CR_OCP_PAGE_CONFIG */ +#define EUR_CR_OCP_PAGE_CONFIG 0xFF00 +#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_SIZE_MASK 0x00000001UL +#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_SIZE_SHIFT 0 +#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_SIZE_SIGNED 0 + +#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_CHECK_ENABLE_MASK 0x00000004UL +#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_CHECK_ENABLE_SHIFT 2 +#define EUR_CR_OCP_PAGE_CONFIG_MEM_PAGE_CHECK_ENABLE_SIGNED 0 + +#define EUR_CR_OCP_PAGE_CONFIG_SIZE_MASK 0x00000018UL +#define EUR_CR_OCP_PAGE_CONFIG_SIZE_SHIFT 3 +#define EUR_CR_OCP_PAGE_CONFIG_SIZE_SIGNED 0 + +/* Register EUR_CR_OCP_INTERRUPT_EVENT */ +#define EUR_CR_OCP_INTERRUPT_EVENT 0xFF04 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNEXPECTED_MASK 0x00000001UL +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNEXPECTED_SHIFT 0 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNEXPECTED_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNUSED_TAG_MASK 0x00000002UL +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNUSED_TAG_SHIFT 1 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_UNUSED_TAG_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_ERROR_MASK 0x00000004UL +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_ERROR_SHIFT 2 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_RESP_ERROR_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_PAGE_CROSS_ERROR_MASK 0x00000008UL +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_PAGE_CROSS_ERROR_SHIFT 3 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_PAGE_CROSS_ERROR_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_READ_TAG_FIFO_OVR_MASK 0x00000010UL +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_READ_TAG_FIFO_OVR_SHIFT 4 +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_READ_TAG_FIFO_OVR_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_MEM_REQ_FIFO_OVR_MASK 0x00000020UL +#define EUR_CR_OCP_INTERRUPT_EVENT_INIT_MEM_REQ_FIFO_OVR_SHIFT 5 +#define 
EUR_CR_OCP_INTERRUPT_EVENT_INIT_MEM_REQ_FIFO_OVR_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_RESP_FIFO_FULL_MASK 0x00000100UL +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_RESP_FIFO_FULL_SHIFT 8 +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_RESP_FIFO_FULL_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_CMD_FIFO_FULL_MASK 0x00000200UL +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_CMD_FIFO_FULL_SHIFT 9 +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_CMD_FIFO_FULL_SIGNED 0 + +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_INVALID_OCP_CMD_MASK 0x00000400UL +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_INVALID_OCP_CMD_SHIFT 10 +#define EUR_CR_OCP_INTERRUPT_EVENT_TARGET_INVALID_OCP_CMD_SIGNED 0 + +/* Register EUR_CR_OCP_DEBUG_CONFIG */ +#define EUR_CR_OCP_DEBUG_CONFIG 0xFF08 +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_TARGET_IDLE_MASK 0x00000003UL +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_TARGET_IDLE_SHIFT 0 +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_TARGET_IDLE_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_INIT_IDLE_MASK 0x0000000CUL +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_INIT_IDLE_SHIFT 2 +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_INIT_IDLE_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_PASS_DATA_MASK 0x00000010UL +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_PASS_DATA_SHIFT 4 +#define EUR_CR_OCP_DEBUG_CONFIG_FORCE_PASS_DATA_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_CONFIG_SELECT_INIT_IDLE_MASK 0x00000020UL +#define EUR_CR_OCP_DEBUG_CONFIG_SELECT_INIT_IDLE_SHIFT 5 +#define EUR_CR_OCP_DEBUG_CONFIG_SELECT_INIT_IDLE_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_MASK 0x80000000UL +#define EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_SHIFT 31 +#define EUR_CR_OCP_DEBUG_CONFIG_THALIA_INT_BYPASS_SIGNED 0 + +/* Register EUR_CR_OCP_DEBUG_STATUS */ +#define EUR_CR_OCP_DEBUG_STATUS 0xFF0C +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_MCONNECT_MASK 0x00000003UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_MCONNECT_SHIFT 0 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_MCONNECT_SIGNED 0 + +#define 
EUR_CR_OCP_DEBUG_STATUS_TARGET_SCONNECT_MASK 0x00000004UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SCONNECT_SHIFT 2 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SCONNECT_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEREQ_MASK 0x00000008UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEREQ_SHIFT 3 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEREQ_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SDISCACK_MASK 0x00000030UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SDISCACK_SHIFT 4 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SDISCACK_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEACK_MASK 0x000000C0UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEACK_SHIFT 6 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_SIDLEACK_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MCONNECT0_MASK 0x00000300UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MCONNECT0_SHIFT 8 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MCONNECT0_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT0_MASK 0x00000400UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT0_SHIFT 10 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT0_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT1_MASK 0x00000800UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT1_SHIFT 11 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT1_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT2_MASK 0x00001000UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT2_SHIFT 12 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_SCONNECT2_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCACK_MASK 0x00006000UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCACK_SHIFT 13 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCACK_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCREQ_MASK 0x00008000UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCREQ_SHIFT 15 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MDISCREQ_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MWAIT_MASK 0x00010000UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MWAIT_SHIFT 16 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MWAIT_SIGNED 0 + +#define 
EUR_CR_OCP_DEBUG_STATUS_INIT_MSTANDBY_MASK 0x00020000UL +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MSTANDBY_SHIFT 17 +#define EUR_CR_OCP_DEBUG_STATUS_INIT_MSTANDBY_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_CMD_OUT_MASK 0x001C0000UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_CMD_OUT_SHIFT 18 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_CMD_OUT_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_WHICH_TARGET_REGISTER_MASK 0x03E00000UL +#define EUR_CR_OCP_DEBUG_STATUS_WHICH_TARGET_REGISTER_SHIFT 21 +#define EUR_CR_OCP_DEBUG_STATUS_WHICH_TARGET_REGISTER_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_RESP_ERROR_MASK 0x04000000UL +#define EUR_CR_OCP_DEBUG_STATUS_RESP_ERROR_SHIFT 26 +#define EUR_CR_OCP_DEBUG_STATUS_RESP_ERROR_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_CMD_FIFO_FULL_MASK 0x08000000UL +#define EUR_CR_OCP_DEBUG_STATUS_CMD_FIFO_FULL_SHIFT 27 +#define EUR_CR_OCP_DEBUG_STATUS_CMD_FIFO_FULL_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_RESP_FIFO_FULL_MASK 0x10000000UL +#define EUR_CR_OCP_DEBUG_STATUS_RESP_FIFO_FULL_SHIFT 28 +#define EUR_CR_OCP_DEBUG_STATUS_RESP_FIFO_FULL_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_IDLE_MASK 0x20000000UL +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_IDLE_SHIFT 29 +#define EUR_CR_OCP_DEBUG_STATUS_TARGET_IDLE_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_CMD_RESP_DEBUG_STATE_MASK 0x40000000UL +#define EUR_CR_OCP_DEBUG_STATUS_CMD_RESP_DEBUG_STATE_SHIFT 30 +#define EUR_CR_OCP_DEBUG_STATUS_CMD_RESP_DEBUG_STATE_SIGNED 0 + +#define EUR_CR_OCP_DEBUG_STATUS_CMD_DEBUG_STATE_MASK 0x80000000UL +#define EUR_CR_OCP_DEBUG_STATUS_CMD_DEBUG_STATE_SHIFT 31 +#define EUR_CR_OCP_DEBUG_STATUS_CMD_DEBUG_STATE_SIGNED 0 + + +#endif /* _OCPDEFS_H_ */ + +/***************************************************************************** + End of file (ocpdefs.h) +*****************************************************************************/ diff --git a/pvr-source/services4/srvkm/hwdefs/sgx520defs.h b/pvr-source/services4/srvkm/hwdefs/sgx520defs.h new file mode 100644 index 
0000000..80c3363 --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgx520defs.h @@ -0,0 +1,555 @@ +/*************************************************************************/ /*! +@Title Hardware defs for SGX520. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _SGX520DEFS_KM_H_ +#define _SGX520DEFS_KM_H_ + +/* Register EUR_CR_CLKGATECTL */ +#define EUR_CR_CLKGATECTL 0x0000 +#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000U +#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24 +/* Register EUR_CR_CLKGATESTATUS */ +#define EUR_CR_CLKGATESTATUS 0x0004 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010U +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100U +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000U +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000U +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16 +#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000U +#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20 +/* Register EUR_CR_CLKGATECTLOVR */ +#define EUR_CR_CLKGATECTLOVR 0x0008 
+#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030U +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300U +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8 +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000U +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000U +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16 +#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000U +#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20 +/* Register EUR_CR_CORE_ID */ +#define EUR_CR_CORE_ID 0x0010 +#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU +#define EUR_CR_CORE_ID_CONFIG_SHIFT 0 +#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U +#define EUR_CR_CORE_ID_ID_SHIFT 16 +/* Register EUR_CR_CORE_REVISION */ +#define EUR_CR_CORE_REVISION 0x0014 +#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU +#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0 +#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U +#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8 +#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U +#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16 +#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U +#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24 +/* Register EUR_CR_DESIGNER_REV_FIELD1 */ +#define EUR_CR_DESIGNER_REV_FIELD1 0x0018 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0 +/* Register EUR_CR_DESIGNER_REV_FIELD2 */ +#define EUR_CR_DESIGNER_REV_FIELD2 0x001C +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0 +/* Register EUR_CR_SOFT_RESET */ +#define EUR_CR_SOFT_RESET 0x0080 +#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U +#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0 +#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U +#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2 +#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008U 
+#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3 +#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010U +#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4 +#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U +#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5 +#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040U +#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6 +/* Register EUR_CR_EVENT_HOST_ENABLE2 */ +#define EUR_CR_EVENT_HOST_ENABLE2 0x0110 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR2 */ +#define EUR_CR_EVENT_HOST_CLEAR2 0x0114 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS2 */ +#define EUR_CR_EVENT_STATUS2 0x0118 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 
0x00000004U +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS */ +#define EUR_CR_EVENT_STATUS 0x012C +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16 +#define 
EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_ENABLE */ +#define EUR_CR_EVENT_HOST_ENABLE 0x0130 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U +#define 
EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13 
+#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR */ +#define EUR_CR_EVENT_HOST_CLEAR 0x0134 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29 +#define 
EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U +#define 
EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_TIMER */ +#define EUR_CR_TIMER 0x0144 +#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU +#define EUR_CR_TIMER_VALUE_SHIFT 0 +/* Register EUR_CR_USE_CODE_BASE_0 */ +#define EUR_CR_USE_CODE_BASE_0 0x0A0C +#define EUR_CR_USE_CODE_BASE_ADDR_00_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_00_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_00_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_00_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_1 */ +#define EUR_CR_USE_CODE_BASE_1 0x0A10 +#define EUR_CR_USE_CODE_BASE_ADDR_01_MASK 0x000FFFFFU +#define 
EUR_CR_USE_CODE_BASE_ADDR_01_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_01_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_01_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_2 */ +#define EUR_CR_USE_CODE_BASE_2 0x0A14 +#define EUR_CR_USE_CODE_BASE_ADDR_02_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_02_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_02_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_02_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_3 */ +#define EUR_CR_USE_CODE_BASE_3 0x0A18 +#define EUR_CR_USE_CODE_BASE_ADDR_03_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_03_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_03_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_03_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_4 */ +#define EUR_CR_USE_CODE_BASE_4 0x0A1C +#define EUR_CR_USE_CODE_BASE_ADDR_04_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_04_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_04_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_04_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_5 */ +#define EUR_CR_USE_CODE_BASE_5 0x0A20 +#define EUR_CR_USE_CODE_BASE_ADDR_05_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_05_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_05_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_05_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_6 */ +#define EUR_CR_USE_CODE_BASE_6 0x0A24 +#define EUR_CR_USE_CODE_BASE_ADDR_06_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_06_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_06_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_06_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_7 */ +#define EUR_CR_USE_CODE_BASE_7 0x0A28 +#define EUR_CR_USE_CODE_BASE_ADDR_07_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_07_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_07_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_07_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_8 */ +#define EUR_CR_USE_CODE_BASE_8 0x0A2C +#define EUR_CR_USE_CODE_BASE_ADDR_08_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_08_SHIFT 0 +#define 
EUR_CR_USE_CODE_BASE_DM_08_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_08_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_9 */ +#define EUR_CR_USE_CODE_BASE_9 0x0A30 +#define EUR_CR_USE_CODE_BASE_ADDR_09_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_09_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_09_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_09_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_10 */ +#define EUR_CR_USE_CODE_BASE_10 0x0A34 +#define EUR_CR_USE_CODE_BASE_ADDR_10_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_10_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_10_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_10_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_11 */ +#define EUR_CR_USE_CODE_BASE_11 0x0A38 +#define EUR_CR_USE_CODE_BASE_ADDR_11_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_11_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_11_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_11_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_12 */ +#define EUR_CR_USE_CODE_BASE_12 0x0A3C +#define EUR_CR_USE_CODE_BASE_ADDR_12_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_12_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_12_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_12_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_13 */ +#define EUR_CR_USE_CODE_BASE_13 0x0A40 +#define EUR_CR_USE_CODE_BASE_ADDR_13_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_13_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_13_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_13_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_14 */ +#define EUR_CR_USE_CODE_BASE_14 0x0A44 +#define EUR_CR_USE_CODE_BASE_ADDR_14_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_14_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_14_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_14_SHIFT 20 +/* Register EUR_CR_USE_CODE_BASE_15 */ +#define EUR_CR_USE_CODE_BASE_15 0x0A48 +#define EUR_CR_USE_CODE_BASE_ADDR_15_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_15_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_15_MASK 0x00300000U 
+#define EUR_CR_USE_CODE_BASE_DM_15_SHIFT 20 +/* Register EUR_CR_PDS_EXEC_BASE */ +#define EUR_CR_PDS_EXEC_BASE 0x0AB8 +#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_EVENT_KICKER */ +#define EUR_CR_EVENT_KICKER 0x0AC4 +#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0U +#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4 +/* Register EUR_CR_EVENT_KICK */ +#define EUR_CR_EVENT_KICK 0x0AC8 +#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK_NOW_SHIFT 0 +/* Register EUR_CR_EVENT_TIMER */ +#define EUR_CR_EVENT_TIMER 0x0ACC +#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U +#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24 +#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU +#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0 +/* Register EUR_CR_PDS_INV0 */ +#define EUR_CR_PDS_INV0 0x0AD0 +#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV0_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV1 */ +#define EUR_CR_PDS_INV1 0x0AD4 +#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV1_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV2 */ +#define EUR_CR_PDS_INV2 0x0AD8 +#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV2_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV3 */ +#define EUR_CR_PDS_INV3 0x0ADC +#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV3_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV_CSC */ +#define EUR_CR_PDS_INV_CSC 0x0AE0 +#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U +#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0 +/* Register EUR_CR_PDS_PC_BASE */ +#define EUR_CR_PDS_PC_BASE 0x0B2C +#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFFU +#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0 +/* Register EUR_CR_BIF_CTRL */ +#define EUR_CR_BIF_CTRL 0x0C00 +#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U +#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0 +#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U +#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1 +#define EUR_CR_BIF_CTRL_FLUSH_MASK 
0x00000004U +#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2 +#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U +#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15 +/* Register EUR_CR_BIF_INT_STAT */ +#define EUR_CR_BIF_INT_STAT 0x0C04 +#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFU +#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0 +#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000U +#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000U +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15 +/* Register EUR_CR_BIF_FAULT */ +#define EUR_CR_BIF_FAULT 0x0C08 +#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000U +#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE0 */ +#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_TA_REQ_BASE */ +#define EUR_CR_BIF_TA_REQ_BASE 0x0C90 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_MEM_REQ_STAT */ +#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8 +#define 
EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU +#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0 +/* Register EUR_CR_BIF_3D_REQ_BASE */ +#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_ZLS_REQ_BASE */ +#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20 +/* Table EUR_CR_USE_CODE_BASE */ +/* Register EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X))) +#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x000FFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_MASK 0x00300000U +#define EUR_CR_USE_CODE_BASE_DM_SHIFT 20 +/* Number of entries in table EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16 +#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16 + +#endif /* _SGX520DEFS_KM_H_ */ + diff --git a/pvr-source/services4/srvkm/hwdefs/sgx530defs.h b/pvr-source/services4/srvkm/hwdefs/sgx530defs.h new file mode 100644 index 0000000..3223feb --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgx530defs.h @@ -0,0 +1,542 @@ +/*************************************************************************/ /*! +@Title Hardware defs for SGX530. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _SGX530DEFS_KM_H_ +#define _SGX530DEFS_KM_H_ + +/* Register EUR_CR_CLKGATECTL */ +#define EUR_CR_CLKGATECTL 0x0000 +#define EUR_CR_CLKGATECTL_2D_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL_2D_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000U +#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24 +/* Register EUR_CR_CLKGATESTATUS */ +#define EUR_CR_CLKGATESTATUS 0x0004 +#define EUR_CR_CLKGATESTATUS_2D_CLKS_MASK 0x00000001U +#define EUR_CR_CLKGATESTATUS_2D_CLKS_SHIFT 0 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010U +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100U +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000U +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000U +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16 +#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000U +#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20 +/* Register EUR_CR_CLKGATECTLOVR */ +#define EUR_CR_CLKGATECTLOVR 0x0008 +#define EUR_CR_CLKGATECTLOVR_2D_CLKO_MASK 0x00000003U +#define EUR_CR_CLKGATECTLOVR_2D_CLKO_SHIFT 0 +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030U +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300U +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8 +#define 
EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000U +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000U +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16 +#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000U +#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20 +/* Register EUR_CR_CORE_ID */ +#define EUR_CR_CORE_ID 0x0010 +#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU +#define EUR_CR_CORE_ID_CONFIG_SHIFT 0 +#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U +#define EUR_CR_CORE_ID_ID_SHIFT 16 +/* Register EUR_CR_CORE_REVISION */ +#define EUR_CR_CORE_REVISION 0x0014 +#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU +#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0 +#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U +#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8 +#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U +#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16 +#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U +#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24 +/* Register EUR_CR_DESIGNER_REV_FIELD1 */ +#define EUR_CR_DESIGNER_REV_FIELD1 0x0018 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0 +/* Register EUR_CR_DESIGNER_REV_FIELD2 */ +#define EUR_CR_DESIGNER_REV_FIELD2 0x001C +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0 +/* Register EUR_CR_SOFT_RESET */ +#define EUR_CR_SOFT_RESET 0x0080 +#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U +#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0 +#define EUR_CR_SOFT_RESET_TWOD_RESET_MASK 0x00000002U +#define EUR_CR_SOFT_RESET_TWOD_RESET_SHIFT 1 +#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U +#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2 +#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008U +#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3 +#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010U +#define 
EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4 +#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U +#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5 +#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040U +#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6 +/* Register EUR_CR_EVENT_HOST_ENABLE2 */ +#define EUR_CR_EVENT_HOST_ENABLE2 0x0110 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR2 */ +#define EUR_CR_EVENT_HOST_CLEAR2 0x0114 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS2 */ +#define EUR_CR_EVENT_STATUS2 0x0118 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2 +#define 
EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS */ +#define EUR_CR_EVENT_STATUS 0x012CU +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 
16 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_ENABLE */ +#define EUR_CR_EVENT_HOST_ENABLE 0x0130 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U +#define 
EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 
14 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR */ +#define EUR_CR_EVENT_HOST_CLEAR 0x0134 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31 +#define 
EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U +#define 
EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_PDS_EXEC_BASE */ +#define EUR_CR_PDS_EXEC_BASE 0x0AB8 +#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_EVENT_KICKER */ +#define EUR_CR_EVENT_KICKER 0x0AC4 +#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0U +#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 
4 +/* Register EUR_CR_EVENT_KICK */ +#define EUR_CR_EVENT_KICK 0x0AC8 +#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK_NOW_SHIFT 0 +/* Register EUR_CR_EVENT_TIMER */ +#define EUR_CR_EVENT_TIMER 0x0ACC +#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U +#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24 +#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU +#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0 +/* Register EUR_CR_PDS_INV0 */ +#define EUR_CR_PDS_INV0 0x0AD0 +#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV0_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV1 */ +#define EUR_CR_PDS_INV1 0x0AD4 +#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV1_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV2 */ +#define EUR_CR_PDS_INV2 0x0AD8 +#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV2_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV3 */ +#define EUR_CR_PDS_INV3 0x0ADC +#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV3_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV_CSC */ +#define EUR_CR_PDS_INV_CSC 0x0AE0 +#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U +#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0 +/* Register EUR_CR_PDS_PC_BASE */ +#define EUR_CR_PDS_PC_BASE 0x0B2C +#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFFU +#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0 +/* Register EUR_CR_BIF_CTRL */ +#define EUR_CR_BIF_CTRL 0x0C00 +#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U +#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0 +#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U +#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1 +#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004U +#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2 +#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U +#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8 +#define 
EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_MASK 0x00000800U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_SHIFT 11 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15 +/* Register EUR_CR_BIF_INT_STAT */ +#define EUR_CR_BIF_INT_STAT 0x0C04 +#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFU +#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0 +#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000U +#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000U +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15 +/* Register EUR_CR_BIF_FAULT */ +#define EUR_CR_BIF_FAULT 0x0C08 +#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000U +#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE0 */ +#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_TWOD_REQ_BASE */ +#define EUR_CR_BIF_TWOD_REQ_BASE 0x0C88 +#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_TA_REQ_BASE */ +#define EUR_CR_BIF_TA_REQ_BASE 0x0C90 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_MEM_REQ_STAT */ +#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU +#define 
EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0 +/* Register EUR_CR_BIF_3D_REQ_BASE */ +#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_ZLS_REQ_BASE */ +#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_2D_BLIT_STATUS */ +#define EUR_CR_2D_BLIT_STATUS 0x0E04 +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0 +#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U +#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24 +/* Register EUR_CR_2D_VIRTUAL_FIFO_0 */ +#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10 +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12 +/* Register EUR_CR_2D_VIRTUAL_FIFO_1 */ +#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24 +/* Table EUR_CR_USE_CODE_BASE */ +/* Register EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X))) +#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x00FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_MASK 0x03000000U +#define EUR_CR_USE_CODE_BASE_DM_SHIFT 24 +/* Number of entries in table EUR_CR_USE_CODE_BASE */ 
+#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16 +#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16 +#define EUR_CR_MNE_CR_CTRL 0x0D00 +#define EUR_CR_MNE_CR_CTRL_BYP_CC_N_MASK 0x00010000U +#define EUR_CR_MNE_CR_CTRL_BYP_CC_N_SHIFT 16 +#define EUR_CR_MNE_CR_CTRL_BYP_CC_MASK 0x00008000U +#define EUR_CR_MNE_CR_CTRL_BYP_CC_SHIFT 15 +#define EUR_CR_MNE_CR_CTRL_USE_INVAL_ADDR_MASK 0x00007800U +#define EUR_CR_MNE_CR_CTRL_USE_INVAL_ADDR_SHIFT 11 +#define EUR_CR_MNE_CR_CTRL_BYPASS_ALL_MASK 0x00000400U +#define EUR_CR_MNE_CR_CTRL_BYPASS_ALL_SHIFT 10 +#define EUR_CR_MNE_CR_CTRL_BYPASS_MASK 0x000003E0U +#define EUR_CR_MNE_CR_CTRL_BYPASS_SHIFT 5 +#define EUR_CR_MNE_CR_CTRL_PAUSE_MASK 0x00000010U +#define EUR_CR_MNE_CR_CTRL_PAUSE_SHIFT 4 +#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_MASK 0x0000000EU +#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT 1 +#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_PDS_MASK (1UL<<EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT+2) +#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_USEC_MASK (1UL<<EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT+1) +#define EUR_CR_MNE_CR_CTRL_INVAL_PREQ_CACHE_MASK (1UL<<EUR_CR_MNE_CR_CTRL_INVAL_PREQ_SHIFT) +#define EUR_CR_MNE_CR_CTRL_INVAL_ALL_MASK 0x00000001U +#define EUR_CR_MNE_CR_CTRL_INVAL_ALL_SHIFT 0 +#define EUR_CR_MNE_CR_USE_INVAL 0x0D04 +#define EUR_CR_MNE_CR_USE_INVAL_ADDR_MASK 0xFFFFFFFFU +#define EUR_CR_MNE_CR_USE_INVAL_ADDR_SHIFT 0 +#define EUR_CR_MNE_CR_STAT 0x0D08 +#define EUR_CR_MNE_CR_STAT_PAUSED_MASK 0x00000400U +#define EUR_CR_MNE_CR_STAT_PAUSED_SHIFT 10 +#define EUR_CR_MNE_CR_STAT_READS_MASK 0x000003FFU +#define EUR_CR_MNE_CR_STAT_READS_SHIFT 0 +#define EUR_CR_MNE_CR_STAT_STATS 0x0D0C +#define EUR_CR_MNE_CR_STAT_STATS_RST_MASK 0x000FFFF0U +#define EUR_CR_MNE_CR_STAT_STATS_RST_SHIFT 4 +#define EUR_CR_MNE_CR_STAT_STATS_SEL_MASK 0x0000000FU +#define EUR_CR_MNE_CR_STAT_STATS_SEL_SHIFT 0 +#define EUR_CR_MNE_CR_STAT_STATS_OUT 0x0D10 +#define EUR_CR_MNE_CR_STAT_STATS_OUT_VALUE_MASK 0xFFFFFFFFU +#define EUR_CR_MNE_CR_STAT_STATS_OUT_VALUE_SHIFT 0 +#define 
EUR_CR_MNE_CR_EVENT_STATUS 0x0D14 +#define EUR_CR_MNE_CR_EVENT_STATUS_INVAL_MASK 0x00000001U +#define EUR_CR_MNE_CR_EVENT_STATUS_INVAL_SHIFT 0 +#define EUR_CR_MNE_CR_EVENT_CLEAR 0x0D18 +#define EUR_CR_MNE_CR_EVENT_CLEAR_INVAL_MASK 0x00000001U +#define EUR_CR_MNE_CR_EVENT_CLEAR_INVAL_SHIFT 0 +#define EUR_CR_MNE_CR_CTRL_INVAL 0x0D20 + +#endif /* _SGX530DEFS_KM_H_ */ + diff --git a/pvr-source/services4/srvkm/hwdefs/sgx531defs.h b/pvr-source/services4/srvkm/hwdefs/sgx531defs.h new file mode 100644 index 0000000..3295d89 --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgx531defs.h @@ -0,0 +1,601 @@ +/*************************************************************************/ /*! +@Title Hardware defs for SGX531. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _SGX531DEFS_KM_H_ +#define _SGX531DEFS_KM_H_ + +/* Register EUR_CR_CLKGATECTL */ +#define EUR_CR_CLKGATECTL 0x0000 +#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000CU +#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2 +#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0U +#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6 +#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00U +#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10 +#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL_PDS_CLKG_MASK 0x0000C000U +#define EUR_CR_CLKGATECTL_PDS_CLKG_SHIFT 14 +#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x000C0000U +#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 18 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24 +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_MASK 0x10000000U +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SHIFT 28 +/* Register EUR_CR_CLKGATECTL2 */ +#define EUR_CR_CLKGATECTL2 0x0004 +#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_MASK 0x0000000CU +#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_SHIFT 2 +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0U +#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6 +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8 +#define 
EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00U +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10 +#define EUR_CR_CLKGATECTL2_MADD0_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL2_MADD0_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000U +#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14 +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000U +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18 +#define EUR_CR_CLKGATECTL2_MADD1_CLKG_MASK 0x00300000U +#define EUR_CR_CLKGATECTL2_MADD1_CLKG_SHIFT 20 +/* Register EUR_CR_CLKGATESTATUS */ +#define EUR_CR_CLKGATESTATUS 0x0008 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001U +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0 +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002U +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004U +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2 +#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008U +#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3 +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010U +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020U +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5 +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040U +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6 +#define EUR_CR_CLKGATESTATUS_PDS_CLKS_MASK 0x00000080U +#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SHIFT 7 +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100U +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8 +#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_MASK 0x00000200U +#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_SHIFT 9 +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400U +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10 +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800U +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11 +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000U 
+#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12 +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000U +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13 +#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_MASK 0x00004000U +#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_SHIFT 14 +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000U +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15 +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000U +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16 +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000U +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17 +#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_MASK 0x00040000U +#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_SHIFT 18 +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_MASK 0x00080000U +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SHIFT 19 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00100000U +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 20 +/* Register EUR_CR_CLKGATECTLOVR */ +#define EUR_CR_CLKGATECTLOVR 0x000C +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003U +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0 +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000CU +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030U +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4 +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0U +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6 +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300U +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00U +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10 +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000U +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12 +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_MASK 0x0000C000U +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SHIFT 14 +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_MASK 0x00030000U +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SHIFT 16 +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x000C0000U +#define 
EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 18 +/* Register EUR_CR_CORE_ID */ +#define EUR_CR_CORE_ID 0x0020 +#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU +#define EUR_CR_CORE_ID_CONFIG_SHIFT 0 +#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U +#define EUR_CR_CORE_ID_ID_SHIFT 16 +/* Register EUR_CR_CORE_REVISION */ +#define EUR_CR_CORE_REVISION 0x0024 +#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU +#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0 +#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U +#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8 +#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U +#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16 +#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U +#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24 +/* Register EUR_CR_DESIGNER_REV_FIELD1 */ +#define EUR_CR_DESIGNER_REV_FIELD1 0x0028 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0 +/* Register EUR_CR_DESIGNER_REV_FIELD2 */ +#define EUR_CR_DESIGNER_REV_FIELD2 0x002C +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0 +/* Register EUR_CR_SOFT_RESET */ +#define EUR_CR_SOFT_RESET 0x0080 +#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U +#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0 +#define EUR_CR_SOFT_RESET_VDM_RESET_MASK 0x00000002U +#define EUR_CR_SOFT_RESET_VDM_RESET_SHIFT 1 +#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U +#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2 +#define EUR_CR_SOFT_RESET_TE_RESET_MASK 0x00000008U +#define EUR_CR_SOFT_RESET_TE_RESET_SHIFT 3 +#define EUR_CR_SOFT_RESET_MTE_RESET_MASK 0x00000010U +#define EUR_CR_SOFT_RESET_MTE_RESET_SHIFT 4 +#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U +#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5 +#define EUR_CR_SOFT_RESET_ISP2_RESET_MASK 0x00000040U +#define EUR_CR_SOFT_RESET_ISP2_RESET_SHIFT 6 +#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 
0x00000080U +#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 7 +#define EUR_CR_SOFT_RESET_PDS_RESET_MASK 0x00000100U +#define EUR_CR_SOFT_RESET_PDS_RESET_SHIFT 8 +#define EUR_CR_SOFT_RESET_PBE_RESET_MASK 0x00000200U +#define EUR_CR_SOFT_RESET_PBE_RESET_SHIFT 9 +#define EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK 0x00000400U +#define EUR_CR_SOFT_RESET_CACHEL2_RESET_SHIFT 10 +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK 0x00000800U +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SHIFT 11 +#define EUR_CR_SOFT_RESET_MADD_RESET_MASK 0x00001000U +#define EUR_CR_SOFT_RESET_MADD_RESET_SHIFT 12 +#define EUR_CR_SOFT_RESET_ITR_RESET_MASK 0x00002000U +#define EUR_CR_SOFT_RESET_ITR_RESET_SHIFT 13 +#define EUR_CR_SOFT_RESET_TEX_RESET_MASK 0x00004000U +#define EUR_CR_SOFT_RESET_TEX_RESET_SHIFT 14 +#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00008000U +#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 15 +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK 0x00010000U +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SHIFT 16 +#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00020000U +#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 17 +/* Register EUR_CR_EVENT_HOST_ENABLE2 */ +#define EUR_CR_EVENT_HOST_ENABLE2 0x0110 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR2 */ +#define EUR_CR_EVENT_HOST_CLEAR2 0x0114 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 
0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS2 */ +#define EUR_CR_EVENT_STATUS2 0x0118 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS */ +#define EUR_CR_EVENT_STATUS 0x012CU +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U +#define 
EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U +#define 
EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_ENABLE */ +#define EUR_CR_EVENT_HOST_ENABLE 0x0130 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21 +#define 
EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U 
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR */ +#define EUR_CR_EVENT_HOST_CLEAR 0x0134 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U +#define 
EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4 +#define 
EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_TIMER */ +#define EUR_CR_TIMER 0x0144 +#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU +#define EUR_CR_TIMER_VALUE_SHIFT 0 +/* Register EUR_CR_EVENT_KICK1 */ +#define EUR_CR_EVENT_KICK1 0x0AB0 +#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FFU +#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0 +/* Register EUR_CR_PDS_EXEC_BASE */ +#define EUR_CR_PDS_EXEC_BASE 0x0AB8 +#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_EVENT_KICK2 */ +#define EUR_CR_EVENT_KICK2 0x0AC0 +#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0 +/* Register EUR_CR_EVENT_KICKER */ +#define EUR_CR_EVENT_KICKER 0x0AC4 +#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0U +#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4 +/* Register EUR_CR_EVENT_KICK */ +#define EUR_CR_EVENT_KICK 0x0AC8 +#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK_NOW_SHIFT 0 +/* Register EUR_CR_EVENT_TIMER */ +#define EUR_CR_EVENT_TIMER 0x0ACC +#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U +#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24 +#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU +#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0 +/* Register EUR_CR_PDS_INV0 */ +#define EUR_CR_PDS_INV0 0x0AD0 +#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV0_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV1 */ +#define EUR_CR_PDS_INV1 0x0AD4 +#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U +#define 
EUR_CR_PDS_INV1_DSC_SHIFT 0 +/* Register EUR_CR_EVENT_KICK3 */ +#define EUR_CR_EVENT_KICK3 0x0AD8 +#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0 +/* Register EUR_CR_PDS_INV3 */ +#define EUR_CR_PDS_INV3 0x0ADC +#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV3_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV_CSC */ +#define EUR_CR_PDS_INV_CSC 0x0AE0 +#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U +#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0 +/* Register EUR_CR_PDS_PC_BASE */ +#define EUR_CR_PDS_PC_BASE 0x0B2C +#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x00FFFFFFU +#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0 +/* Register EUR_CR_BIF_CTRL */ +#define EUR_CR_BIF_CTRL 0x0C00 +#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U +#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0 +#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U +#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1 +#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004U +#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2 +#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U +#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15 +/* Register EUR_CR_BIF_INT_STAT */ +#define EUR_CR_BIF_INT_STAT 
0x0C04 +#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFU +#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0 +#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000U +#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000U +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15 +/* Register EUR_CR_BIF_FAULT */ +#define EUR_CR_BIF_FAULT 0x0C08 +#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0U +#define EUR_CR_BIF_FAULT_SB_SHIFT 4 +#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000U +#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE0 */ +#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_TA_REQ_BASE */ +#define EUR_CR_BIF_TA_REQ_BASE 0x0C90 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_MEM_REQ_STAT */ +#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU +#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0 +/* Register EUR_CR_BIF_3D_REQ_BASE */ +#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_ZLS_REQ_BASE */ +#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_2D_BLIT_STATUS */ +#define EUR_CR_2D_BLIT_STATUS 0x0E04 +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0 +#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U +#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24 +/* Register EUR_CR_2D_VIRTUAL_FIFO_0 */ +#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10 +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU 
+#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12 +/* Register EUR_CR_2D_VIRTUAL_FIFO_1 */ +#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24 +/* Table EUR_CR_USE_CODE_BASE */ +/* Register EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X))) +#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x00FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_MASK 0x03000000U +#define EUR_CR_USE_CODE_BASE_DM_SHIFT 24 +/* Number of entries in table EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16 +#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16 + +#endif /* _SGX531DEFS_KM_H_ */ + diff --git a/pvr-source/services4/srvkm/hwdefs/sgx535defs.h b/pvr-source/services4/srvkm/hwdefs/sgx535defs.h new file mode 100644 index 0000000..8039da4 --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgx535defs.h @@ -0,0 +1,739 @@ +/*************************************************************************/ /*! +@Title Hardware defs for SGX535. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _SGX535DEFS_KM_H_ +#define _SGX535DEFS_KM_H_ + +/* Register EUR_CR_CLKGATECTL */ +#define EUR_CR_CLKGATECTL 0x0000 +#define EUR_CR_CLKGATECTL_2D_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL_2D_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000U +#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24 +/* Register EUR_CR_CLKGATESTATUS */ +#define EUR_CR_CLKGATESTATUS 0x0004 +#define EUR_CR_CLKGATESTATUS_2D_CLKS_MASK 0x00000001 +#define EUR_CR_CLKGATESTATUS_2D_CLKS_SHIFT 0 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010U +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100U +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000U +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000U +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16 +#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000U +#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20 +/* Register EUR_CR_CLKGATECTLOVR */ +#define EUR_CR_CLKGATECTLOVR 0x0008 +#define EUR_CR_CLKGATECTLOVR_2D_CLKO_MASK 0x00000003U +#define EUR_CR_CLKGATECTLOVR_2D_CLKO_SHIFT 0 +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030U +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300U +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8 +#define 
EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000U +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000U +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16 +#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000U +#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20 +/* Register EUR_CR_CORE_ID */ +#define EUR_CR_CORE_ID 0x0010 +#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU +#define EUR_CR_CORE_ID_CONFIG_SHIFT 0 +#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U +#define EUR_CR_CORE_ID_ID_SHIFT 16 +/* Register EUR_CR_CORE_REVISION */ +#define EUR_CR_CORE_REVISION 0x0014 +#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU +#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0 +#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U +#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8 +#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U +#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16 +#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U +#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24 +/* Register EUR_CR_DESIGNER_REV_FIELD1 */ +#define EUR_CR_DESIGNER_REV_FIELD1 0x0018 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0 +/* Register EUR_CR_DESIGNER_REV_FIELD2 */ +#define EUR_CR_DESIGNER_REV_FIELD2 0x001C +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0 +/* Register EUR_CR_SOFT_RESET */ +#define EUR_CR_SOFT_RESET 0x0080 +#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U +#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0 +#define EUR_CR_SOFT_RESET_TWOD_RESET_MASK 0x00000002U +#define EUR_CR_SOFT_RESET_TWOD_RESET_SHIFT 1 +#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U +#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2 +#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008U +#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3 +#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010U +#define 
EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4 +#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U +#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5 +#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040U +#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6 +/* Register EUR_CR_EVENT_HOST_ENABLE2 */ +#define EUR_CR_EVENT_HOST_ENABLE2 0x0110 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE2_BIF_REQUESTER_FAULT_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE2_BIF_REQUESTER_FAULT_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR2 */ +#define EUR_CR_EVENT_HOST_CLEAR2 0x0114 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR2_BIF_REQUESTER_FAULT_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR2_BIF_REQUESTER_FAULT_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_SHIFT 3 +#define 
EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS2 */ +#define EUR_CR_EVENT_STATUS2 0x0118U +#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 7 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 6 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 5 +#define EUR_CR_EVENT_STATUS2_BIF_REQUESTER_FAULT_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS2_BIF_REQUESTER_FAULT_SHIFT 4 +#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_SHIFT 3 +#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_SHIFT 2 +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS */ +#define EUR_CR_EVENT_STATUS 0x012CU +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define 
EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U 
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_ENABLE */ +#define EUR_CR_EVENT_HOST_ENABLE 0x0130 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24 +#define 
EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U +#define 
EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR */ +#define EUR_CR_EVENT_HOST_CLEAR 0x0134 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U +#define 
EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 
0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_PDS_EXEC_BASE */ +#define EUR_CR_PDS_EXEC_BASE 0x0AB8 +#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_EVENT_KICKER */ +#define EUR_CR_EVENT_KICKER 0x0AC4 +#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4 +/* Register EUR_CR_EVENT_KICK */ +#define EUR_CR_EVENT_KICK 0x0AC8 +#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK_NOW_SHIFT 0 +/* Register EUR_CR_EVENT_TIMER */ +#define EUR_CR_EVENT_TIMER 0x0ACC +#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U +#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24 +#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU +#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0 +/* Register EUR_CR_PDS_INV0 */ +#define EUR_CR_PDS_INV0 0x0AD0 +#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV0_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV1 */ +#define EUR_CR_PDS_INV1 0x0AD4 +#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV1_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV2 */ +#define EUR_CR_PDS_INV2 0x0AD8 +#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001U 
+#define EUR_CR_PDS_INV2_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV3 */ +#define EUR_CR_PDS_INV3 0x0ADC +#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV3_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV_CSC */ +#define EUR_CR_PDS_INV_CSC 0x0AE0 +#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U +#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0 +/* Register EUR_CR_PDS_PC_BASE */ +#define EUR_CR_PDS_PC_BASE 0x0B2C +#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFFU +#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0 +/* Register EUR_CR_BIF_CTRL */ +#define EUR_CR_BIF_CTRL 0x0C00 +#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U +#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0 +#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U +#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1 +#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004U +#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2 +#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U +#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_MASK 0x00000800U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_SHIFT 11 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK 0x00010000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_SHIFT 16 +/* 
Register EUR_CR_BIF_INT_STAT */ +#define EUR_CR_BIF_INT_STAT 0x0C04 +#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFU +#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0 +#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000U +#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000U +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15 +/* Register EUR_CR_BIF_FAULT */ +#define EUR_CR_BIF_FAULT 0x0C08 +#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_TILE0 */ +#define EUR_CR_BIF_TILE0 0x0C0C +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE0_CFG_SHIFT 24 +/* Register EUR_CR_BIF_TILE1 */ +#define EUR_CR_BIF_TILE1 0x0C10 +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE1_CFG_SHIFT 24 +/* Register EUR_CR_BIF_TILE2 */ +#define EUR_CR_BIF_TILE2 0x0C14 +#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE2_CFG_SHIFT 24 +/* Register EUR_CR_BIF_TILE3 */ +#define EUR_CR_BIF_TILE3 0x0C18 +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE3_CFG_SHIFT 24 +/* Register EUR_CR_BIF_TILE4 */ +#define 
EUR_CR_BIF_TILE4 0x0C1C +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE4_CFG_SHIFT 24 +/* Register EUR_CR_BIF_TILE5 */ +#define EUR_CR_BIF_TILE5 0x0C20 +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE5_CFG_SHIFT 24 +/* Register EUR_CR_BIF_TILE6 */ +#define EUR_CR_BIF_TILE6 0x0C24 +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE6_CFG_SHIFT 24 +/* Register EUR_CR_BIF_TILE7 */ +#define EUR_CR_BIF_TILE7 0x0C28 +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE7_CFG_SHIFT 24 +/* Register EUR_CR_BIF_TILE8 */ +#define EUR_CR_BIF_TILE8 0x0C2C +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE8_CFG_SHIFT 24 +/* Register EUR_CR_BIF_TILE9 */ +#define EUR_CR_BIF_TILE9 0x0C30 +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 
12 +#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE9_CFG_SHIFT 24 +/* Register EUR_CR_BIF_DIR_LIST_BASE1 */ +#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38 +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE2 */ +#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE3 */ +#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40 +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE4 */ +#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44 +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE5 */ +#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48 +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE6 */ +#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE7 */ +#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50 +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE8 */ +#define EUR_CR_BIF_DIR_LIST_BASE8 0x0C54 +#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE9 */ +#define EUR_CR_BIF_DIR_LIST_BASE9 0x0C58 +#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE10 */ +#define EUR_CR_BIF_DIR_LIST_BASE10 0x0C5C +#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_SHIFT 12 +/* 
Register EUR_CR_BIF_DIR_LIST_BASE11 */ +#define EUR_CR_BIF_DIR_LIST_BASE11 0x0C60 +#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE12 */ +#define EUR_CR_BIF_DIR_LIST_BASE12 0x0C64 +#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE13 */ +#define EUR_CR_BIF_DIR_LIST_BASE13 0x0C68 +#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE14 */ +#define EUR_CR_BIF_DIR_LIST_BASE14 0x0C6C +#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE15 */ +#define EUR_CR_BIF_DIR_LIST_BASE15 0x0C70 +#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_BANK_SET */ +#define EUR_CR_BIF_BANK_SET 0x0C74 +#define EUR_CR_BIF_BANK_SET_SELECT_MASK 0x000003FFU +#define EUR_CR_BIF_BANK_SET_SELECT_SHIFT 0 +/* Register EUR_CR_BIF_BANK0 */ +#define EUR_CR_BIF_BANK0 0x0C78 +#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FU +#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0 +#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0U +#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4 +#define EUR_CR_BIF_BANK0_INDEX_HOST_MASK 0x00000F00U +#define EUR_CR_BIF_BANK0_INDEX_HOST_SHIFT 8 +#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000U +#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12 +#define EUR_CR_BIF_BANK0_INDEX_2D_MASK 0x000F0000U +#define EUR_CR_BIF_BANK0_INDEX_2D_SHIFT 16 +/* Register EUR_CR_BIF_BANK1 */ +#define EUR_CR_BIF_BANK1 0x0C7C +#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FU +#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0 +#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0U +#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4 +#define EUR_CR_BIF_BANK1_INDEX_HOST_MASK 0x00000F00U 
+#define EUR_CR_BIF_BANK1_INDEX_HOST_SHIFT 8 +#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000U +#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12 +#define EUR_CR_BIF_BANK1_INDEX_2D_MASK 0x000F0000U +#define EUR_CR_BIF_BANK1_INDEX_2D_SHIFT 16 +/* Register EUR_CR_BIF_ADT_TTE */ +#define EUR_CR_BIF_ADT_TTE 0x0C80 +#define EUR_CR_BIF_ADT_TTE_VALUE_MASK 0x000000FFU +#define EUR_CR_BIF_ADT_TTE_VALUE_SHIFT 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE0 */ +#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_TWOD_REQ_BASE */ +#define EUR_CR_BIF_TWOD_REQ_BASE 0x0C88 +#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_TA_REQ_BASE */ +#define EUR_CR_BIF_TA_REQ_BASE 0x0C90 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_MEM_ARB_FLOWRATES_1 */ +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1 0x0C94 +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_MMU_MASK 0x00000007U +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_MMU_SHIFT 0 +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_CACHE_MASK 0x00000038U +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_CACHE_SHIFT 3 +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_VDM_MASK 0x000001C0U +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_VDM_SHIFT 6 +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TE_MASK 0x00000E00U +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TE_SHIFT 9 +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TWOD_MASK 0x00007000U +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TWOD_SHIFT 12 +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_PBE_MASK 0x00038000U +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_PBE_SHIFT 15 +/* Register EUR_CR_BIF_MEM_ARB_FLOWRATES_2 */ +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2 0x0C98 +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_HOST_MASK 0x00000007U +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_HOST_SHIFT 0 +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_USE_MASK 
0x00000038U +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_USE_SHIFT 3 +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_ISP_MASK 0x000001C0U +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_ISP_SHIFT 6 +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_TSPP_MASK 0x00000E00U +#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_TSPP_SHIFT 9 +/* Register EUR_CR_BIF_MEM_ARB_CONFIG */ +#define EUR_CR_BIF_MEM_ARB_CONFIG 0x0CA0 +#define EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_MASK 0x0000000FU +#define EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT 0 +#define EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_MASK 0x00000FF0U +#define EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT 4 +#define EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_MASK 0x00FFF000U +#define EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT 12 +/* Register EUR_CR_BIF_MEM_REQ_STAT */ +#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU +#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0 +/* Register EUR_CR_BIF_3D_REQ_BASE */ +#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_ZLS_REQ_BASE */ +#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_BANK_STATUS */ +#define EUR_CR_BIF_BANK_STATUS 0x0CB4 +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001U +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0 +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002U +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1 +/* Register EUR_CR_2D_BLIT_STATUS */ +#define EUR_CR_2D_BLIT_STATUS 0x0E04 +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0 +#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U +#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24 +/* Register EUR_CR_2D_VIRTUAL_FIFO_0 */ +#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10 +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 
0x00000001U +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12 +/* Register EUR_CR_2D_VIRTUAL_FIFO_1 */ +#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24 +/* Register EUR_CR_2D_SOCIF */ +#define EUR_CR_2D_SOCIF 0x0E18 +#define EUR_CR_2D_SOCIF_FREESPACE_MASK 0x000000FFU +#define EUR_CR_2D_SOCIF_FREESPACE_SHIFT 0 +/* Register EUR_CR_2D_ALPHA */ +#define EUR_CR_2D_ALPHA 0x0E1C +#define EUR_CR_2D_ALPHA_COMPONENT_ONE_MASK 0x0000FF00U +#define EUR_CR_2D_ALPHA_COMPONENT_ONE_SHIFT 8 +#define EUR_CR_2D_ALPHA_COMPONENT_ZERO_MASK 0x000000FFU +#define EUR_CR_2D_ALPHA_COMPONENT_ZERO_SHIFT 0 +/* Table EUR_CR_USE_CODE_BASE */ +/* Register EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X))) +#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_SHIFT 25 +/* Number of entries in table EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16 +#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16 + +#endif /* _SGX535DEFS_KM_H_ */ + diff --git a/pvr-source/services4/srvkm/hwdefs/sgx540defs.h b/pvr-source/services4/srvkm/hwdefs/sgx540defs.h new file mode 100644 index 0000000..47080c7 --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgx540defs.h @@ -0,0 +1,605 @@ 
+/*************************************************************************/ /*! +@Title Hardware defs for SGX540. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _SGX540DEFS_KM_H_ +#define _SGX540DEFS_KM_H_ + +/* Register EUR_CR_CLKGATECTL */ +#define EUR_CR_CLKGATECTL 0x0000 +#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000CU +#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2 +#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0U +#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6 +#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00U +#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10 +#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL_PDS_CLKG_MASK 0x0000C000U +#define EUR_CR_CLKGATECTL_PDS_CLKG_SHIFT 14 +#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x000C0000U +#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 18 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24 +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_MASK 0x10000000U +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SHIFT 28 +/* Register EUR_CR_CLKGATECTL2 */ +#define EUR_CR_CLKGATECTL2 
0x0004 +#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_MASK 0x0000000CU +#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_SHIFT 2 +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0U +#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6 +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00U +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10 +#define EUR_CR_CLKGATECTL2_MADD0_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL2_MADD0_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000U +#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14 +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000U +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18 +#define EUR_CR_CLKGATECTL2_MADD1_CLKG_MASK 0x00300000U +#define EUR_CR_CLKGATECTL2_MADD1_CLKG_SHIFT 20 +/* Register EUR_CR_CLKGATESTATUS */ +#define EUR_CR_CLKGATESTATUS 0x0008 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001U +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0 +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002U +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004U +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2 +#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008U +#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3 +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010U +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020U +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5 +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040U +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6 +#define EUR_CR_CLKGATESTATUS_PDS_CLKS_MASK 0x00000080U +#define 
EUR_CR_CLKGATESTATUS_PDS_CLKS_SHIFT 7 +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100U +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8 +#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_MASK 0x00000200U +#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_SHIFT 9 +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400U +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10 +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800U +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11 +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000U +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12 +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000U +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13 +#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_MASK 0x00004000U +#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_SHIFT 14 +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000U +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15 +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000U +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16 +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000U +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17 +#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_MASK 0x00040000U +#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_SHIFT 18 +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_MASK 0x00080000U +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SHIFT 19 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00100000U +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 20 +/* Register EUR_CR_CLKGATECTLOVR */ +#define EUR_CR_CLKGATECTLOVR 0x000C +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003U +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0 +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000CU +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030U +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4 +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0U +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6 +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300U +#define 
EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00U +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10 +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000U +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12 +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_MASK 0x0000C000U +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SHIFT 14 +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_MASK 0x00030000U +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SHIFT 16 +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x000C0000U +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 18 +/* Register EUR_CR_POWER */ +#define EUR_CR_POWER 0x001C +#define EUR_CR_POWER_PIPE_DISABLE_MASK 0x00000001U +#define EUR_CR_POWER_PIPE_DISABLE_SHIFT 0 +/* Register EUR_CR_CORE_ID */ +#define EUR_CR_CORE_ID 0x0020 +#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU +#define EUR_CR_CORE_ID_CONFIG_SHIFT 0 +#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U +#define EUR_CR_CORE_ID_ID_SHIFT 16 +/* Register EUR_CR_CORE_REVISION */ +#define EUR_CR_CORE_REVISION 0x0024 +#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU +#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0 +#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U +#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8 +#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U +#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16 +#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U +#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24 +/* Register EUR_CR_DESIGNER_REV_FIELD1 */ +#define EUR_CR_DESIGNER_REV_FIELD1 0x0028 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0 +/* Register EUR_CR_DESIGNER_REV_FIELD2 */ +#define EUR_CR_DESIGNER_REV_FIELD2 0x002C +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0 +/* Register EUR_CR_SOFT_RESET */ +#define EUR_CR_SOFT_RESET 0x0080 +#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U 
+#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0 +#define EUR_CR_SOFT_RESET_VDM_RESET_MASK 0x00000002U +#define EUR_CR_SOFT_RESET_VDM_RESET_SHIFT 1 +#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U +#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2 +#define EUR_CR_SOFT_RESET_TE_RESET_MASK 0x00000008U +#define EUR_CR_SOFT_RESET_TE_RESET_SHIFT 3 +#define EUR_CR_SOFT_RESET_MTE_RESET_MASK 0x00000010U +#define EUR_CR_SOFT_RESET_MTE_RESET_SHIFT 4 +#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U +#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5 +#define EUR_CR_SOFT_RESET_ISP2_RESET_MASK 0x00000040U +#define EUR_CR_SOFT_RESET_ISP2_RESET_SHIFT 6 +#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000080U +#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 7 +#define EUR_CR_SOFT_RESET_PDS_RESET_MASK 0x00000100U +#define EUR_CR_SOFT_RESET_PDS_RESET_SHIFT 8 +#define EUR_CR_SOFT_RESET_PBE_RESET_MASK 0x00000200U +#define EUR_CR_SOFT_RESET_PBE_RESET_SHIFT 9 +#define EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK 0x00000400U +#define EUR_CR_SOFT_RESET_CACHEL2_RESET_SHIFT 10 +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK 0x00000800U +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SHIFT 11 +#define EUR_CR_SOFT_RESET_MADD_RESET_MASK 0x00001000U +#define EUR_CR_SOFT_RESET_MADD_RESET_SHIFT 12 +#define EUR_CR_SOFT_RESET_ITR_RESET_MASK 0x00002000U +#define EUR_CR_SOFT_RESET_ITR_RESET_SHIFT 13 +#define EUR_CR_SOFT_RESET_TEX_RESET_MASK 0x00004000U +#define EUR_CR_SOFT_RESET_TEX_RESET_SHIFT 14 +#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00008000U +#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 15 +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK 0x00010000U +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SHIFT 16 +#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00020000U +#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 17 +/* Register EUR_CR_EVENT_HOST_ENABLE2 */ +#define EUR_CR_EVENT_HOST_ENABLE2 0x0110 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4 +#define 
EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR2 */ +#define EUR_CR_EVENT_HOST_CLEAR2 0x0114 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS2 */ +#define EUR_CR_EVENT_STATUS2 0x0118 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0 +/* Register EUR_CR_EVENT_STATUS */ +#define EUR_CR_EVENT_STATUS 0x012CU +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U 
+#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 
0x00000800U +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_ENABLE */ +#define EUR_CR_EVENT_HOST_ENABLE 0x0130 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U 
+#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10 
+#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_EVENT_HOST_CLEAR */ +#define EUR_CR_EVENT_HOST_CLEAR 0x0134 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000U +#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27 +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define 
EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define 
EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0 +/* Register EUR_CR_TIMER */ +#define EUR_CR_TIMER 0x0144 +#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU +#define EUR_CR_TIMER_VALUE_SHIFT 0 +/* Register EUR_CR_EVENT_KICK1 */ +#define EUR_CR_EVENT_KICK1 0x0AB0 +#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FFU +#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0 +/* Register EUR_CR_PDS_EXEC_BASE */ +#define EUR_CR_PDS_EXEC_BASE 0x0AB8 +#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_EVENT_KICK2 */ +#define EUR_CR_EVENT_KICK2 0x0AC0 +#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0 +/* Register EUR_CR_EVENT_KICKER */ +#define EUR_CR_EVENT_KICKER 0x0AC4 +#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0x0FFFFFF0U +#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4 +/* 
Register EUR_CR_EVENT_KICK */ +#define EUR_CR_EVENT_KICK 0x0AC8 +#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK_NOW_SHIFT 0 +/* Register EUR_CR_EVENT_TIMER */ +#define EUR_CR_EVENT_TIMER 0x0ACC +#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U +#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24 +#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU +#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0 +/* Register EUR_CR_PDS_INV0 */ +#define EUR_CR_PDS_INV0 0x0AD0 +#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV0_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV1 */ +#define EUR_CR_PDS_INV1 0x0AD4 +#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV1_DSC_SHIFT 0 +/* Register EUR_CR_EVENT_KICK3 */ +#define EUR_CR_EVENT_KICK3 0x0AD8 +#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0 +/* Register EUR_CR_PDS_INV3 */ +#define EUR_CR_PDS_INV3 0x0ADC +#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV3_DSC_SHIFT 0 +/* Register EUR_CR_PDS_INV_CSC */ +#define EUR_CR_PDS_INV_CSC 0x0AE0 +#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U +#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0 +/* Register EUR_CR_PDS_PC_BASE */ +#define EUR_CR_PDS_PC_BASE 0x0B2C +#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x00FFFFFFU +#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0 +/* Register EUR_CR_BIF_CTRL */ +#define EUR_CR_BIF_CTRL 0x0C00 +#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U +#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0 +#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U +#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1 +#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004U +#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2 +#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U +#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8 +#define 
EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15 +/* Register EUR_CR_BIF_INT_STAT */ +#define EUR_CR_BIF_INT_STAT 0x0C04 +#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFU +#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0 +#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000U +#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00008000U +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 15 +/* Register EUR_CR_BIF_FAULT */ +#define EUR_CR_BIF_FAULT 0x0C08 +#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0U +#define EUR_CR_BIF_FAULT_SB_SHIFT 4 +#define EUR_CR_BIF_FAULT_ADDR_MASK 0x0FFFF000U +#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_DIR_LIST_BASE0 */ +#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12 +/* Register EUR_CR_BIF_TA_REQ_BASE */ +#define EUR_CR_BIF_TA_REQ_BASE 0x0C90 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_BIF_MEM_REQ_STAT */ +#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU +#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0 +/* Register EUR_CR_BIF_3D_REQ_BASE */ +#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20 +/* Register 
EUR_CR_BIF_ZLS_REQ_BASE */ +#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0x0FF00000U +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20 +/* Register EUR_CR_2D_BLIT_STATUS */ +#define EUR_CR_2D_BLIT_STATUS 0x0E04 +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0 +#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U +#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24 +/* Register EUR_CR_2D_VIRTUAL_FIFO_0 */ +#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10 +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12 +/* Register EUR_CR_2D_VIRTUAL_FIFO_1 */ +#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24 +/* Table EUR_CR_USE_CODE_BASE */ +/* Register EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X))) +#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x00FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_DM_MASK 0x03000000U +#define EUR_CR_USE_CODE_BASE_DM_SHIFT 24 +/* Number of entries in table EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16 +#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16 + +#endif /* _SGX540DEFS_KM_H_ */ + diff --git a/pvr-source/services4/srvkm/hwdefs/sgx543_v1.164defs.h 
b/pvr-source/services4/srvkm/hwdefs/sgx543_v1.164defs.h new file mode 100644 index 0000000..8c8b353 --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgx543_v1.164defs.h @@ -0,0 +1,1396 @@ +/*************************************************************************/ /*! +@Title Hardware defs for SGX543_V1.164. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _SGX543_V1_164DEFS_KM_H_ +#define _SGX543_V1_164DEFS_KM_H_ + +/* Register EUR_CR_CLKGATECTL */ +#define EUR_CR_CLKGATECTL 0x0000 +#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL_ISP_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000CU +#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2 +#define EUR_CR_CLKGATECTL_ISP2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL_TSP_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0U +#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6 +#define EUR_CR_CLKGATECTL_TE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL_MTE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00U +#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10 +#define EUR_CR_CLKGATECTL_DPM_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL_VDM_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_PDS_CLKG_MASK 0x0000C000U +#define EUR_CR_CLKGATECTL_PDS_CLKG_SHIFT 14 +#define EUR_CR_CLKGATECTL_PDS_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SHIFT 
16 +#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x000C0000U +#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 18 +#define EUR_CR_CLKGATECTL_TA_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_MASK 0x00300000U +#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_SHIFT 20 +#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SIGNED 0 +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_MASK 0x10000000U +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SHIFT 28 +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SIGNED 0 +/* Register EUR_CR_CLKGATECTL2 */ +#define EUR_CR_CLKGATECTL2 0x0004 +#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL2_PBE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_MASK 0x0000000CU +#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_SHIFT 2 +#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0U +#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6 +#define EUR_CR_CLKGATECTL2_USE0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00U +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10 +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000U +#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14 +#define EUR_CR_CLKGATECTL2_USE1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000U +#define 
EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18 +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_MASK 0x00C00000U +#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_SHIFT 22 +#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_MASK 0x03000000U +#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_SHIFT 24 +#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_MASK 0x0C000000U +#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_SHIFT 26 +#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_SIGNED 0 +/* Register EUR_CR_CLKGATESTATUS */ +#define EUR_CR_CLKGATESTATUS 0x0008 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001U +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002U +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1 +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004U +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008U +#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3 +#define EUR_CR_CLKGATESTATUS_TE_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010U +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4 +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020U +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040U +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6 +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_PDS_CLKS_MASK 0x00000080U +#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SHIFT 7 +#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100U +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8 +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SIGNED 
0 +#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_MASK 0x00000200U +#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_SHIFT 9 +#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400U +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10 +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800U +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11 +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000U +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12 +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000U +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13 +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000U +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15 +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000U +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16 +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000U +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17 +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_MASK 0x00080000U +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SHIFT 19 +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00100000U +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 20 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_MASK 0x00200000U +#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_SHIFT 21 +#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_MASK 0x00400000U +#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_SHIFT 22 +#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_MASK 0x00800000U +#define 
EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_SHIFT 23 +#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_MASK 0x01000000U +#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_SHIFT 24 +#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_SIGNED 0 +/* Register EUR_CR_CLKGATECTLOVR */ +#define EUR_CR_CLKGATECTLOVR 0x000C +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003U +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0 +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000CU +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2 +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030U +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0U +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6 +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300U +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8 +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00U +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000U +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12 +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_MASK 0x0000C000U +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SHIFT 14 +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_MASK 0x00030000U +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SHIFT 16 +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x000C0000U +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 18 +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_MASK 0x00300000U +#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_SHIFT 20 +#define 
EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_SIGNED 0 +/* Register EUR_CR_POWER */ +#define EUR_CR_POWER 0x001C +#define EUR_CR_POWER_PIPE_DISABLE_MASK 0x00000001U +#define EUR_CR_POWER_PIPE_DISABLE_SHIFT 0 +#define EUR_CR_POWER_PIPE_DISABLE_SIGNED 0 +/* Register EUR_CR_CORE_ID */ +#define EUR_CR_CORE_ID 0x0020 +#define EUR_CR_CORE_ID_CONFIG_MULTI_MASK 0x00000001U +#define EUR_CR_CORE_ID_CONFIG_MULTI_SHIFT 0 +#define EUR_CR_CORE_ID_CONFIG_MULTI_SIGNED 0 +#define EUR_CR_CORE_ID_CONFIG_BASE_MASK 0x00000002U +#define EUR_CR_CORE_ID_CONFIG_BASE_SHIFT 1 +#define EUR_CR_CORE_ID_CONFIG_BASE_SIGNED 0 +#define EUR_CR_CORE_ID_CONFIG_MASK 0x000000FCU +#define EUR_CR_CORE_ID_CONFIG_SHIFT 2 +#define EUR_CR_CORE_ID_CONFIG_SIGNED 0 +#define EUR_CR_CORE_ID_CONFIG_CORES_MASK 0x00000F00U +#define EUR_CR_CORE_ID_CONFIG_CORES_SHIFT 8 +#define EUR_CR_CORE_ID_CONFIG_CORES_SIGNED 0 +#define EUR_CR_CORE_ID_CONFIG_SLC_MASK 0x0000F000U +#define EUR_CR_CORE_ID_CONFIG_SLC_SHIFT 12 +#define EUR_CR_CORE_ID_CONFIG_SLC_SIGNED 0 +#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U +#define EUR_CR_CORE_ID_ID_SHIFT 16 +#define EUR_CR_CORE_ID_ID_SIGNED 0 +/* Register EUR_CR_CORE_REVISION */ +#define EUR_CR_CORE_REVISION 0x0024 +#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU +#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0 +#define EUR_CR_CORE_REVISION_MAINTENANCE_SIGNED 0 +#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U +#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8 +#define EUR_CR_CORE_REVISION_MINOR_SIGNED 0 +#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U +#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16 +#define EUR_CR_CORE_REVISION_MAJOR_SIGNED 0 +#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U +#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24 +#define EUR_CR_CORE_REVISION_DESIGNER_SIGNED 0 +/* Register EUR_CR_DESIGNER_REV_FIELD1 */ +#define EUR_CR_DESIGNER_REV_FIELD1 0x0028 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU +#define 
EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SIGNED 0 +/* Register EUR_CR_DESIGNER_REV_FIELD2 */ +#define EUR_CR_DESIGNER_REV_FIELD2 0x002C +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0 +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SIGNED 0 +/* Register EUR_CR_SOFT_RESET */ +#define EUR_CR_SOFT_RESET 0x0080 +#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U +#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0 +#define EUR_CR_SOFT_RESET_BIF_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_VDM_RESET_MASK 0x00000002U +#define EUR_CR_SOFT_RESET_VDM_RESET_SHIFT 1 +#define EUR_CR_SOFT_RESET_VDM_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U +#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2 +#define EUR_CR_SOFT_RESET_DPM_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TE_RESET_MASK 0x00000008U +#define EUR_CR_SOFT_RESET_TE_RESET_SHIFT 3 +#define EUR_CR_SOFT_RESET_TE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_MTE_RESET_MASK 0x00000010U +#define EUR_CR_SOFT_RESET_MTE_RESET_SHIFT 4 +#define EUR_CR_SOFT_RESET_MTE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U +#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5 +#define EUR_CR_SOFT_RESET_ISP_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_ISP2_RESET_MASK 0x00000040U +#define EUR_CR_SOFT_RESET_ISP2_RESET_SHIFT 6 +#define EUR_CR_SOFT_RESET_ISP2_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000080U +#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 7 +#define EUR_CR_SOFT_RESET_TSP_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_PDS_RESET_MASK 0x00000100U +#define EUR_CR_SOFT_RESET_PDS_RESET_SHIFT 8 +#define EUR_CR_SOFT_RESET_PDS_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_PBE_RESET_MASK 0x00000200U +#define EUR_CR_SOFT_RESET_PBE_RESET_SHIFT 9 +#define EUR_CR_SOFT_RESET_PBE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK 0x00000400U +#define 
EUR_CR_SOFT_RESET_TCU_L2_RESET_SHIFT 10 +#define EUR_CR_SOFT_RESET_TCU_L2_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK 0x00000800U +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SHIFT 11 +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_ITR_RESET_MASK 0x00002000U +#define EUR_CR_SOFT_RESET_ITR_RESET_SHIFT 13 +#define EUR_CR_SOFT_RESET_ITR_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TEX_RESET_MASK 0x00004000U +#define EUR_CR_SOFT_RESET_TEX_RESET_SHIFT 14 +#define EUR_CR_SOFT_RESET_TEX_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00008000U +#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 15 +#define EUR_CR_SOFT_RESET_USE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK 0x00010000U +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SHIFT 16 +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00020000U +#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 17 +#define EUR_CR_SOFT_RESET_TA_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK 0x00040000U +#define EUR_CR_SOFT_RESET_DCU_L2_RESET_SHIFT 18 +#define EUR_CR_SOFT_RESET_DCU_L2_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK 0x00080000U +#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_SHIFT 19 +#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_ENABLE2 */ +#define EUR_CR_EVENT_HOST_ENABLE2 0x0110 +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_SHIFT 9 +#define 
EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_CLEAR2 */ +#define EUR_CR_EVENT_HOST_CLEAR2 0x0114 +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11 +#define 
EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define 
EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SIGNED 0 +/* Register EUR_CR_EVENT_STATUS2 */ +#define EUR_CR_EVENT_STATUS2 0x0118 +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11 +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_SHIFT 10 +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_SHIFT 9 +#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_SHIFT 8 +#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SHIFT 7 +#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SHIFT 6 +#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5 +#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SIGNED 0 +#define 
EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SIGNED 0 +/* Register EUR_CR_EVENT_STATUS */ +#define EUR_CR_EVENT_STATUS 0x012C +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29 +#define EUR_CR_EVENT_STATUS_TIMER_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SIGNED 0 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SIGNED 0 +#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20 
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SIGNED 0 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SIGNED 0 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_STATUS_SW_EVENT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6 +#define 
EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_ENABLE */ +#define EUR_CR_EVENT_HOST_ENABLE 0x0130 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define 
EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12 +#define 
EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define 
EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_CLEAR */ +#define EUR_CR_EVENT_HOST_CLEAR 0x0134 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SIGNED 0 +#define 
EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U +#define 
EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SIGNED 0 +/* Register EUR_CR_TIMER */ +#define EUR_CR_TIMER 0x0144 +#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU +#define EUR_CR_TIMER_VALUE_SHIFT 0 +#define EUR_CR_TIMER_VALUE_SIGNED 0 +/* Register EUR_CR_EVENT_KICK1 */ +#define EUR_CR_EVENT_KICK1 0x0AB0 +#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FFU +#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK1_NOW_SIGNED 0 +/* Register EUR_CR_EVENT_KICK2 */ +#define EUR_CR_EVENT_KICK2 0x0AC0 +#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0 +#define 
EUR_CR_EVENT_KICK2_NOW_SIGNED 0 +/* Register EUR_CR_EVENT_KICKER */ +#define EUR_CR_EVENT_KICKER 0x0AC4 +#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4 +#define EUR_CR_EVENT_KICKER_ADDRESS_SIGNED 0 +/* Register EUR_CR_EVENT_KICK */ +#define EUR_CR_EVENT_KICK 0x0AC8 +#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK_NOW_SIGNED 0 +/* Register EUR_CR_EVENT_TIMER */ +#define EUR_CR_EVENT_TIMER 0x0ACC +#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U +#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24 +#define EUR_CR_EVENT_TIMER_ENABLE_SIGNED 0 +#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU +#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0 +#define EUR_CR_EVENT_TIMER_VALUE_SIGNED 0 +/* Register EUR_CR_PDS_INV0 */ +#define EUR_CR_PDS_INV0 0x0AD0 +#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV0_DSC_SHIFT 0 +#define EUR_CR_PDS_INV0_DSC_SIGNED 0 +/* Register EUR_CR_PDS_INV1 */ +#define EUR_CR_PDS_INV1 0x0AD4 +#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV1_DSC_SHIFT 0 +#define EUR_CR_PDS_INV1_DSC_SIGNED 0 +/* Register EUR_CR_EVENT_KICK3 */ +#define EUR_CR_EVENT_KICK3 0x0AD8 +#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK3_NOW_SIGNED 0 +/* Register EUR_CR_PDS_INV3 */ +#define EUR_CR_PDS_INV3 0x0ADC +#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV3_DSC_SHIFT 0 +#define EUR_CR_PDS_INV3_DSC_SIGNED 0 +/* Register EUR_CR_PDS_INV_CSC */ +#define EUR_CR_PDS_INV_CSC 0x0AE0 +#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U +#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0 +#define EUR_CR_PDS_INV_CSC_KICK_SIGNED 0 +/* Register EUR_CR_BIF_CTRL */ +#define EUR_CR_BIF_CTRL 0x0C00 +#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U +#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0 +#define EUR_CR_BIF_CTRL_NOREORDER_SIGNED 0 +#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U 
+#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1 +#define EUR_CR_BIF_CTRL_PAUSE_SIGNED 0 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_MASK 0x00000400U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_SHIFT 10 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_MASK 0x00010000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_SHIFT 16 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_MASK 0x00020000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SHIFT 17 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_MASK 0x00040000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SHIFT 18 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_MASK 0x00080000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SHIFT 19 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SIGNED 0 +/* Register EUR_CR_BIF_INT_STAT */ +#define EUR_CR_BIF_INT_STAT 0x0C04 +#define EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK 0x00003FFFU +#define 
EUR_CR_BIF_INT_STAT_FAULT_REQ_SHIFT 0 +#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SIGNED 0 +#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK 0x00070000U +#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SHIFT 16 +#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SIGNED 0 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00080000U +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 19 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SIGNED 0 +/* Register EUR_CR_BIF_FAULT */ +#define EUR_CR_BIF_FAULT 0x0C08 +#define EUR_CR_BIF_FAULT_CID_MASK 0x0000000FU +#define EUR_CR_BIF_FAULT_CID_SHIFT 0 +#define EUR_CR_BIF_FAULT_CID_SIGNED 0 +#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0U +#define EUR_CR_BIF_FAULT_SB_SHIFT 4 +#define EUR_CR_BIF_FAULT_SB_SIGNED 0 +#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12 +#define EUR_CR_BIF_FAULT_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_TILE0 */ +#define EUR_CR_BIF_TILE0 0x0C0C +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE0_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE0_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE1 */ +#define EUR_CR_BIF_TILE1 0x0C10 +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE1_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE1_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE2 */ +#define EUR_CR_BIF_TILE2 0x0C14 +#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0 +#define 
EUR_CR_BIF_TILE2_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE2_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE2_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE3 */ +#define EUR_CR_BIF_TILE3 0x0C18 +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE3_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE3_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE4 */ +#define EUR_CR_BIF_TILE4 0x0C1C +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE4_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE4_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE5 */ +#define EUR_CR_BIF_TILE5 0x0C20 +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE5_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE5_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE6 */ +#define EUR_CR_BIF_TILE6 0x0C24 +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SIGNED 0 +#define 
EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE6_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE6_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE7 */ +#define EUR_CR_BIF_TILE7 0x0C28 +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE7_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE7_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE8 */ +#define EUR_CR_BIF_TILE8 0x0C2C +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE8_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE8_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE9 */ +#define EUR_CR_BIF_TILE9 0x0C30 +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE9_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE9_CFG_SIGNED 0 +/* Register EUR_CR_BIF_CTRL_INVAL */ +#define EUR_CR_BIF_CTRL_INVAL 0x0C34 +#define EUR_CR_BIF_CTRL_INVAL_PTE_MASK 0x00000004U +#define EUR_CR_BIF_CTRL_INVAL_PTE_SHIFT 2 +#define EUR_CR_BIF_CTRL_INVAL_PTE_SIGNED 0 +#define EUR_CR_BIF_CTRL_INVAL_ALL_MASK 0x00000008U 
+#define EUR_CR_BIF_CTRL_INVAL_ALL_SHIFT 3 +#define EUR_CR_BIF_CTRL_INVAL_ALL_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE1 */ +#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38 +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE2 */ +#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE3 */ +#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40 +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE4 */ +#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44 +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE5 */ +#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48 +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE6 */ +#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE7 */ +#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50 +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_BANK_SET */ +#define EUR_CR_BIF_BANK_SET 0x0C74 +#define EUR_CR_BIF_BANK_SET_SELECT_2D_MASK 0x00000001U +#define EUR_CR_BIF_BANK_SET_SELECT_2D_SHIFT 0 +#define EUR_CR_BIF_BANK_SET_SELECT_2D_SIGNED 0 +#define 
EUR_CR_BIF_BANK_SET_SELECT_3D_MASK 0x0000000CU +#define EUR_CR_BIF_BANK_SET_SELECT_3D_SHIFT 2 +#define EUR_CR_BIF_BANK_SET_SELECT_3D_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_HOST_MASK 0x00000010U +#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SHIFT 4 +#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_TA_MASK 0x000000C0U +#define EUR_CR_BIF_BANK_SET_SELECT_TA_SHIFT 6 +#define EUR_CR_BIF_BANK_SET_SELECT_TA_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_EDM_MASK 0x00000100U +#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SHIFT 8 +#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_MASK 0x00000200U +#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SHIFT 9 +#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SIGNED 0 +/* Register EUR_CR_BIF_BANK0 */ +#define EUR_CR_BIF_BANK0 0x0C78 +#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FU +#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0 +#define EUR_CR_BIF_BANK0_INDEX_EDM_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0U +#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4 +#define EUR_CR_BIF_BANK0_INDEX_TA_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000U +#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12 +#define EUR_CR_BIF_BANK0_INDEX_3D_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_PTLA_MASK 0x000F0000U +#define EUR_CR_BIF_BANK0_INDEX_PTLA_SHIFT 16 +#define EUR_CR_BIF_BANK0_INDEX_PTLA_SIGNED 0 +/* Register EUR_CR_BIF_BANK1 */ +#define EUR_CR_BIF_BANK1 0x0C7C +#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FU +#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0 +#define EUR_CR_BIF_BANK1_INDEX_EDM_SIGNED 0 +#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0U +#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4 +#define EUR_CR_BIF_BANK1_INDEX_TA_SIGNED 0 +#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000U +#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12 +#define EUR_CR_BIF_BANK1_INDEX_3D_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE0 */ +#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84 +#define 
EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_TA_REQ_BASE */ +#define EUR_CR_BIF_TA_REQ_BASE 0x0C90 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_MEM_REQ_STAT */ +#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU +#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_SIGNED 0 +/* Register EUR_CR_BIF_3D_REQ_BASE */ +#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20 +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_ZLS_REQ_BASE */ +#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_BANK_STATUS */ +#define EUR_CR_BIF_BANK_STATUS 0x0CB4 +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001U +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0 +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SIGNED 0 +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002U +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1 +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SIGNED 0 +/* Register EUR_CR_BIF_MMU_CTRL */ +#define EUR_CR_BIF_MMU_CTRL 0x0CD0 +#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_MASK 0x00000001U +#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_SHIFT 0 +#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_SIGNED 0 +#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_MASK 0x00000006U +#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT 1 +#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SIGNED 0 +#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_MASK 0x00000008U +#define 
EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_SHIFT 3 +#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_SIGNED 0 +#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK 0x00000010U +#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_SHIFT 4 +#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_SIGNED 0 +/* Register EUR_CR_2D_BLIT_STATUS */ +#define EUR_CR_2D_BLIT_STATUS 0x0E04 +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0 +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SIGNED 0 +#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U +#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24 +#define EUR_CR_2D_BLIT_STATUS_BUSY_SIGNED 0 +/* Register EUR_CR_2D_VIRTUAL_FIFO_0 */ +#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10 +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SIGNED 0 +/* Register EUR_CR_2D_VIRTUAL_FIFO_1 */ +#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SIGNED 0 +/* Register 
EUR_CR_BREAKPOINT0_START */ +#define EUR_CR_BREAKPOINT0_START 0x0F44 +#define EUR_CR_BREAKPOINT0_START_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT0_START_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT0_START_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT0_END */ +#define EUR_CR_BREAKPOINT0_END 0x0F48 +#define EUR_CR_BREAKPOINT0_END_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT0_END_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT0_END_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT0 */ +#define EUR_CR_BREAKPOINT0 0x0F4C +#define EUR_CR_BREAKPOINT0_MASK_DM_MASK 0x00000038U +#define EUR_CR_BREAKPOINT0_MASK_DM_SHIFT 3 +#define EUR_CR_BREAKPOINT0_MASK_DM_SIGNED 0 +#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK 0x00000004U +#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SHIFT 2 +#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_SHIFT 1 +#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_MASK 0x00000001U +#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_SHIFT 0 +#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_SIGNED 0 +/* Register EUR_CR_BREAKPOINT1_START */ +#define EUR_CR_BREAKPOINT1_START 0x0F50 +#define EUR_CR_BREAKPOINT1_START_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT1_START_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT1_START_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT1_END */ +#define EUR_CR_BREAKPOINT1_END 0x0F54 +#define EUR_CR_BREAKPOINT1_END_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT1_END_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT1_END_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT1 */ +#define EUR_CR_BREAKPOINT1 0x0F58 +#define EUR_CR_BREAKPOINT1_MASK_DM_MASK 0x00000038U +#define EUR_CR_BREAKPOINT1_MASK_DM_SHIFT 3 +#define EUR_CR_BREAKPOINT1_MASK_DM_SIGNED 0 +#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_MASK 0x00000004U +#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_SHIFT 2 +#define 
EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_SHIFT 1 +#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_MASK 0x00000001U +#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_SHIFT 0 +#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_SIGNED 0 +/* Register EUR_CR_BREAKPOINT2_START */ +#define EUR_CR_BREAKPOINT2_START 0x0F5C +#define EUR_CR_BREAKPOINT2_START_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT2_START_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT2_START_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT2_END */ +#define EUR_CR_BREAKPOINT2_END 0x0F60 +#define EUR_CR_BREAKPOINT2_END_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT2_END_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT2_END_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT2 */ +#define EUR_CR_BREAKPOINT2 0x0F64 +#define EUR_CR_BREAKPOINT2_MASK_DM_MASK 0x00000038U +#define EUR_CR_BREAKPOINT2_MASK_DM_SHIFT 3 +#define EUR_CR_BREAKPOINT2_MASK_DM_SIGNED 0 +#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_MASK 0x00000004U +#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_SHIFT 2 +#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_SHIFT 1 +#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_MASK 0x00000001U +#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_SHIFT 0 +#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_SIGNED 0 +/* Register EUR_CR_BREAKPOINT3_START */ +#define EUR_CR_BREAKPOINT3_START 0x0F68 +#define EUR_CR_BREAKPOINT3_START_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT3_START_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT3_START_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT3_END */ +#define EUR_CR_BREAKPOINT3_END 0x0F6C +#define EUR_CR_BREAKPOINT3_END_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT3_END_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT3_END_ADDRESS_SIGNED 0 
+/* Register EUR_CR_BREAKPOINT3 */ +#define EUR_CR_BREAKPOINT3 0x0F70 +#define EUR_CR_BREAKPOINT3_MASK_DM_MASK 0x00000038U +#define EUR_CR_BREAKPOINT3_MASK_DM_SHIFT 3 +#define EUR_CR_BREAKPOINT3_MASK_DM_SIGNED 0 +#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_MASK 0x00000004U +#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_SHIFT 2 +#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_SHIFT 1 +#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_MASK 0x00000001U +#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_SHIFT 0 +#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_SIGNED 0 +/* Register EUR_CR_BREAKPOINT_READ */ +#define EUR_CR_BREAKPOINT_READ 0x0F74 +#define EUR_CR_BREAKPOINT_READ_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT_READ_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT_READ_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT_TRAP */ +#define EUR_CR_BREAKPOINT_TRAP 0x0F78 +#define EUR_CR_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT_TRAP_CONTINUE_SHIFT 1 +#define EUR_CR_BREAKPOINT_TRAP_CONTINUE_SIGNED 0 +#define EUR_CR_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U +#define EUR_CR_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0 +#define EUR_CR_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0 +/* Register EUR_CR_BREAKPOINT */ +#define EUR_CR_BREAKPOINT 0x0F7C +#define EUR_CR_BREAKPOINT_MODULE_ID_MASK 0x000003C0U +#define EUR_CR_BREAKPOINT_MODULE_ID_SHIFT 6 +#define EUR_CR_BREAKPOINT_MODULE_ID_SIGNED 0 +#define EUR_CR_BREAKPOINT_ID_MASK 0x00000030U +#define EUR_CR_BREAKPOINT_ID_SHIFT 4 +#define EUR_CR_BREAKPOINT_ID_SIGNED 0 +#define EUR_CR_BREAKPOINT_UNTRAPPED_MASK 0x00000008U +#define EUR_CR_BREAKPOINT_UNTRAPPED_SHIFT 3 +#define EUR_CR_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_BREAKPOINT_TRAPPED_MASK 0x00000004U +#define EUR_CR_BREAKPOINT_TRAPPED_SHIFT 2 +#define EUR_CR_BREAKPOINT_TRAPPED_SIGNED 0 +/* Register EUR_CR_BREAKPOINT_TRAP_INFO0 */ +#define 
EUR_CR_BREAKPOINT_TRAP_INFO0 0x0F80 +#define EUR_CR_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT_TRAP_INFO1 */ +#define EUR_CR_BREAKPOINT_TRAP_INFO1 0x0F84 +#define EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U +#define EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10 +#define EUR_CR_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0 +#define EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U +#define EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8 +#define EUR_CR_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0 +#define EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U +#define EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3 +#define EUR_CR_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0 +#define EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U +#define EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1 +#define EUR_CR_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0 +#define EUR_CR_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U +#define EUR_CR_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0 +#define EUR_CR_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_0 */ +#define EUR_CR_USE_CODE_BASE_0 0x0A0C +#define EUR_CR_USE_CODE_BASE_ADDR_00_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_00_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_00_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_00_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_00_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_00_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_1 */ +#define EUR_CR_USE_CODE_BASE_1 0x0A10 +#define EUR_CR_USE_CODE_BASE_ADDR_01_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_01_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_01_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_01_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_01_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_01_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_2 */ +#define EUR_CR_USE_CODE_BASE_2 0x0A14 +#define EUR_CR_USE_CODE_BASE_ADDR_02_MASK 
0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_02_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_02_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_02_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_02_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_02_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_3 */ +#define EUR_CR_USE_CODE_BASE_3 0x0A18 +#define EUR_CR_USE_CODE_BASE_ADDR_03_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_03_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_03_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_03_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_03_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_03_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_4 */ +#define EUR_CR_USE_CODE_BASE_4 0x0A1C +#define EUR_CR_USE_CODE_BASE_ADDR_04_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_04_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_04_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_04_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_04_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_04_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_5 */ +#define EUR_CR_USE_CODE_BASE_5 0x0A20 +#define EUR_CR_USE_CODE_BASE_ADDR_05_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_05_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_05_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_05_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_05_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_05_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_6 */ +#define EUR_CR_USE_CODE_BASE_6 0x0A24 +#define EUR_CR_USE_CODE_BASE_ADDR_06_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_06_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_06_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_06_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_06_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_06_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_7 */ +#define EUR_CR_USE_CODE_BASE_7 0x0A28 +#define EUR_CR_USE_CODE_BASE_ADDR_07_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_07_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_07_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_07_MASK 
0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_07_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_07_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_8 */ +#define EUR_CR_USE_CODE_BASE_8 0x0A2C +#define EUR_CR_USE_CODE_BASE_ADDR_08_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_08_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_08_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_08_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_08_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_08_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_9 */ +#define EUR_CR_USE_CODE_BASE_9 0x0A30 +#define EUR_CR_USE_CODE_BASE_ADDR_09_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_09_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_09_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_09_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_09_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_09_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_10 */ +#define EUR_CR_USE_CODE_BASE_10 0x0A34 +#define EUR_CR_USE_CODE_BASE_ADDR_10_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_10_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_10_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_10_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_10_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_10_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_11 */ +#define EUR_CR_USE_CODE_BASE_11 0x0A38 +#define EUR_CR_USE_CODE_BASE_ADDR_11_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_11_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_11_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_11_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_11_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_11_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_12 */ +#define EUR_CR_USE_CODE_BASE_12 0x0A3C +#define EUR_CR_USE_CODE_BASE_ADDR_12_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_12_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_12_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_12_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_12_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_12_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_13 */ 
+#define EUR_CR_USE_CODE_BASE_13 0x0A40 +#define EUR_CR_USE_CODE_BASE_ADDR_13_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_13_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_13_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_13_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_13_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_13_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_14 */ +#define EUR_CR_USE_CODE_BASE_14 0x0A44 +#define EUR_CR_USE_CODE_BASE_ADDR_14_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_14_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_14_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_14_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_14_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_14_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_15 */ +#define EUR_CR_USE_CODE_BASE_15 0x0A48 +#define EUR_CR_USE_CODE_BASE_ADDR_15_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_15_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_15_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_15_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_15_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_15_SIGNED 0 +/* Table EUR_CR_USE_CODE_BASE */ +/* Register EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X))) +#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_SIGNED 0 +/* Number of entries in table EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16 +#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16 + +#endif /* _SGX543_V1_164DEFS_KM_H_ */ + diff --git a/pvr-source/services4/srvkm/hwdefs/sgx543defs.h b/pvr-source/services4/srvkm/hwdefs/sgx543defs.h new file mode 100644 index 0000000..0d3568d --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgx543defs.h @@ -0,0 +1,1487 @@ +/*************************************************************************/ /*! 
+@Title Hardware defs for SGX543. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _SGX543DEFS_KM_H_ +#define _SGX543DEFS_KM_H_ + +/* Register EUR_CR_CLKGATECTL */ +#define EUR_CR_CLKGATECTL 0x0000 +#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL_ISP_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000CU +#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2 +#define EUR_CR_CLKGATECTL_ISP2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL_TSP_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0U +#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6 +#define EUR_CR_CLKGATECTL_TE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL_MTE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00U +#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10 +#define EUR_CR_CLKGATECTL_DPM_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL_VDM_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_PDS_CLKG_MASK 0x0000C000U +#define EUR_CR_CLKGATECTL_PDS_CLKG_SHIFT 14 +#define EUR_CR_CLKGATECTL_PDS_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SHIFT 16 +#define 
EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x000C0000U +#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 18 +#define EUR_CR_CLKGATECTL_TA_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_MASK 0x00300000U +#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_SHIFT 20 +#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SIGNED 0 +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_MASK 0x10000000U +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SHIFT 28 +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SIGNED 0 +/* Register EUR_CR_CLKGATECTL2 */ +#define EUR_CR_CLKGATECTL2 0x0004 +#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL2_PBE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_MASK 0x0000000CU +#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_SHIFT 2 +#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0U +#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6 +#define EUR_CR_CLKGATECTL2_USE0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00U +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10 +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000U +#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14 +#define EUR_CR_CLKGATECTL2_USE1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000U +#define 
EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18 +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_MASK 0x00C00000U +#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_SHIFT 22 +#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_MASK 0x03000000U +#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_SHIFT 24 +#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_MASK 0x0C000000U +#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_SHIFT 26 +#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_SIGNED 0 +/* Register EUR_CR_CLKGATESTATUS */ +#define EUR_CR_CLKGATESTATUS 0x0008 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001U +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002U +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1 +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004U +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008U +#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3 +#define EUR_CR_CLKGATESTATUS_TE_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010U +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4 +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020U +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040U +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6 +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_PDS_CLKS_MASK 0x00000080U +#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SHIFT 7 +#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100U +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8 +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SIGNED 
0 +#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_MASK 0x00000200U +#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_SHIFT 9 +#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400U +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10 +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800U +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11 +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000U +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12 +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000U +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13 +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000U +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15 +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000U +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16 +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000U +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17 +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_MASK 0x00080000U +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SHIFT 19 +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00100000U +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 20 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_MASK 0x00200000U +#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_SHIFT 21 +#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_MASK 0x00400000U +#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_SHIFT 22 +#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_MASK 0x00800000U +#define 
EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_SHIFT 23 +#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_MASK 0x01000000U +#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_SHIFT 24 +#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_SIGNED 0 +/* Register EUR_CR_CLKGATECTLOVR */ +#define EUR_CR_CLKGATECTLOVR 0x000C +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003U +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0 +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000CU +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2 +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030U +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0U +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6 +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300U +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8 +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00U +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000U +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12 +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_MASK 0x0000C000U +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SHIFT 14 +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_MASK 0x00030000U +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SHIFT 16 +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x000C0000U +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 18 +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_MASK 0x00300000U +#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_SHIFT 20 +#define 
EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_SIGNED 0 +/* Register EUR_CR_POWER */ +#define EUR_CR_POWER 0x001C +#define EUR_CR_POWER_PIPE_DISABLE_MASK 0x00000001U +#define EUR_CR_POWER_PIPE_DISABLE_SHIFT 0 +#define EUR_CR_POWER_PIPE_DISABLE_SIGNED 0 +/* Register EUR_CR_CORE_ID */ +#define EUR_CR_CORE_ID 0x0020 +#define EUR_CR_CORE_ID_CONFIG_MULTI_MASK 0x00000001U +#define EUR_CR_CORE_ID_CONFIG_MULTI_SHIFT 0 +#define EUR_CR_CORE_ID_CONFIG_MULTI_SIGNED 0 +#define EUR_CR_CORE_ID_CONFIG_BASE_MASK 0x00000002U +#define EUR_CR_CORE_ID_CONFIG_BASE_SHIFT 1 +#define EUR_CR_CORE_ID_CONFIG_BASE_SIGNED 0 +#define EUR_CR_CORE_ID_CONFIG_MASK 0x000000FCU +#define EUR_CR_CORE_ID_CONFIG_SHIFT 2 +#define EUR_CR_CORE_ID_CONFIG_SIGNED 0 +#define EUR_CR_CORE_ID_CONFIG_CORES_MASK 0x00000F00U +#define EUR_CR_CORE_ID_CONFIG_CORES_SHIFT 8 +#define EUR_CR_CORE_ID_CONFIG_CORES_SIGNED 0 +#define EUR_CR_CORE_ID_CONFIG_SLC_MASK 0x0000F000U +#define EUR_CR_CORE_ID_CONFIG_SLC_SHIFT 12 +#define EUR_CR_CORE_ID_CONFIG_SLC_SIGNED 0 +#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U +#define EUR_CR_CORE_ID_ID_SHIFT 16 +#define EUR_CR_CORE_ID_ID_SIGNED 0 +/* Register EUR_CR_CORE_REVISION */ +#define EUR_CR_CORE_REVISION 0x0024 +#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU +#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0 +#define EUR_CR_CORE_REVISION_MAINTENANCE_SIGNED 0 +#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U +#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8 +#define EUR_CR_CORE_REVISION_MINOR_SIGNED 0 +#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U +#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16 +#define EUR_CR_CORE_REVISION_MAJOR_SIGNED 0 +#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U +#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24 +#define EUR_CR_CORE_REVISION_DESIGNER_SIGNED 0 +/* Register EUR_CR_DESIGNER_REV_FIELD1 */ +#define EUR_CR_DESIGNER_REV_FIELD1 0x0028 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU +#define 
EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SIGNED 0 +/* Register EUR_CR_DESIGNER_REV_FIELD2 */ +#define EUR_CR_DESIGNER_REV_FIELD2 0x002C +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0 +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SIGNED 0 +/* Register EUR_CR_SOFT_RESET */ +#define EUR_CR_SOFT_RESET 0x0080 +#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U +#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0 +#define EUR_CR_SOFT_RESET_BIF_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_VDM_RESET_MASK 0x00000002U +#define EUR_CR_SOFT_RESET_VDM_RESET_SHIFT 1 +#define EUR_CR_SOFT_RESET_VDM_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U +#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2 +#define EUR_CR_SOFT_RESET_DPM_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TE_RESET_MASK 0x00000008U +#define EUR_CR_SOFT_RESET_TE_RESET_SHIFT 3 +#define EUR_CR_SOFT_RESET_TE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_MTE_RESET_MASK 0x00000010U +#define EUR_CR_SOFT_RESET_MTE_RESET_SHIFT 4 +#define EUR_CR_SOFT_RESET_MTE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U +#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5 +#define EUR_CR_SOFT_RESET_ISP_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_ISP2_RESET_MASK 0x00000040U +#define EUR_CR_SOFT_RESET_ISP2_RESET_SHIFT 6 +#define EUR_CR_SOFT_RESET_ISP2_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000080U +#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 7 +#define EUR_CR_SOFT_RESET_TSP_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_PDS_RESET_MASK 0x00000100U +#define EUR_CR_SOFT_RESET_PDS_RESET_SHIFT 8 +#define EUR_CR_SOFT_RESET_PDS_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_PBE_RESET_MASK 0x00000200U +#define EUR_CR_SOFT_RESET_PBE_RESET_SHIFT 9 +#define EUR_CR_SOFT_RESET_PBE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK 0x00000400U +#define 
EUR_CR_SOFT_RESET_TCU_L2_RESET_SHIFT 10 +#define EUR_CR_SOFT_RESET_TCU_L2_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK 0x00000800U +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SHIFT 11 +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_ITR_RESET_MASK 0x00002000U +#define EUR_CR_SOFT_RESET_ITR_RESET_SHIFT 13 +#define EUR_CR_SOFT_RESET_ITR_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TEX_RESET_MASK 0x00004000U +#define EUR_CR_SOFT_RESET_TEX_RESET_SHIFT 14 +#define EUR_CR_SOFT_RESET_TEX_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00008000U +#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 15 +#define EUR_CR_SOFT_RESET_USE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK 0x00010000U +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SHIFT 16 +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00020000U +#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 17 +#define EUR_CR_SOFT_RESET_TA_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK 0x00040000U +#define EUR_CR_SOFT_RESET_DCU_L2_RESET_SHIFT 18 +#define EUR_CR_SOFT_RESET_DCU_L2_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK 0x00080000U +#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_SHIFT 19 +#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_ENABLE2 */ +#define EUR_CR_EVENT_HOST_ENABLE2 0x0110 +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_SHIFT 9 +#define 
EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_CLEAR2 */ +#define EUR_CR_EVENT_HOST_CLEAR2 0x0114 +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11 +#define 
EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define 
EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SIGNED 0 +/* Register EUR_CR_EVENT_STATUS2 */ +#define EUR_CR_EVENT_STATUS2 0x0118 +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11 +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_SHIFT 10 +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_SHIFT 9 +#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_SHIFT 8 +#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SHIFT 7 +#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SHIFT 6 +#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5 +#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SIGNED 0 +#define 
EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SIGNED 0 +/* Register EUR_CR_EVENT_STATUS */ +#define EUR_CR_EVENT_STATUS 0x012C +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29 +#define EUR_CR_EVENT_STATUS_TIMER_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SIGNED 0 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SIGNED 0 +#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20 
+#define EUR_CR_EVENT_STATUS_OTPM_INV_SIGNED 0 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SIGNED 0 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_STATUS_SW_EVENT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6 +#define 
EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_ENABLE */ +#define EUR_CR_EVENT_HOST_ENABLE 0x0130 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define 
EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12 +#define 
EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define 
EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_CLEAR */ +#define EUR_CR_EVENT_HOST_CLEAR 0x0134 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SIGNED 0 +#define 
EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U +#define 
EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SIGNED 0 +/* Register EUR_CR_TIMER */ +#define EUR_CR_TIMER 0x0144 +#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU +#define EUR_CR_TIMER_VALUE_SHIFT 0 +#define EUR_CR_TIMER_VALUE_SIGNED 0 +/* Register EUR_CR_EVENT_KICK1 */ +#define EUR_CR_EVENT_KICK1 0x0AB0 +#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FFU +#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK1_NOW_SIGNED 0 +/* Register EUR_CR_EVENT_KICK2 */ +#define EUR_CR_EVENT_KICK2 0x0AC0 +#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0 +#define 
EUR_CR_EVENT_KICK2_NOW_SIGNED 0 +/* Register EUR_CR_EVENT_KICKER */ +#define EUR_CR_EVENT_KICKER 0x0AC4 +#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4 +#define EUR_CR_EVENT_KICKER_ADDRESS_SIGNED 0 +/* Register EUR_CR_EVENT_KICK */ +#define EUR_CR_EVENT_KICK 0x0AC8 +#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK_NOW_SIGNED 0 +/* Register EUR_CR_EVENT_TIMER */ +#define EUR_CR_EVENT_TIMER 0x0ACC +#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U +#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24 +#define EUR_CR_EVENT_TIMER_ENABLE_SIGNED 0 +#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU +#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0 +#define EUR_CR_EVENT_TIMER_VALUE_SIGNED 0 +/* Register EUR_CR_PDS_INV0 */ +#define EUR_CR_PDS_INV0 0x0AD0 +#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV0_DSC_SHIFT 0 +#define EUR_CR_PDS_INV0_DSC_SIGNED 0 +/* Register EUR_CR_PDS_INV1 */ +#define EUR_CR_PDS_INV1 0x0AD4 +#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV1_DSC_SHIFT 0 +#define EUR_CR_PDS_INV1_DSC_SIGNED 0 +/* Register EUR_CR_EVENT_KICK3 */ +#define EUR_CR_EVENT_KICK3 0x0AD8 +#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK3_NOW_SIGNED 0 +/* Register EUR_CR_PDS_INV3 */ +#define EUR_CR_PDS_INV3 0x0ADC +#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV3_DSC_SHIFT 0 +#define EUR_CR_PDS_INV3_DSC_SIGNED 0 +/* Register EUR_CR_PDS_INV_CSC */ +#define EUR_CR_PDS_INV_CSC 0x0AE0 +#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U +#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0 +#define EUR_CR_PDS_INV_CSC_KICK_SIGNED 0 +/* Register EUR_CR_BIF_CTRL */ +#define EUR_CR_BIF_CTRL 0x0C00 +#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U +#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0 +#define EUR_CR_BIF_CTRL_NOREORDER_SIGNED 0 +#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U 
+#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1 +#define EUR_CR_BIF_CTRL_PAUSE_SIGNED 0 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_MASK 0x00000400U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_SHIFT 10 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_MASK 0x00010000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_SHIFT 16 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_MASK 0x00020000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SHIFT 17 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_MASK 0x00040000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SHIFT 18 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_MASK 0x00080000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SHIFT 19 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SIGNED 0 +/* Register EUR_CR_BIF_INT_STAT */ +#define EUR_CR_BIF_INT_STAT 0x0C04 +#define EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK 0x00003FFFU +#define 
EUR_CR_BIF_INT_STAT_FAULT_REQ_SHIFT 0 +#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SIGNED 0 +#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK 0x00070000U +#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SHIFT 16 +#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SIGNED 0 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00080000U +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 19 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SIGNED 0 +/* Register EUR_CR_BIF_FAULT */ +#define EUR_CR_BIF_FAULT 0x0C08 +#define EUR_CR_BIF_FAULT_CID_MASK 0x0000000FU +#define EUR_CR_BIF_FAULT_CID_SHIFT 0 +#define EUR_CR_BIF_FAULT_CID_SIGNED 0 +#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0U +#define EUR_CR_BIF_FAULT_SB_SHIFT 4 +#define EUR_CR_BIF_FAULT_SB_SIGNED 0 +#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12 +#define EUR_CR_BIF_FAULT_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_TILE0 */ +#define EUR_CR_BIF_TILE0 0x0C0C +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE0_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE0_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE1 */ +#define EUR_CR_BIF_TILE1 0x0C10 +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE1_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE1_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE2 */ +#define EUR_CR_BIF_TILE2 0x0C14 +#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0 +#define 
EUR_CR_BIF_TILE2_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE2_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE2_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE3 */ +#define EUR_CR_BIF_TILE3 0x0C18 +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE3_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE3_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE4 */ +#define EUR_CR_BIF_TILE4 0x0C1C +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE4_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE4_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE5 */ +#define EUR_CR_BIF_TILE5 0x0C20 +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE5_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE5_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE6 */ +#define EUR_CR_BIF_TILE6 0x0C24 +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SIGNED 0 +#define 
EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE6_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE6_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE7 */ +#define EUR_CR_BIF_TILE7 0x0C28 +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE7_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE7_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE8 */ +#define EUR_CR_BIF_TILE8 0x0C2C +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE8_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE8_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE9 */ +#define EUR_CR_BIF_TILE9 0x0C30 +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE9_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE9_CFG_SIGNED 0 +/* Register EUR_CR_BIF_CTRL_INVAL */ +#define EUR_CR_BIF_CTRL_INVAL 0x0C34 +#define EUR_CR_BIF_CTRL_INVAL_PTE_MASK 0x00000004U +#define EUR_CR_BIF_CTRL_INVAL_PTE_SHIFT 2 +#define EUR_CR_BIF_CTRL_INVAL_PTE_SIGNED 0 +#define EUR_CR_BIF_CTRL_INVAL_ALL_MASK 0x00000008U 
+#define EUR_CR_BIF_CTRL_INVAL_ALL_SHIFT 3 +#define EUR_CR_BIF_CTRL_INVAL_ALL_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE1 */ +#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38 +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE2 */ +#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE3 */ +#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40 +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE4 */ +#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44 +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE5 */ +#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48 +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE6 */ +#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE7 */ +#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50 +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_BANK_SET */ +#define EUR_CR_BIF_BANK_SET 0x0C74 +#define EUR_CR_BIF_BANK_SET_SELECT_2D_MASK 0x00000001U +#define EUR_CR_BIF_BANK_SET_SELECT_2D_SHIFT 0 +#define EUR_CR_BIF_BANK_SET_SELECT_2D_SIGNED 0 +#define 
EUR_CR_BIF_BANK_SET_SELECT_3D_MASK 0x0000000CU +#define EUR_CR_BIF_BANK_SET_SELECT_3D_SHIFT 2 +#define EUR_CR_BIF_BANK_SET_SELECT_3D_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_HOST_MASK 0x00000010U +#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SHIFT 4 +#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_TA_MASK 0x000000C0U +#define EUR_CR_BIF_BANK_SET_SELECT_TA_SHIFT 6 +#define EUR_CR_BIF_BANK_SET_SELECT_TA_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_EDM_MASK 0x00000100U +#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SHIFT 8 +#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_MASK 0x00000200U +#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SHIFT 9 +#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SIGNED 0 +/* Register EUR_CR_BIF_BANK0 */ +#define EUR_CR_BIF_BANK0 0x0C78 +#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FU +#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0 +#define EUR_CR_BIF_BANK0_INDEX_EDM_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0U +#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4 +#define EUR_CR_BIF_BANK0_INDEX_TA_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000U +#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12 +#define EUR_CR_BIF_BANK0_INDEX_3D_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_PTLA_MASK 0x000F0000U +#define EUR_CR_BIF_BANK0_INDEX_PTLA_SHIFT 16 +#define EUR_CR_BIF_BANK0_INDEX_PTLA_SIGNED 0 +/* Register EUR_CR_BIF_BANK1 */ +#define EUR_CR_BIF_BANK1 0x0C7C +#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FU +#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0 +#define EUR_CR_BIF_BANK1_INDEX_EDM_SIGNED 0 +#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0U +#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4 +#define EUR_CR_BIF_BANK1_INDEX_TA_SIGNED 0 +#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000U +#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12 +#define EUR_CR_BIF_BANK1_INDEX_3D_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE0 */ +#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84 +#define 
EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_TA_REQ_BASE */ +#define EUR_CR_BIF_TA_REQ_BASE 0x0C90 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_MEM_REQ_STAT */ +#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU +#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_SIGNED 0 +/* Register EUR_CR_BIF_3D_REQ_BASE */ +#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20 +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_ZLS_REQ_BASE */ +#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_BANK_STATUS */ +#define EUR_CR_BIF_BANK_STATUS 0x0CB4 +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001U +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0 +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SIGNED 0 +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002U +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1 +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SIGNED 0 +/* Register EUR_CR_BIF_MMU_CTRL */ +#define EUR_CR_BIF_MMU_CTRL 0x0CD0 +#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_MASK 0x00000001U +#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_SHIFT 0 +#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_SIGNED 0 +#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_MASK 0x00000006U +#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT 1 +#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SIGNED 0 +#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_MASK 0x00000008U +#define 
EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_SHIFT 3 +#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_SIGNED 0 +#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK 0x00000010U +#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_SHIFT 4 +#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_SIGNED 0 +#define EUR_CR_BIF_MMU_CTRL_DISABLE_BURST_EXP_MASK 0x00000020U +#define EUR_CR_BIF_MMU_CTRL_DISABLE_BURST_EXP_SHIFT 5 +#define EUR_CR_BIF_MMU_CTRL_DISABLE_BURST_EXP_SIGNED 0 +/* Register EUR_CR_2D_BLIT_STATUS */ +#define EUR_CR_2D_BLIT_STATUS 0x0E04 +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0 +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SIGNED 0 +#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U +#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24 +#define EUR_CR_2D_BLIT_STATUS_BUSY_SIGNED 0 +/* Register EUR_CR_2D_VIRTUAL_FIFO_0 */ +#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10 +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SIGNED 0 +/* Register EUR_CR_2D_VIRTUAL_FIFO_1 */ +#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SIGNED 0 +#define 
EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SIGNED 0 +/* Register EUR_CR_BREAKPOINT0_START */ +#define EUR_CR_BREAKPOINT0_START 0x0F44 +#define EUR_CR_BREAKPOINT0_START_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT0_START_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT0_START_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT0_END */ +#define EUR_CR_BREAKPOINT0_END 0x0F48 +#define EUR_CR_BREAKPOINT0_END_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT0_END_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT0_END_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT0 */ +#define EUR_CR_BREAKPOINT0 0x0F4C +#define EUR_CR_BREAKPOINT0_MASK_DM_MASK 0x00000038U +#define EUR_CR_BREAKPOINT0_MASK_DM_SHIFT 3 +#define EUR_CR_BREAKPOINT0_MASK_DM_SIGNED 0 +#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK 0x00000004U +#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SHIFT 2 +#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_SHIFT 1 +#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_MASK 0x00000001U +#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_SHIFT 0 +#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_SIGNED 0 +/* Register EUR_CR_BREAKPOINT1_START */ +#define EUR_CR_BREAKPOINT1_START 0x0F50 +#define EUR_CR_BREAKPOINT1_START_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT1_START_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT1_START_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT1_END */ +#define EUR_CR_BREAKPOINT1_END 0x0F54 +#define EUR_CR_BREAKPOINT1_END_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT1_END_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT1_END_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT1 */ +#define EUR_CR_BREAKPOINT1 0x0F58 +#define EUR_CR_BREAKPOINT1_MASK_DM_MASK 0x00000038U +#define EUR_CR_BREAKPOINT1_MASK_DM_SHIFT 3 +#define 
EUR_CR_BREAKPOINT1_MASK_DM_SIGNED 0 +#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_MASK 0x00000004U +#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_SHIFT 2 +#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_SHIFT 1 +#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_MASK 0x00000001U +#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_SHIFT 0 +#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_SIGNED 0 +/* Register EUR_CR_BREAKPOINT2_START */ +#define EUR_CR_BREAKPOINT2_START 0x0F5C +#define EUR_CR_BREAKPOINT2_START_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT2_START_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT2_START_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT2_END */ +#define EUR_CR_BREAKPOINT2_END 0x0F60 +#define EUR_CR_BREAKPOINT2_END_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT2_END_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT2_END_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT2 */ +#define EUR_CR_BREAKPOINT2 0x0F64 +#define EUR_CR_BREAKPOINT2_MASK_DM_MASK 0x00000038U +#define EUR_CR_BREAKPOINT2_MASK_DM_SHIFT 3 +#define EUR_CR_BREAKPOINT2_MASK_DM_SIGNED 0 +#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_MASK 0x00000004U +#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_SHIFT 2 +#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_SHIFT 1 +#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_MASK 0x00000001U +#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_SHIFT 0 +#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_SIGNED 0 +/* Register EUR_CR_BREAKPOINT3_START */ +#define EUR_CR_BREAKPOINT3_START 0x0F68 +#define EUR_CR_BREAKPOINT3_START_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT3_START_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT3_START_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT3_END */ +#define EUR_CR_BREAKPOINT3_END 
0x0F6C +#define EUR_CR_BREAKPOINT3_END_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT3_END_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT3_END_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT3 */ +#define EUR_CR_BREAKPOINT3 0x0F70 +#define EUR_CR_BREAKPOINT3_MASK_DM_MASK 0x00000038U +#define EUR_CR_BREAKPOINT3_MASK_DM_SHIFT 3 +#define EUR_CR_BREAKPOINT3_MASK_DM_SIGNED 0 +#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_MASK 0x00000004U +#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_SHIFT 2 +#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_SHIFT 1 +#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_MASK 0x00000001U +#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_SHIFT 0 +#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_SIGNED 0 +/* Register EUR_CR_BREAKPOINT_READ */ +#define EUR_CR_BREAKPOINT_READ 0x0F74 +#define EUR_CR_BREAKPOINT_READ_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT_READ_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT_READ_ADDRESS_SIGNED 0 +/* Register EUR_CR_PARTITION_BREAKPOINT_TRAP */ +#define EUR_CR_PARTITION_BREAKPOINT_TRAP 0x0F78 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_SHIFT 1 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0 +/* Register EUR_CR_PARTITION_BREAKPOINT */ +#define EUR_CR_PARTITION_BREAKPOINT 0x0F7C +#define EUR_CR_PARTITION_BREAKPOINT_MODULE_ID_MASK 0x000003C0U +#define EUR_CR_PARTITION_BREAKPOINT_MODULE_ID_SHIFT 6 +#define EUR_CR_PARTITION_BREAKPOINT_MODULE_ID_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_ID_MASK 0x00000030U +#define EUR_CR_PARTITION_BREAKPOINT_ID_SHIFT 4 +#define EUR_CR_PARTITION_BREAKPOINT_ID_SIGNED 0 +#define 
EUR_CR_PARTITION_BREAKPOINT_UNTRAPPED_MASK 0x00000008U +#define EUR_CR_PARTITION_BREAKPOINT_UNTRAPPED_SHIFT 3 +#define EUR_CR_PARTITION_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAPPED_MASK 0x00000004U +#define EUR_CR_PARTITION_BREAKPOINT_TRAPPED_SHIFT 2 +#define EUR_CR_PARTITION_BREAKPOINT_TRAPPED_SIGNED 0 +/* Register EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0 */ +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0 0x0F80 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0 +/* Register EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1 */ +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1 0x0F84 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_0 */ +#define EUR_CR_USE_CODE_BASE_0 0x0A0C +#define EUR_CR_USE_CODE_BASE_ADDR_00_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_00_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_00_SIGNED 0 +#define 
EUR_CR_USE_CODE_BASE_DM_00_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_00_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_00_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_1 */ +#define EUR_CR_USE_CODE_BASE_1 0x0A10 +#define EUR_CR_USE_CODE_BASE_ADDR_01_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_01_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_01_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_01_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_01_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_01_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_2 */ +#define EUR_CR_USE_CODE_BASE_2 0x0A14 +#define EUR_CR_USE_CODE_BASE_ADDR_02_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_02_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_02_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_02_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_02_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_02_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_3 */ +#define EUR_CR_USE_CODE_BASE_3 0x0A18 +#define EUR_CR_USE_CODE_BASE_ADDR_03_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_03_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_03_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_03_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_03_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_03_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_4 */ +#define EUR_CR_USE_CODE_BASE_4 0x0A1C +#define EUR_CR_USE_CODE_BASE_ADDR_04_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_04_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_04_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_04_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_04_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_04_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_5 */ +#define EUR_CR_USE_CODE_BASE_5 0x0A20 +#define EUR_CR_USE_CODE_BASE_ADDR_05_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_05_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_05_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_05_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_05_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_05_SIGNED 0 +/* Register 
EUR_CR_USE_CODE_BASE_6 */ +#define EUR_CR_USE_CODE_BASE_6 0x0A24 +#define EUR_CR_USE_CODE_BASE_ADDR_06_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_06_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_06_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_06_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_06_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_06_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_7 */ +#define EUR_CR_USE_CODE_BASE_7 0x0A28 +#define EUR_CR_USE_CODE_BASE_ADDR_07_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_07_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_07_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_07_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_07_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_07_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_8 */ +#define EUR_CR_USE_CODE_BASE_8 0x0A2C +#define EUR_CR_USE_CODE_BASE_ADDR_08_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_08_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_08_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_08_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_08_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_08_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_9 */ +#define EUR_CR_USE_CODE_BASE_9 0x0A30 +#define EUR_CR_USE_CODE_BASE_ADDR_09_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_09_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_09_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_09_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_09_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_09_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_10 */ +#define EUR_CR_USE_CODE_BASE_10 0x0A34 +#define EUR_CR_USE_CODE_BASE_ADDR_10_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_10_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_10_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_10_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_10_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_10_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_11 */ +#define EUR_CR_USE_CODE_BASE_11 0x0A38 +#define EUR_CR_USE_CODE_BASE_ADDR_11_MASK 0x03FFFFFFU +#define 
EUR_CR_USE_CODE_BASE_ADDR_11_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_11_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_11_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_11_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_11_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_12 */ +#define EUR_CR_USE_CODE_BASE_12 0x0A3C +#define EUR_CR_USE_CODE_BASE_ADDR_12_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_12_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_12_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_12_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_12_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_12_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_13 */ +#define EUR_CR_USE_CODE_BASE_13 0x0A40 +#define EUR_CR_USE_CODE_BASE_ADDR_13_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_13_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_13_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_13_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_13_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_13_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_14 */ +#define EUR_CR_USE_CODE_BASE_14 0x0A44 +#define EUR_CR_USE_CODE_BASE_ADDR_14_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_14_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_14_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_14_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_14_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_14_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_15 */ +#define EUR_CR_USE_CODE_BASE_15 0x0A48 +#define EUR_CR_USE_CODE_BASE_ADDR_15_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_15_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_15_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_15_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_15_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_15_SIGNED 0 +/* Register EUR_CR_PIPE0_BREAKPOINT_TRAP */ +#define EUR_CR_PIPE0_BREAKPOINT_TRAP 0x0F88 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_CONTINUE_SHIFT 1 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_CONTINUE_SIGNED 0 +#define 
EUR_CR_PIPE0_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0 +/* Register EUR_CR_PIPE0_BREAKPOINT */ +#define EUR_CR_PIPE0_BREAKPOINT 0x0F8C +#define EUR_CR_PIPE0_BREAKPOINT_MODULE_ID_MASK 0x000003C0U +#define EUR_CR_PIPE0_BREAKPOINT_MODULE_ID_SHIFT 6 +#define EUR_CR_PIPE0_BREAKPOINT_MODULE_ID_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_ID_MASK 0x00000030U +#define EUR_CR_PIPE0_BREAKPOINT_ID_SHIFT 4 +#define EUR_CR_PIPE0_BREAKPOINT_ID_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_UNTRAPPED_MASK 0x00000008U +#define EUR_CR_PIPE0_BREAKPOINT_UNTRAPPED_SHIFT 3 +#define EUR_CR_PIPE0_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAPPED_MASK 0x00000004U +#define EUR_CR_PIPE0_BREAKPOINT_TRAPPED_SHIFT 2 +#define EUR_CR_PIPE0_BREAKPOINT_TRAPPED_SIGNED 0 +/* Register EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0 */ +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0 0x0F90 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0 +/* Register EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1 */ +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1 0x0F94 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1 +#define 
EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0 +/* Register EUR_CR_PIPE1_BREAKPOINT_TRAP */ +#define EUR_CR_PIPE1_BREAKPOINT_TRAP 0x0F98 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_CONTINUE_SHIFT 1 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_CONTINUE_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0 +/* Register EUR_CR_PIPE1_BREAKPOINT */ +#define EUR_CR_PIPE1_BREAKPOINT 0x0F9C +#define EUR_CR_PIPE1_BREAKPOINT_MODULE_ID_MASK 0x000003C0U +#define EUR_CR_PIPE1_BREAKPOINT_MODULE_ID_SHIFT 6 +#define EUR_CR_PIPE1_BREAKPOINT_MODULE_ID_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_ID_MASK 0x00000030U +#define EUR_CR_PIPE1_BREAKPOINT_ID_SHIFT 4 +#define EUR_CR_PIPE1_BREAKPOINT_ID_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_UNTRAPPED_MASK 0x00000008U +#define EUR_CR_PIPE1_BREAKPOINT_UNTRAPPED_SHIFT 3 +#define EUR_CR_PIPE1_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAPPED_MASK 0x00000004U +#define EUR_CR_PIPE1_BREAKPOINT_TRAPPED_SHIFT 2 +#define EUR_CR_PIPE1_BREAKPOINT_TRAPPED_SIGNED 0 +/* Register EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0 */ +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0 0x0FA0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0 +/* Register EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1 */ +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1 0x0FA4 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0 +#define 
EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0 +/* Table EUR_CR_USE_CODE_BASE */ +/* Register EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X))) +#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_SIGNED 0 +/* Number of entries in table EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16 +#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16 + +#endif /* _SGX543DEFS_KM_H_ */ + diff --git a/pvr-source/services4/srvkm/hwdefs/sgx544defs.h b/pvr-source/services4/srvkm/hwdefs/sgx544defs.h new file mode 100644 index 0000000..79efcbc --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgx544defs.h @@ -0,0 +1,1487 @@ +/*************************************************************************/ /*! +@Title Hardware defs for SGX544. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _SGX544DEFS_KM_H_ +#define _SGX544DEFS_KM_H_ + +/* Register EUR_CR_CLKGATECTL */ +#define EUR_CR_CLKGATECTL 0x0000 +#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL_ISP_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000CU +#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2 +#define EUR_CR_CLKGATECTL_ISP2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL_TSP_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0U +#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6 +#define EUR_CR_CLKGATECTL_TE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL_MTE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00U +#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10 +#define EUR_CR_CLKGATECTL_DPM_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL_VDM_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_PDS_CLKG_MASK 0x0000C000U +#define EUR_CR_CLKGATECTL_PDS_CLKG_SHIFT 14 +#define EUR_CR_CLKGATECTL_PDS_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL_IDXFIFO_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x000C0000U +#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 18 +#define EUR_CR_CLKGATECTL_TA_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_MASK 0x00300000U +#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_SHIFT 20 +#define EUR_CR_CLKGATECTL_BIF_CORE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SIGNED 0 +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_MASK 
0x10000000U +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SHIFT 28 +#define EUR_CR_CLKGATECTL_SYSTEM_CLKG_SIGNED 0 +/* Register EUR_CR_CLKGATECTL2 */ +#define EUR_CR_CLKGATECTL2 0x0004 +#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL2_PBE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_MASK 0x0000000CU +#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_SHIFT 2 +#define EUR_CR_CLKGATECTL2_TCU_L2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0U +#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6 +#define EUR_CR_CLKGATECTL2_USE0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00U +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10 +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000U +#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14 +#define EUR_CR_CLKGATECTL2_USE1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000U +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18 +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_MASK 0x00C00000U +#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_SHIFT 22 +#define EUR_CR_CLKGATECTL2_DCU_L2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_MASK 0x03000000U +#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_SHIFT 24 +#define EUR_CR_CLKGATECTL2_DCU1_L0L1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_MASK 0x0C000000U +#define EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_SHIFT 26 +#define 
EUR_CR_CLKGATECTL2_DCU0_L0L1_CLKG_SIGNED 0 +/* Register EUR_CR_CLKGATESTATUS */ +#define EUR_CR_CLKGATESTATUS 0x0008 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001U +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002U +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1 +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004U +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008U +#define EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3 +#define EUR_CR_CLKGATESTATUS_TE_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010U +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4 +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020U +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040U +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6 +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_PDS_CLKS_MASK 0x00000080U +#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SHIFT 7 +#define EUR_CR_CLKGATESTATUS_PDS_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100U +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8 +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_MASK 0x00000200U +#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_SHIFT 9 +#define EUR_CR_CLKGATESTATUS_TCU_L2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400U +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10 +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800U +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11 +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SIGNED 0 +#define 
EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000U +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12 +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000U +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13 +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000U +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15 +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 0x00010000U +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16 +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000U +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17 +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_MASK 0x00080000U +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SHIFT 19 +#define EUR_CR_CLKGATESTATUS_IDXFIFO_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00100000U +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 20 +#define EUR_CR_CLKGATESTATUS_TA_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_MASK 0x00200000U +#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_SHIFT 21 +#define EUR_CR_CLKGATESTATUS_DCU_L2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_MASK 0x00400000U +#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_SHIFT 22 +#define EUR_CR_CLKGATESTATUS_DCU0_L0L1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_MASK 0x00800000U +#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_SHIFT 23 +#define EUR_CR_CLKGATESTATUS_DCU1_L0L1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_MASK 0x01000000U +#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_SHIFT 24 +#define EUR_CR_CLKGATESTATUS_BIF_CORE_CLKS_SIGNED 0 +/* Register EUR_CR_CLKGATECTLOVR */ +#define EUR_CR_CLKGATECTLOVR 0x000C +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003U +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0 +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SIGNED 0 +#define 
EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000CU +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2 +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030U +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0U +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6 +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300U +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8 +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00U +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000U +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12 +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_MASK 0x0000C000U +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SHIFT 14 +#define EUR_CR_CLKGATECTLOVR_PDS_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_MASK 0x00030000U +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SHIFT 16 +#define EUR_CR_CLKGATECTLOVR_IDXFIFO_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x000C0000U +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 18 +#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_MASK 0x00300000U +#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_SHIFT 20 +#define EUR_CR_CLKGATECTLOVR_BIF_CORE_CLKO_SIGNED 0 +/* Register EUR_CR_POWER */ +#define EUR_CR_POWER 0x001C +#define EUR_CR_POWER_PIPE_DISABLE_MASK 0x00000001U +#define EUR_CR_POWER_PIPE_DISABLE_SHIFT 0 +#define EUR_CR_POWER_PIPE_DISABLE_SIGNED 0 +/* Register EUR_CR_CORE_ID */ +#define EUR_CR_CORE_ID 0x0020 +#define EUR_CR_CORE_ID_CONFIG_MULTI_MASK 0x00000001U +#define EUR_CR_CORE_ID_CONFIG_MULTI_SHIFT 0 +#define EUR_CR_CORE_ID_CONFIG_MULTI_SIGNED 0 +#define EUR_CR_CORE_ID_CONFIG_BASE_MASK 0x00000002U +#define 
EUR_CR_CORE_ID_CONFIG_BASE_SHIFT 1 +#define EUR_CR_CORE_ID_CONFIG_BASE_SIGNED 0 +#define EUR_CR_CORE_ID_CONFIG_MASK 0x000000FCU +#define EUR_CR_CORE_ID_CONFIG_SHIFT 2 +#define EUR_CR_CORE_ID_CONFIG_SIGNED 0 +#define EUR_CR_CORE_ID_CONFIG_CORES_MASK 0x00000F00U +#define EUR_CR_CORE_ID_CONFIG_CORES_SHIFT 8 +#define EUR_CR_CORE_ID_CONFIG_CORES_SIGNED 0 +#define EUR_CR_CORE_ID_CONFIG_SLC_MASK 0x0000F000U +#define EUR_CR_CORE_ID_CONFIG_SLC_SHIFT 12 +#define EUR_CR_CORE_ID_CONFIG_SLC_SIGNED 0 +#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U +#define EUR_CR_CORE_ID_ID_SHIFT 16 +#define EUR_CR_CORE_ID_ID_SIGNED 0 +/* Register EUR_CR_CORE_REVISION */ +#define EUR_CR_CORE_REVISION 0x0024 +#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU +#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0 +#define EUR_CR_CORE_REVISION_MAINTENANCE_SIGNED 0 +#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U +#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8 +#define EUR_CR_CORE_REVISION_MINOR_SIGNED 0 +#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U +#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16 +#define EUR_CR_CORE_REVISION_MAJOR_SIGNED 0 +#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U +#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24 +#define EUR_CR_CORE_REVISION_DESIGNER_SIGNED 0 +/* Register EUR_CR_DESIGNER_REV_FIELD1 */ +#define EUR_CR_DESIGNER_REV_FIELD1 0x0028 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SIGNED 0 +/* Register EUR_CR_DESIGNER_REV_FIELD2 */ +#define EUR_CR_DESIGNER_REV_FIELD2 0x002C +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0 +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SIGNED 0 +/* Register EUR_CR_SOFT_RESET */ +#define EUR_CR_SOFT_RESET 0x0080 +#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U +#define 
EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0 +#define EUR_CR_SOFT_RESET_BIF_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_VDM_RESET_MASK 0x00000002U +#define EUR_CR_SOFT_RESET_VDM_RESET_SHIFT 1 +#define EUR_CR_SOFT_RESET_VDM_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004U +#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2 +#define EUR_CR_SOFT_RESET_DPM_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TE_RESET_MASK 0x00000008U +#define EUR_CR_SOFT_RESET_TE_RESET_SHIFT 3 +#define EUR_CR_SOFT_RESET_TE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_MTE_RESET_MASK 0x00000010U +#define EUR_CR_SOFT_RESET_MTE_RESET_SHIFT 4 +#define EUR_CR_SOFT_RESET_MTE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020U +#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5 +#define EUR_CR_SOFT_RESET_ISP_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_ISP2_RESET_MASK 0x00000040U +#define EUR_CR_SOFT_RESET_ISP2_RESET_SHIFT 6 +#define EUR_CR_SOFT_RESET_ISP2_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000080U +#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 7 +#define EUR_CR_SOFT_RESET_TSP_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_PDS_RESET_MASK 0x00000100U +#define EUR_CR_SOFT_RESET_PDS_RESET_SHIFT 8 +#define EUR_CR_SOFT_RESET_PDS_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_PBE_RESET_MASK 0x00000200U +#define EUR_CR_SOFT_RESET_PBE_RESET_SHIFT 9 +#define EUR_CR_SOFT_RESET_PBE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK 0x00000400U +#define EUR_CR_SOFT_RESET_TCU_L2_RESET_SHIFT 10 +#define EUR_CR_SOFT_RESET_TCU_L2_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK 0x00000800U +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SHIFT 11 +#define EUR_CR_SOFT_RESET_UCACHEL2_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_ITR_RESET_MASK 0x00002000U +#define EUR_CR_SOFT_RESET_ITR_RESET_SHIFT 13 +#define EUR_CR_SOFT_RESET_ITR_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TEX_RESET_MASK 0x00004000U +#define EUR_CR_SOFT_RESET_TEX_RESET_SHIFT 14 +#define EUR_CR_SOFT_RESET_TEX_RESET_SIGNED 0 
+#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00008000U +#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 15 +#define EUR_CR_SOFT_RESET_USE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK 0x00010000U +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SHIFT 16 +#define EUR_CR_SOFT_RESET_IDXFIFO_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00020000U +#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 17 +#define EUR_CR_SOFT_RESET_TA_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK 0x00040000U +#define EUR_CR_SOFT_RESET_DCU_L2_RESET_SHIFT 18 +#define EUR_CR_SOFT_RESET_DCU_L2_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK 0x00080000U +#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_SHIFT 19 +#define EUR_CR_SOFT_RESET_DCU_L0L1_RESET_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_ENABLE2 */ +#define EUR_CR_EVENT_HOST_ENABLE2 0x0110 +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE2_DATA_BREAKPOINT_TRAPPED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_SHIFT 9 +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_CONTEXT_DRAINED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE2_ISP2_ZLS_CSW_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_MASK 0x00000040U +#define 
EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_CLEAR2 */ +#define EUR_CR_EVENT_HOST_CLEAR2 0x0114 +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR2_DATA_BREAKPOINT_TRAPPED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_CONTEXT_DRAINED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U +#define 
EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR2_ISP2_ZLS_CSW_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SIGNED 0 +/* Register EUR_CR_EVENT_STATUS2 */ +#define EUR_CR_EVENT_STATUS2 0x0118 +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_SHIFT 11 +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_SHIFT 10 +#define 
EUR_CR_EVENT_STATUS2_DATA_BREAKPOINT_TRAPPED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_SHIFT 9 +#define EUR_CR_EVENT_STATUS2_MTE_CONTEXT_DRAINED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_SHIFT 8 +#define EUR_CR_EVENT_STATUS2_ISP2_ZLS_CSW_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SHIFT 7 +#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SHIFT 6 +#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_SHIFT 5 +#define EUR_CR_EVENT_STATUS2_TE_RGNHDR_INIT_COMPLETE_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 4 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 3 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 2 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SIGNED 0 +/* Register EUR_CR_EVENT_STATUS */ +#define EUR_CR_EVENT_STATUS 0x012C +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31 +#define 
EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29 +#define EUR_CR_EVENT_STATUS_TIMER_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_STATUS_TCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SIGNED 0 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SIGNED 0 +#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_STATUS_OTPM_INV_SIGNED 0 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SIGNED 0 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SIGNED 
0 +#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_STATUS_SW_EVENT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SIGNED 0 +#define 
EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_ENABLE */ +#define EUR_CR_EVENT_HOST_ENABLE 0x0130 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_ENABLE_TCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U +#define 
EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9 +#define 
EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_CLEAR */ +#define EUR_CR_EVENT_HOST_CLEAR 0x0134 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31 +#define 
EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_CLEAR_TCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SIGNED 0 +#define 
EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U +#define 
EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SIGNED 0 +/* Register EUR_CR_TIMER */ +#define EUR_CR_TIMER 0x0144 +#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU +#define EUR_CR_TIMER_VALUE_SHIFT 0 +#define EUR_CR_TIMER_VALUE_SIGNED 0 +/* Register EUR_CR_EVENT_KICK1 */ +#define EUR_CR_EVENT_KICK1 0x0AB0 +#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FFU +#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK1_NOW_SIGNED 0 +/* Register EUR_CR_EVENT_KICK2 */ +#define EUR_CR_EVENT_KICK2 0x0AC0 +#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK2_NOW_SIGNED 0 +/* Register EUR_CR_EVENT_KICKER */ +#define EUR_CR_EVENT_KICKER 0x0AC4 +#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4 +#define EUR_CR_EVENT_KICKER_ADDRESS_SIGNED 0 +/* Register EUR_CR_EVENT_KICK */ +#define EUR_CR_EVENT_KICK 0x0AC8 +#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK_NOW_SIGNED 0 +/* Register EUR_CR_EVENT_TIMER */ +#define EUR_CR_EVENT_TIMER 0x0ACC +#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U 
+#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24 +#define EUR_CR_EVENT_TIMER_ENABLE_SIGNED 0 +#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU +#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0 +#define EUR_CR_EVENT_TIMER_VALUE_SIGNED 0 +/* Register EUR_CR_PDS_INV0 */ +#define EUR_CR_PDS_INV0 0x0AD0 +#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV0_DSC_SHIFT 0 +#define EUR_CR_PDS_INV0_DSC_SIGNED 0 +/* Register EUR_CR_PDS_INV1 */ +#define EUR_CR_PDS_INV1 0x0AD4 +#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV1_DSC_SHIFT 0 +#define EUR_CR_PDS_INV1_DSC_SIGNED 0 +/* Register EUR_CR_EVENT_KICK3 */ +#define EUR_CR_EVENT_KICK3 0x0AD8 +#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK3_NOW_SIGNED 0 +/* Register EUR_CR_PDS_INV3 */ +#define EUR_CR_PDS_INV3 0x0ADC +#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV3_DSC_SHIFT 0 +#define EUR_CR_PDS_INV3_DSC_SIGNED 0 +/* Register EUR_CR_PDS_INV_CSC */ +#define EUR_CR_PDS_INV_CSC 0x0AE0 +#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U +#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0 +#define EUR_CR_PDS_INV_CSC_KICK_SIGNED 0 +/* Register EUR_CR_BIF_CTRL */ +#define EUR_CR_BIF_CTRL 0x0C00 +#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U +#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0 +#define EUR_CR_BIF_CTRL_NOREORDER_SIGNED 0 +#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U +#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1 +#define EUR_CR_BIF_CTRL_PAUSE_SIGNED 0 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_MASK 0x00000400U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_SHIFT 10 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TA_SIGNED 0 +#define 
EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_MASK 0x00010000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_SHIFT 16 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PTLA_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_MASK 0x00020000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SHIFT 17 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_MASK 0x00040000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SHIFT 18 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_MASK 0x00080000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SHIFT 19 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SIGNED 0 +/* Register EUR_CR_BIF_INT_STAT */ +#define EUR_CR_BIF_INT_STAT 0x0C04 +#define EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK 0x00003FFFU +#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SHIFT 0 +#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SIGNED 0 +#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK 0x00070000U +#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SHIFT 16 +#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SIGNED 0 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00080000U +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 19 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SIGNED 0 +/* Register EUR_CR_BIF_FAULT */ +#define EUR_CR_BIF_FAULT 0x0C08 +#define EUR_CR_BIF_FAULT_CID_MASK 0x0000000FU +#define EUR_CR_BIF_FAULT_CID_SHIFT 0 
+#define EUR_CR_BIF_FAULT_CID_SIGNED 0 +#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0U +#define EUR_CR_BIF_FAULT_SB_SHIFT 4 +#define EUR_CR_BIF_FAULT_SB_SIGNED 0 +#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12 +#define EUR_CR_BIF_FAULT_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_TILE0 */ +#define EUR_CR_BIF_TILE0 0x0C0C +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE0_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE0_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE1 */ +#define EUR_CR_BIF_TILE1 0x0C10 +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE1_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE1_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE2 */ +#define EUR_CR_BIF_TILE2 0x0C14 +#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE2_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE2_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE3 */ +#define EUR_CR_BIF_TILE3 0x0C18 +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SIGNED 0 +#define 
EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE3_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE3_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE4 */ +#define EUR_CR_BIF_TILE4 0x0C1C +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE4_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE4_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE5 */ +#define EUR_CR_BIF_TILE5 0x0C20 +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE5_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE5_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE6 */ +#define EUR_CR_BIF_TILE6 0x0C24 +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE6_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE6_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE7 */ +#define EUR_CR_BIF_TILE7 0x0C28 +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000U 
+#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE7_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE7_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE8 */ +#define EUR_CR_BIF_TILE8 0x0C2C +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE8_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE8_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE9 */ +#define EUR_CR_BIF_TILE9 0x0C30 +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE9_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE9_CFG_SIGNED 0 +/* Register EUR_CR_BIF_CTRL_INVAL */ +#define EUR_CR_BIF_CTRL_INVAL 0x0C34 +#define EUR_CR_BIF_CTRL_INVAL_PTE_MASK 0x00000004U +#define EUR_CR_BIF_CTRL_INVAL_PTE_SHIFT 2 +#define EUR_CR_BIF_CTRL_INVAL_PTE_SIGNED 0 +#define EUR_CR_BIF_CTRL_INVAL_ALL_MASK 0x00000008U +#define EUR_CR_BIF_CTRL_INVAL_ALL_SHIFT 3 +#define EUR_CR_BIF_CTRL_INVAL_ALL_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE1 */ +#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38 +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE2 */ +#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 12 +#define 
EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE3 */ +#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40 +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE4 */ +#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44 +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE5 */ +#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48 +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE6 */ +#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE7 */ +#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50 +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_BANK_SET */ +#define EUR_CR_BIF_BANK_SET 0x0C74 +#define EUR_CR_BIF_BANK_SET_SELECT_2D_MASK 0x00000001U +#define EUR_CR_BIF_BANK_SET_SELECT_2D_SHIFT 0 +#define EUR_CR_BIF_BANK_SET_SELECT_2D_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_3D_MASK 0x0000000CU +#define EUR_CR_BIF_BANK_SET_SELECT_3D_SHIFT 2 +#define EUR_CR_BIF_BANK_SET_SELECT_3D_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_HOST_MASK 0x00000010U +#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SHIFT 4 +#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_TA_MASK 0x000000C0U +#define EUR_CR_BIF_BANK_SET_SELECT_TA_SHIFT 6 +#define EUR_CR_BIF_BANK_SET_SELECT_TA_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_EDM_MASK 0x00000100U +#define 
EUR_CR_BIF_BANK_SET_SELECT_EDM_SHIFT 8 +#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_MASK 0x00000200U +#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SHIFT 9 +#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SIGNED 0 +/* Register EUR_CR_BIF_BANK0 */ +#define EUR_CR_BIF_BANK0 0x0C78 +#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FU +#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0 +#define EUR_CR_BIF_BANK0_INDEX_EDM_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0U +#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4 +#define EUR_CR_BIF_BANK0_INDEX_TA_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000U +#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12 +#define EUR_CR_BIF_BANK0_INDEX_3D_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_PTLA_MASK 0x000F0000U +#define EUR_CR_BIF_BANK0_INDEX_PTLA_SHIFT 16 +#define EUR_CR_BIF_BANK0_INDEX_PTLA_SIGNED 0 +/* Register EUR_CR_BIF_BANK1 */ +#define EUR_CR_BIF_BANK1 0x0C7C +#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FU +#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0 +#define EUR_CR_BIF_BANK1_INDEX_EDM_SIGNED 0 +#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0U +#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4 +#define EUR_CR_BIF_BANK1_INDEX_TA_SIGNED 0 +#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000U +#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12 +#define EUR_CR_BIF_BANK1_INDEX_3D_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE0 */ +#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_TA_REQ_BASE */ +#define EUR_CR_BIF_TA_REQ_BASE 0x0C90 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_MEM_REQ_STAT */ +#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFU +#define 
EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_SIGNED 0 +/* Register EUR_CR_BIF_3D_REQ_BASE */ +#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20 +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_ZLS_REQ_BASE */ +#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_BANK_STATUS */ +#define EUR_CR_BIF_BANK_STATUS 0x0CB4 +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001U +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0 +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SIGNED 0 +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002U +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1 +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SIGNED 0 +/* Register EUR_CR_BIF_MMU_CTRL */ +#define EUR_CR_BIF_MMU_CTRL 0x0CD0 +#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_MASK 0x00000001U +#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_SHIFT 0 +#define EUR_CR_BIF_MMU_CTRL_PREFETCHING_ON_SIGNED 0 +#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_MASK 0x00000006U +#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT 1 +#define EUR_CR_BIF_MMU_CTRL_ADDR_HASH_MODE_SIGNED 0 +#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_MASK 0x00000008U +#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_SHIFT 3 +#define EUR_CR_BIF_MMU_CTRL_ENABLE_WRITE_BURST_COLLATE_SIGNED 0 +#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK 0x00000010U +#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_SHIFT 4 +#define EUR_CR_BIF_MMU_CTRL_ENABLE_DC_TLB_SIGNED 0 +#define EUR_CR_BIF_MMU_CTRL_DISABLE_BURST_EXP_MASK 0x00000020U +#define EUR_CR_BIF_MMU_CTRL_DISABLE_BURST_EXP_SHIFT 5 +#define EUR_CR_BIF_MMU_CTRL_DISABLE_BURST_EXP_SIGNED 0 +/* Register EUR_CR_2D_BLIT_STATUS */ +#define EUR_CR_2D_BLIT_STATUS 0x0E04 
+#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFU +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0 +#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SIGNED 0 +#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000U +#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24 +#define EUR_CR_2D_BLIT_STATUS_BUSY_SIGNED 0 +/* Register EUR_CR_2D_VIRTUAL_FIFO_0 */ +#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10 +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001U +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EU +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000U +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12 +#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SIGNED 0 +/* Register EUR_CR_2D_VIRTUAL_FIFO_1 */ +#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFU +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SIGNED 0 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000U +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24 +#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SIGNED 0 +/* Register EUR_CR_BREAKPOINT0_START */ +#define EUR_CR_BREAKPOINT0_START 0x0F44 +#define EUR_CR_BREAKPOINT0_START_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT0_START_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT0_START_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT0_END */ +#define EUR_CR_BREAKPOINT0_END 0x0F48 +#define EUR_CR_BREAKPOINT0_END_ADDRESS_MASK 0xFFFFFFF0U 
+#define EUR_CR_BREAKPOINT0_END_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT0_END_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT0 */ +#define EUR_CR_BREAKPOINT0 0x0F4C +#define EUR_CR_BREAKPOINT0_MASK_DM_MASK 0x00000038U +#define EUR_CR_BREAKPOINT0_MASK_DM_SHIFT 3 +#define EUR_CR_BREAKPOINT0_MASK_DM_SIGNED 0 +#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK 0x00000004U +#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SHIFT 2 +#define EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_SHIFT 1 +#define EUR_CR_BREAKPOINT0_CTRL_WENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_MASK 0x00000001U +#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_SHIFT 0 +#define EUR_CR_BREAKPOINT0_CTRL_RENABLE_SIGNED 0 +/* Register EUR_CR_BREAKPOINT1_START */ +#define EUR_CR_BREAKPOINT1_START 0x0F50 +#define EUR_CR_BREAKPOINT1_START_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT1_START_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT1_START_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT1_END */ +#define EUR_CR_BREAKPOINT1_END 0x0F54 +#define EUR_CR_BREAKPOINT1_END_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT1_END_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT1_END_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT1 */ +#define EUR_CR_BREAKPOINT1 0x0F58 +#define EUR_CR_BREAKPOINT1_MASK_DM_MASK 0x00000038U +#define EUR_CR_BREAKPOINT1_MASK_DM_SHIFT 3 +#define EUR_CR_BREAKPOINT1_MASK_DM_SIGNED 0 +#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_MASK 0x00000004U +#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_SHIFT 2 +#define EUR_CR_BREAKPOINT1_CTRL_TRAPENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_SHIFT 1 +#define EUR_CR_BREAKPOINT1_CTRL_WENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_MASK 0x00000001U +#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_SHIFT 0 +#define EUR_CR_BREAKPOINT1_CTRL_RENABLE_SIGNED 0 +/* Register EUR_CR_BREAKPOINT2_START */ 
+#define EUR_CR_BREAKPOINT2_START 0x0F5C +#define EUR_CR_BREAKPOINT2_START_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT2_START_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT2_START_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT2_END */ +#define EUR_CR_BREAKPOINT2_END 0x0F60 +#define EUR_CR_BREAKPOINT2_END_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT2_END_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT2_END_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT2 */ +#define EUR_CR_BREAKPOINT2 0x0F64 +#define EUR_CR_BREAKPOINT2_MASK_DM_MASK 0x00000038U +#define EUR_CR_BREAKPOINT2_MASK_DM_SHIFT 3 +#define EUR_CR_BREAKPOINT2_MASK_DM_SIGNED 0 +#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_MASK 0x00000004U +#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_SHIFT 2 +#define EUR_CR_BREAKPOINT2_CTRL_TRAPENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_SHIFT 1 +#define EUR_CR_BREAKPOINT2_CTRL_WENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_MASK 0x00000001U +#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_SHIFT 0 +#define EUR_CR_BREAKPOINT2_CTRL_RENABLE_SIGNED 0 +/* Register EUR_CR_BREAKPOINT3_START */ +#define EUR_CR_BREAKPOINT3_START 0x0F68 +#define EUR_CR_BREAKPOINT3_START_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT3_START_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT3_START_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT3_END */ +#define EUR_CR_BREAKPOINT3_END 0x0F6C +#define EUR_CR_BREAKPOINT3_END_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT3_END_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT3_END_ADDRESS_SIGNED 0 +/* Register EUR_CR_BREAKPOINT3 */ +#define EUR_CR_BREAKPOINT3 0x0F70 +#define EUR_CR_BREAKPOINT3_MASK_DM_MASK 0x00000038U +#define EUR_CR_BREAKPOINT3_MASK_DM_SHIFT 3 +#define EUR_CR_BREAKPOINT3_MASK_DM_SIGNED 0 +#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_MASK 0x00000004U +#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_SHIFT 2 +#define EUR_CR_BREAKPOINT3_CTRL_TRAPENABLE_SIGNED 0 +#define 
EUR_CR_BREAKPOINT3_CTRL_WENABLE_MASK 0x00000002U +#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_SHIFT 1 +#define EUR_CR_BREAKPOINT3_CTRL_WENABLE_SIGNED 0 +#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_MASK 0x00000001U +#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_SHIFT 0 +#define EUR_CR_BREAKPOINT3_CTRL_RENABLE_SIGNED 0 +/* Register EUR_CR_BREAKPOINT_READ */ +#define EUR_CR_BREAKPOINT_READ 0x0F74 +#define EUR_CR_BREAKPOINT_READ_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_BREAKPOINT_READ_ADDRESS_SHIFT 4 +#define EUR_CR_BREAKPOINT_READ_ADDRESS_SIGNED 0 +/* Register EUR_CR_PARTITION_BREAKPOINT_TRAP */ +#define EUR_CR_PARTITION_BREAKPOINT_TRAP 0x0F78 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_SHIFT 1 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_CONTINUE_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0 +/* Register EUR_CR_PARTITION_BREAKPOINT */ +#define EUR_CR_PARTITION_BREAKPOINT 0x0F7C +#define EUR_CR_PARTITION_BREAKPOINT_MODULE_ID_MASK 0x000003C0U +#define EUR_CR_PARTITION_BREAKPOINT_MODULE_ID_SHIFT 6 +#define EUR_CR_PARTITION_BREAKPOINT_MODULE_ID_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_ID_MASK 0x00000030U +#define EUR_CR_PARTITION_BREAKPOINT_ID_SHIFT 4 +#define EUR_CR_PARTITION_BREAKPOINT_ID_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_UNTRAPPED_MASK 0x00000008U +#define EUR_CR_PARTITION_BREAKPOINT_UNTRAPPED_SHIFT 3 +#define EUR_CR_PARTITION_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAPPED_MASK 0x00000004U +#define EUR_CR_PARTITION_BREAKPOINT_TRAPPED_SHIFT 2 +#define EUR_CR_PARTITION_BREAKPOINT_TRAPPED_SIGNED 0 +/* Register EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0 */ +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0 0x0F80 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U +#define 
EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0 +/* Register EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1 */ +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1 0x0F84 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0 +#define EUR_CR_PARTITION_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_0 */ +#define EUR_CR_USE_CODE_BASE_0 0x0A0C +#define EUR_CR_USE_CODE_BASE_ADDR_00_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_00_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_00_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_00_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_00_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_00_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_1 */ +#define EUR_CR_USE_CODE_BASE_1 0x0A10 +#define EUR_CR_USE_CODE_BASE_ADDR_01_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_01_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_01_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_01_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_01_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_01_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_2 */ 
+#define EUR_CR_USE_CODE_BASE_2 0x0A14 +#define EUR_CR_USE_CODE_BASE_ADDR_02_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_02_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_02_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_02_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_02_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_02_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_3 */ +#define EUR_CR_USE_CODE_BASE_3 0x0A18 +#define EUR_CR_USE_CODE_BASE_ADDR_03_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_03_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_03_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_03_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_03_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_03_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_4 */ +#define EUR_CR_USE_CODE_BASE_4 0x0A1C +#define EUR_CR_USE_CODE_BASE_ADDR_04_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_04_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_04_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_04_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_04_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_04_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_5 */ +#define EUR_CR_USE_CODE_BASE_5 0x0A20 +#define EUR_CR_USE_CODE_BASE_ADDR_05_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_05_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_05_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_05_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_05_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_05_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_6 */ +#define EUR_CR_USE_CODE_BASE_6 0x0A24 +#define EUR_CR_USE_CODE_BASE_ADDR_06_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_06_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_06_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_06_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_06_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_06_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_7 */ +#define EUR_CR_USE_CODE_BASE_7 0x0A28 +#define EUR_CR_USE_CODE_BASE_ADDR_07_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_07_SHIFT 0 +#define 
EUR_CR_USE_CODE_BASE_ADDR_07_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_07_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_07_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_07_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_8 */ +#define EUR_CR_USE_CODE_BASE_8 0x0A2C +#define EUR_CR_USE_CODE_BASE_ADDR_08_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_08_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_08_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_08_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_08_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_08_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_9 */ +#define EUR_CR_USE_CODE_BASE_9 0x0A30 +#define EUR_CR_USE_CODE_BASE_ADDR_09_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_09_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_09_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_09_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_09_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_09_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_10 */ +#define EUR_CR_USE_CODE_BASE_10 0x0A34 +#define EUR_CR_USE_CODE_BASE_ADDR_10_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_10_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_10_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_10_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_10_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_10_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_11 */ +#define EUR_CR_USE_CODE_BASE_11 0x0A38 +#define EUR_CR_USE_CODE_BASE_ADDR_11_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_11_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_11_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_11_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_11_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_11_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_12 */ +#define EUR_CR_USE_CODE_BASE_12 0x0A3C +#define EUR_CR_USE_CODE_BASE_ADDR_12_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_12_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_12_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_12_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_12_SHIFT 26 +#define 
EUR_CR_USE_CODE_BASE_DM_12_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_13 */ +#define EUR_CR_USE_CODE_BASE_13 0x0A40 +#define EUR_CR_USE_CODE_BASE_ADDR_13_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_13_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_13_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_13_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_13_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_13_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_14 */ +#define EUR_CR_USE_CODE_BASE_14 0x0A44 +#define EUR_CR_USE_CODE_BASE_ADDR_14_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_14_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_14_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_14_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_14_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_14_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_15 */ +#define EUR_CR_USE_CODE_BASE_15 0x0A48 +#define EUR_CR_USE_CODE_BASE_ADDR_15_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_15_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_15_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_15_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_15_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_15_SIGNED 0 +/* Register EUR_CR_PIPE0_BREAKPOINT_TRAP */ +#define EUR_CR_PIPE0_BREAKPOINT_TRAP 0x0F88 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_CONTINUE_SHIFT 1 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_CONTINUE_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0 +/* Register EUR_CR_PIPE0_BREAKPOINT */ +#define EUR_CR_PIPE0_BREAKPOINT 0x0F8C +#define EUR_CR_PIPE0_BREAKPOINT_MODULE_ID_MASK 0x000003C0U +#define EUR_CR_PIPE0_BREAKPOINT_MODULE_ID_SHIFT 6 +#define EUR_CR_PIPE0_BREAKPOINT_MODULE_ID_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_ID_MASK 0x00000030U +#define EUR_CR_PIPE0_BREAKPOINT_ID_SHIFT 4 +#define EUR_CR_PIPE0_BREAKPOINT_ID_SIGNED 0 +#define 
EUR_CR_PIPE0_BREAKPOINT_UNTRAPPED_MASK 0x00000008U +#define EUR_CR_PIPE0_BREAKPOINT_UNTRAPPED_SHIFT 3 +#define EUR_CR_PIPE0_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAPPED_MASK 0x00000004U +#define EUR_CR_PIPE0_BREAKPOINT_TRAPPED_SHIFT 2 +#define EUR_CR_PIPE0_BREAKPOINT_TRAPPED_SIGNED 0 +/* Register EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0 */ +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0 0x0F90 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0 +/* Register EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1 */ +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1 0x0F94 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0 +#define EUR_CR_PIPE0_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0 +/* Register EUR_CR_PIPE1_BREAKPOINT_TRAP */ +#define EUR_CR_PIPE1_BREAKPOINT_TRAP 0x0F98 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_CONTINUE_SHIFT 1 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_CONTINUE_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U +#define 
EUR_CR_PIPE1_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0 +/* Register EUR_CR_PIPE1_BREAKPOINT */ +#define EUR_CR_PIPE1_BREAKPOINT 0x0F9C +#define EUR_CR_PIPE1_BREAKPOINT_MODULE_ID_MASK 0x000003C0U +#define EUR_CR_PIPE1_BREAKPOINT_MODULE_ID_SHIFT 6 +#define EUR_CR_PIPE1_BREAKPOINT_MODULE_ID_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_ID_MASK 0x00000030U +#define EUR_CR_PIPE1_BREAKPOINT_ID_SHIFT 4 +#define EUR_CR_PIPE1_BREAKPOINT_ID_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_UNTRAPPED_MASK 0x00000008U +#define EUR_CR_PIPE1_BREAKPOINT_UNTRAPPED_SHIFT 3 +#define EUR_CR_PIPE1_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAPPED_MASK 0x00000004U +#define EUR_CR_PIPE1_BREAKPOINT_TRAPPED_SHIFT 2 +#define EUR_CR_PIPE1_BREAKPOINT_TRAPPED_SIGNED 0 +/* Register EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0 */ +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0 0x0FA0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0 +/* Register EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1 */ +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1 0x0FA4 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0 +#define 
EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0 +#define EUR_CR_PIPE1_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0 +/* Table EUR_CR_USE_CODE_BASE */ +/* Register EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X))) +#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x03FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_MASK 0x0C000000U +#define EUR_CR_USE_CODE_BASE_DM_SHIFT 26 +#define EUR_CR_USE_CODE_BASE_DM_SIGNED 0 +/* Number of entries in table EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16 +#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16 + +#endif /* _SGX544DEFS_KM_H_ */ + diff --git a/pvr-source/services4/srvkm/hwdefs/sgx545defs.h b/pvr-source/services4/srvkm/hwdefs/sgx545defs.h new file mode 100644 index 0000000..c5adee2 --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgx545defs.h @@ -0,0 +1,1290 @@ +/*************************************************************************/ /*! +@Title Hardware defs for SGX545. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _SGX545DEFS_KM_H_ +#define _SGX545DEFS_KM_H_ + +/* Register EUR_CR_CLKGATECTL */ +#define EUR_CR_CLKGATECTL 0x0000 +#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL_ISP_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_ISP2_CLKG_MASK 0x0000000CU +#define EUR_CR_CLKGATECTL_ISP2_CLKG_SHIFT 2 +#define EUR_CR_CLKGATECTL_ISP2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 4 +#define EUR_CR_CLKGATECTL_TSP_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_TE_CLKG_MASK 0x000000C0U +#define EUR_CR_CLKGATECTL_TE_CLKG_SHIFT 6 +#define EUR_CR_CLKGATECTL_TE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_MTE_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL_MTE_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL_MTE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00000C00U +#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 10 +#define EUR_CR_CLKGATECTL_DPM_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_VDM_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL_VDM_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL_VDM_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_PDS0_CLKG_MASK 0x0000C000U +#define EUR_CR_CLKGATECTL_PDS0_CLKG_SHIFT 14 +#define EUR_CR_CLKGATECTL_PDS0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000U +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24 +#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SIGNED 0 +/* Register EUR_CR_CLKGATECTL2 */ +#define EUR_CR_CLKGATECTL2 0x0004 +#define EUR_CR_CLKGATECTL2_PBE_CLKG_MASK 0x00000003U +#define EUR_CR_CLKGATECTL2_PBE_CLKG_SHIFT 0 +#define EUR_CR_CLKGATECTL2_PBE_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_MASK 0x0000000CU +#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_SHIFT 2 +#define EUR_CR_CLKGATECTL2_CACHEL2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_MASK 0x00000030U +#define EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SHIFT 4 +#define 
EUR_CR_CLKGATECTL2_UCACHEL2_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_USE0_CLKG_MASK 0x000000C0U +#define EUR_CR_CLKGATECTL2_USE0_CLKG_SHIFT 6 +#define EUR_CR_CLKGATECTL2_USE0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_MASK 0x00000300U +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SHIFT 8 +#define EUR_CR_CLKGATECTL2_ITR0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_MASK 0x00000C00U +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SHIFT 10 +#define EUR_CR_CLKGATECTL2_TEX0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_MADD0_CLKG_MASK 0x00003000U +#define EUR_CR_CLKGATECTL2_MADD0_CLKG_SHIFT 12 +#define EUR_CR_CLKGATECTL2_MADD0_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_USE1_CLKG_MASK 0x0000C000U +#define EUR_CR_CLKGATECTL2_USE1_CLKG_SHIFT 14 +#define EUR_CR_CLKGATECTL2_USE1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_MASK 0x00030000U +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SHIFT 16 +#define EUR_CR_CLKGATECTL2_ITR1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_MASK 0x000C0000U +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SHIFT 18 +#define EUR_CR_CLKGATECTL2_TEX1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_MADD1_CLKG_MASK 0x00300000U +#define EUR_CR_CLKGATECTL2_MADD1_CLKG_SHIFT 20 +#define EUR_CR_CLKGATECTL2_MADD1_CLKG_SIGNED 0 +#define EUR_CR_CLKGATECTL2_PDS1_CLKG_MASK 0x00C00000U +#define EUR_CR_CLKGATECTL2_PDS1_CLKG_SHIFT 22 +#define EUR_CR_CLKGATECTL2_PDS1_CLKG_SIGNED 0 +/* Register EUR_CR_CLKGATESTATUS */ +#define EUR_CR_CLKGATESTATUS 0x0008 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000001U +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 0 +#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_MASK 0x00000002U +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SHIFT 1 +#define EUR_CR_CLKGATESTATUS_ISP2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000004U +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 2 +#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TE_CLKS_MASK 0x00000008U +#define 
EUR_CR_CLKGATESTATUS_TE_CLKS_SHIFT 3 +#define EUR_CR_CLKGATESTATUS_TE_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_MASK 0x00000010U +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SHIFT 4 +#define EUR_CR_CLKGATESTATUS_MTE_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00000020U +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 5 +#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_MASK 0x00000040U +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SHIFT 6 +#define EUR_CR_CLKGATESTATUS_VDM_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_PDS0_CLKS_MASK 0x00000080U +#define EUR_CR_CLKGATESTATUS_PDS0_CLKS_SHIFT 7 +#define EUR_CR_CLKGATESTATUS_PDS0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_MASK 0x00000100U +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SHIFT 8 +#define EUR_CR_CLKGATESTATUS_PBE_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_MASK 0x00000200U +#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_SHIFT 9 +#define EUR_CR_CLKGATESTATUS_CACHEL2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_MASK 0x00000400U +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SHIFT 10 +#define EUR_CR_CLKGATESTATUS_UCACHEL2_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_MASK 0x00000800U +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SHIFT 11 +#define EUR_CR_CLKGATESTATUS_USE0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_MASK 0x00001000U +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SHIFT 12 +#define EUR_CR_CLKGATESTATUS_ITR0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_MASK 0x00002000U +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SHIFT 13 +#define EUR_CR_CLKGATESTATUS_TEX0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_MASK 0x00004000U +#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_SHIFT 14 +#define EUR_CR_CLKGATESTATUS_MADD0_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_MASK 0x00008000U +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SHIFT 15 +#define EUR_CR_CLKGATESTATUS_USE1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_MASK 
0x00010000U +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SHIFT 16 +#define EUR_CR_CLKGATESTATUS_ITR1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_MASK 0x00020000U +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SHIFT 17 +#define EUR_CR_CLKGATESTATUS_TEX1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_MASK 0x00040000U +#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_SHIFT 18 +#define EUR_CR_CLKGATESTATUS_MADD1_CLKS_SIGNED 0 +#define EUR_CR_CLKGATESTATUS_PDS1_CLKS_MASK 0x00080000U +#define EUR_CR_CLKGATESTATUS_PDS1_CLKS_SHIFT 19 +#define EUR_CR_CLKGATESTATUS_PDS1_CLKS_SIGNED 0 +/* Register EUR_CR_CLKGATECTLOVR */ +#define EUR_CR_CLKGATECTLOVR 0x000C +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000003U +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 0 +#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_MASK 0x0000000CU +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SHIFT 2 +#define EUR_CR_CLKGATECTLOVR_ISP2_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000030U +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 4 +#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_MASK 0x000000C0U +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SHIFT 6 +#define EUR_CR_CLKGATECTLOVR_TE_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_MASK 0x00000300U +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SHIFT 8 +#define EUR_CR_CLKGATECTLOVR_MTE_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00000C00U +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 10 +#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_MASK 0x00003000U +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SHIFT 12 +#define EUR_CR_CLKGATECTLOVR_VDM_CLKO_SIGNED 0 +#define EUR_CR_CLKGATECTLOVR_PDS0_CLKO_MASK 0x0000C000U +#define EUR_CR_CLKGATECTLOVR_PDS0_CLKO_SHIFT 14 +#define EUR_CR_CLKGATECTLOVR_PDS0_CLKO_SIGNED 0 +/* Register EUR_CR_CORE_ID */ +#define EUR_CR_CORE_ID 0x001C +#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFU +#define 
EUR_CR_CORE_ID_CONFIG_SHIFT 0 +#define EUR_CR_CORE_ID_CONFIG_SIGNED 0 +#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000U +#define EUR_CR_CORE_ID_ID_SHIFT 16 +#define EUR_CR_CORE_ID_ID_SIGNED 0 +/* Register EUR_CR_CORE_REVISION */ +#define EUR_CR_CORE_REVISION 0x0020 +#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU +#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0 +#define EUR_CR_CORE_REVISION_MAINTENANCE_SIGNED 0 +#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00U +#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8 +#define EUR_CR_CORE_REVISION_MINOR_SIGNED 0 +#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000U +#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16 +#define EUR_CR_CORE_REVISION_MAJOR_SIGNED 0 +#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000U +#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24 +#define EUR_CR_CORE_REVISION_DESIGNER_SIGNED 0 +/* Register EUR_CR_DESIGNER_REV_FIELD1 */ +#define EUR_CR_DESIGNER_REV_FIELD1 0x0024 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0 +#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SIGNED 0 +/* Register EUR_CR_DESIGNER_REV_FIELD2 */ +#define EUR_CR_DESIGNER_REV_FIELD2 0x002C +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFU +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0 +#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SIGNED 0 +/* Register EUR_CR_SOFT_RESET */ +#define EUR_CR_SOFT_RESET 0x0080 +#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001U +#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0 +#define EUR_CR_SOFT_RESET_BIF_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000002U +#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 1 +#define EUR_CR_SOFT_RESET_DPM_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000004U +#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 2 +#define EUR_CR_SOFT_RESET_TA_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000008U +#define 
EUR_CR_SOFT_RESET_USE_RESET_SHIFT 3 +#define EUR_CR_SOFT_RESET_USE_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000010U +#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 4 +#define EUR_CR_SOFT_RESET_ISP_RESET_SIGNED 0 +#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000020U +#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 5 +#define EUR_CR_SOFT_RESET_TSP_RESET_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_ENABLE2 */ +#define EUR_CR_EVENT_HOST_ENABLE2 0x0110 +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SHIFT 15 +#define EUR_CR_EVENT_HOST_ENABLE2_MTE_STATE_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_VDM_CONTEXT_LOAD_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_ENABLE2_VDM_CONTEXT_LOAD_SHIFT 14 +#define EUR_CR_EVENT_HOST_ENABLE2_VDM_CONTEXT_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_VDM_TASK_KICKED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_ENABLE2_VDM_TASK_KICKED_SHIFT 13 +#define EUR_CR_EVENT_HOST_ENABLE2_VDM_TASK_KICKED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_OTPM_MEM_CLEARED_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_ENABLE2_OTPM_MEM_CLEARED_SHIFT 12 +#define EUR_CR_EVENT_HOST_ENABLE2_OTPM_MEM_CLEARED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_OTPM_FLUSHED_INV_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE2_OTPM_FLUSHED_INV_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE2_OTPM_FLUSHED_INV_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE2_DCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_GSG_FLUSHED_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE2_GSG_FLUSHED_SHIFT 9 +#define EUR_CR_EVENT_HOST_ENABLE2_GSG_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_GSG_LOADED_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE2_GSG_LOADED_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE2_GSG_LOADED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_MASK 
0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_TA_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_3D_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE2_TRIG_DL_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0 +#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_CLEAR2 */ +#define EUR_CR_EVENT_HOST_CLEAR2 0x0114 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SHIFT 15 +#define EUR_CR_EVENT_HOST_CLEAR2_MTE_STATE_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_VDM_CONTEXT_LOAD_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_CLEAR2_VDM_CONTEXT_LOAD_SHIFT 14 +#define EUR_CR_EVENT_HOST_CLEAR2_VDM_CONTEXT_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_VDM_TASK_KICKED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_CLEAR2_VDM_TASK_KICKED_SHIFT 13 +#define EUR_CR_EVENT_HOST_CLEAR2_VDM_TASK_KICKED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_OTPM_MEM_CLEARED_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_CLEAR2_OTPM_MEM_CLEARED_SHIFT 12 +#define 
EUR_CR_EVENT_HOST_CLEAR2_OTPM_MEM_CLEARED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_OTPM_FLUSHED_INV_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR2_OTPM_FLUSHED_INV_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR2_OTPM_FLUSHED_INV_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR2_DCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_GSG_FLUSHED_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR2_GSG_FLUSHED_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR2_GSG_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_GSG_LOADED_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR2_GSG_LOADED_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR2_GSG_LOADED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_TA_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_3D_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR2_TRIG_DL_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0 +#define 
EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SIGNED 0 +/* Register EUR_CR_EVENT_STATUS2 */ +#define EUR_CR_EVENT_STATUS2 0x0118 +#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_MASK 0x00008000U +#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SHIFT 15 +#define EUR_CR_EVENT_STATUS2_MTE_STATE_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_VDM_CONTEXT_LOAD_MASK 0x00004000U +#define EUR_CR_EVENT_STATUS2_VDM_CONTEXT_LOAD_SHIFT 14 +#define EUR_CR_EVENT_STATUS2_VDM_CONTEXT_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_VDM_TASK_KICKED_MASK 0x00002000U +#define EUR_CR_EVENT_STATUS2_VDM_TASK_KICKED_SHIFT 13 +#define EUR_CR_EVENT_STATUS2_VDM_TASK_KICKED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_OTPM_MEM_CLEARED_MASK 0x00001000U +#define EUR_CR_EVENT_STATUS2_OTPM_MEM_CLEARED_SHIFT 12 +#define EUR_CR_EVENT_STATUS2_OTPM_MEM_CLEARED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_OTPM_FLUSHED_INV_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS2_OTPM_FLUSHED_INV_SHIFT 11 +#define EUR_CR_EVENT_STATUS2_OTPM_FLUSHED_INV_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SHIFT 10 +#define EUR_CR_EVENT_STATUS2_DCU_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_GSG_FLUSHED_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS2_GSG_FLUSHED_SHIFT 9 +#define EUR_CR_EVENT_STATUS2_GSG_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_GSG_LOADED_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS2_GSG_LOADED_SHIFT 8 +#define EUR_CR_EVENT_STATUS2_GSG_LOADED_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SHIFT 7 +#define EUR_CR_EVENT_STATUS2_TRIG_TA_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SHIFT 6 +#define EUR_CR_EVENT_STATUS2_TRIG_3D_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SHIFT 5 +#define EUR_CR_EVENT_STATUS2_TRIG_DL_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_MASK 
0x00000008U +#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_SHIFT 3 +#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_SHIFT 2 +#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1 +#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0 +#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SIGNED 0 +/* Register EUR_CR_EVENT_STATUS */ +#define EUR_CR_EVENT_STATUS 0x012C +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29 +#define EUR_CR_EVENT_STATUS_TIMER_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000U +#define 
EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_STATUS_DPM_INITEND_SIGNED 0 +#define EUR_CR_EVENT_STATUS_ISP2_ZLS_CSW_FINISHED_MASK 0x00200000U +#define EUR_CR_EVENT_STATUS_ISP2_ZLS_CSW_FINISHED_SHIFT 21 +#define EUR_CR_EVENT_STATUS_ISP2_ZLS_CSW_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_STATUS_OTPM_INV_SIGNED 0 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SIGNED 0 +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SIGNED 0 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_STATUS_BREAKPOINT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_STATUS_SW_EVENT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_STATUS_TA_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9 
+#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SIGNED 0 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0 +#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_ENABLE */ +#define EUR_CR_EVENT_HOST_ENABLE 0x0130 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000U +#define 
EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_ISP2_ZLS_CSW_FINISHED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP2_ZLS_CSW_FINISHED_SHIFT 21 +#define EUR_CR_EVENT_HOST_ENABLE_ISP2_ZLS_CSW_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SIGNED 0 +#define 
EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SIGNED 0 +#define 
EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SIGNED 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0 +#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SIGNED 0 +/* Register EUR_CR_EVENT_HOST_CLEAR */ +#define EUR_CR_EVENT_HOST_CLEAR 0x0134 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000U +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31 +#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000U +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29 +#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28 +#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000U +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26 +#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SIGNED 0 +#define 
EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_ISP2_ZLS_CSW_FINISHED_MASK 0x00200000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP2_ZLS_CSW_FINISHED_SHIFT 21 +#define EUR_CR_EVENT_HOST_CLEAR_ISP2_ZLS_CSW_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000U +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19 +#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000U +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18 +#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000U +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16 +#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000U +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15 +#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000U +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14 +#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SIGNED 0 +#define 
EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13 +#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000U +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12 +#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400U +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10 +#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SIGNED 0 +#define 
EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SIGNED 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001U +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0 +#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SIGNED 0 +/* Register EUR_CR_TIMER */ +#define EUR_CR_TIMER 0x0144 +#define EUR_CR_TIMER_VALUE_MASK 0xFFFFFFFFU +#define EUR_CR_TIMER_VALUE_SHIFT 0 +#define EUR_CR_TIMER_VALUE_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_0 */ +#define EUR_CR_USE_CODE_BASE_0 0x0A0C +#define EUR_CR_USE_CODE_BASE_ADDR_00_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_00_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_00_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_00_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_00_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_00_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_1 */ +#define EUR_CR_USE_CODE_BASE_1 0x0A10 +#define EUR_CR_USE_CODE_BASE_ADDR_01_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_01_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_01_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_01_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_01_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_01_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_2 */ +#define EUR_CR_USE_CODE_BASE_2 0x0A14 +#define EUR_CR_USE_CODE_BASE_ADDR_02_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_02_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_02_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_02_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_02_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_02_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_3 */ +#define EUR_CR_USE_CODE_BASE_3 0x0A18 +#define EUR_CR_USE_CODE_BASE_ADDR_03_MASK 0x01FFFFFFU +#define 
EUR_CR_USE_CODE_BASE_ADDR_03_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_03_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_03_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_03_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_03_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_4 */ +#define EUR_CR_USE_CODE_BASE_4 0x0A1C +#define EUR_CR_USE_CODE_BASE_ADDR_04_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_04_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_04_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_04_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_04_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_04_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_5 */ +#define EUR_CR_USE_CODE_BASE_5 0x0A20 +#define EUR_CR_USE_CODE_BASE_ADDR_05_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_05_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_05_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_05_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_05_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_05_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_6 */ +#define EUR_CR_USE_CODE_BASE_6 0x0A24 +#define EUR_CR_USE_CODE_BASE_ADDR_06_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_06_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_06_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_06_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_06_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_06_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_7 */ +#define EUR_CR_USE_CODE_BASE_7 0x0A28 +#define EUR_CR_USE_CODE_BASE_ADDR_07_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_07_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_07_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_07_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_07_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_07_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_8 */ +#define EUR_CR_USE_CODE_BASE_8 0x0A2C +#define EUR_CR_USE_CODE_BASE_ADDR_08_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_08_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_08_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_08_MASK 0x06000000U +#define 
EUR_CR_USE_CODE_BASE_DM_08_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_08_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_9 */ +#define EUR_CR_USE_CODE_BASE_9 0x0A30 +#define EUR_CR_USE_CODE_BASE_ADDR_09_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_09_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_09_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_09_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_09_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_09_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_10 */ +#define EUR_CR_USE_CODE_BASE_10 0x0A34 +#define EUR_CR_USE_CODE_BASE_ADDR_10_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_10_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_10_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_10_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_10_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_10_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_11 */ +#define EUR_CR_USE_CODE_BASE_11 0x0A38 +#define EUR_CR_USE_CODE_BASE_ADDR_11_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_11_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_11_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_11_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_11_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_11_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_12 */ +#define EUR_CR_USE_CODE_BASE_12 0x0A3C +#define EUR_CR_USE_CODE_BASE_ADDR_12_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_12_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_12_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_12_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_12_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_12_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_13 */ +#define EUR_CR_USE_CODE_BASE_13 0x0A40 +#define EUR_CR_USE_CODE_BASE_ADDR_13_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_13_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_13_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_13_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_13_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_13_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_14 */ +#define 
EUR_CR_USE_CODE_BASE_14 0x0A44 +#define EUR_CR_USE_CODE_BASE_ADDR_14_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_14_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_14_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_14_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_14_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_14_SIGNED 0 +/* Register EUR_CR_USE_CODE_BASE_15 */ +#define EUR_CR_USE_CODE_BASE_15 0x0A48 +#define EUR_CR_USE_CODE_BASE_ADDR_15_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_15_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_15_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_15_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_15_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_15_SIGNED 0 +/* Register EUR_CR_PDS_EXEC_BASE */ +#define EUR_CR_PDS_EXEC_BASE 0x0AB8 +#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20 +#define EUR_CR_PDS_EXEC_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_EVENT_KICKER */ +#define EUR_CR_EVENT_KICKER 0x0AC4 +#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4 +#define EUR_CR_EVENT_KICKER_ADDRESS_SIGNED 0 +/* Register EUR_CR_EVENT_KICK */ +#define EUR_CR_EVENT_KICK 0x0AC8 +#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK_NOW_SIGNED 0 +/* Register EUR_CR_EVENT_TIMER */ +#define EUR_CR_EVENT_TIMER 0x0ACC +#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000U +#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24 +#define EUR_CR_EVENT_TIMER_ENABLE_SIGNED 0 +#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFU +#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0 +#define EUR_CR_EVENT_TIMER_VALUE_SIGNED 0 +/* Register EUR_CR_PDS_INV0 */ +#define EUR_CR_PDS_INV0 0x0AD0 +#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV0_DSC_SHIFT 0 +#define EUR_CR_PDS_INV0_DSC_SIGNED 0 +/* Register EUR_CR_PDS_INV1 */ +#define EUR_CR_PDS_INV1 0x0AD4 +#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV1_DSC_SHIFT 0 
+#define EUR_CR_PDS_INV1_DSC_SIGNED 0 +/* Register EUR_CR_PDS_INV3 */ +#define EUR_CR_PDS_INV3 0x0AD8 +#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001U +#define EUR_CR_PDS_INV3_DSC_SHIFT 0 +#define EUR_CR_PDS_INV3_DSC_SIGNED 0 +/* Register EUR_CR_PDS_INV_CSC */ +#define EUR_CR_PDS_INV_CSC 0x0AE0 +#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001U +#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0 +#define EUR_CR_PDS_INV_CSC_KICK_SIGNED 0 +/* Register EUR_CR_EVENT_KICK1 */ +#define EUR_CR_EVENT_KICK1 0x0AE4 +#define EUR_CR_EVENT_KICK1_NOW_MASK 0x000000FFU +#define EUR_CR_EVENT_KICK1_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK1_NOW_SIGNED 0 +/* Register EUR_CR_EVENT_KICK2 */ +#define EUR_CR_EVENT_KICK2 0x0AE8 +#define EUR_CR_EVENT_KICK2_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK2_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK2_NOW_SIGNED 0 +/* Register EUR_CR_EVENT_KICK3 */ +#define EUR_CR_EVENT_KICK3 0x0AEC +#define EUR_CR_EVENT_KICK3_NOW_MASK 0x00000001U +#define EUR_CR_EVENT_KICK3_NOW_SHIFT 0 +#define EUR_CR_EVENT_KICK3_NOW_SIGNED 0 +/* Register EUR_CR_BIF_CTRL */ +#define EUR_CR_BIF_CTRL 0x0C00 +#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001U +#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0 +#define EUR_CR_BIF_CTRL_NOREORDER_SIGNED 0 +#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002U +#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1 +#define EUR_CR_BIF_CTRL_PAUSE_SIGNED 0 +#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004U +#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2 +#define EUR_CR_BIF_CTRL_FLUSH_SIGNED 0 +#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008U +#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3 +#define EUR_CR_BIF_CTRL_INVALDC_SIGNED 0 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4 +#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200U +#define 
EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00000800U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 11 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00001000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 12 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00002000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 13 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00004000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 14 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SIGNED 0 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK 0x00008000U +#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_SHIFT 15 +#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_SIGNED 0 +/* Register EUR_CR_BIF_INT_STAT */ +#define EUR_CR_BIF_INT_STAT 0x0C04 +#define EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK 0x0000FFFFU +#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SHIFT 0 +#define EUR_CR_BIF_INT_STAT_FAULT_REQ_SIGNED 0 +#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_MASK 0x00070000U +#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SHIFT 16 +#define EUR_CR_BIF_INT_STAT_FAULT_TYPE_SIGNED 0 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_MASK 0x00080000U +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SHIFT 19 +#define EUR_CR_BIF_INT_STAT_FLUSH_COMPLETE_SIGNED 0 +/* Register EUR_CR_BIF_FAULT */ +#define EUR_CR_BIF_FAULT 0x0C08 +#define EUR_CR_BIF_FAULT_CID_MASK 0x0000000FU +#define EUR_CR_BIF_FAULT_CID_SHIFT 0 +#define EUR_CR_BIF_FAULT_CID_SIGNED 0 +#define EUR_CR_BIF_FAULT_SB_MASK 0x000001F0U +#define EUR_CR_BIF_FAULT_SB_SHIFT 4 +#define EUR_CR_BIF_FAULT_SB_SIGNED 0 +#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000U +#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12 +#define 
EUR_CR_BIF_FAULT_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_TILE0 */ +#define EUR_CR_BIF_TILE0 0x0C0C +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE0_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE0_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE1 */ +#define EUR_CR_BIF_TILE1 0x0C10 +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE1_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE1_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE2 */ +#define EUR_CR_BIF_TILE2 0x0C14 +#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE2_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE2_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE3 */ +#define EUR_CR_BIF_TILE3 0x0C18 +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE3_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE3_CFG_SIGNED 0 +/* Register 
EUR_CR_BIF_TILE4 */ +#define EUR_CR_BIF_TILE4 0x0C1C +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE4_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE4_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE5 */ +#define EUR_CR_BIF_TILE5 0x0C20 +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE5_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE5_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE6 */ +#define EUR_CR_BIF_TILE6 0x0C24 +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE6_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE6_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE7 */ +#define EUR_CR_BIF_TILE7 0x0C28 +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE7_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE7_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE8 */ +#define EUR_CR_BIF_TILE8 
0x0C2C +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE8_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE8_CFG_SIGNED 0 +/* Register EUR_CR_BIF_TILE9 */ +#define EUR_CR_BIF_TILE9 0x0C30 +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFU +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0 +#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000U +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 12 +#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SIGNED 0 +#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000U +#define EUR_CR_BIF_TILE9_CFG_SHIFT 24 +#define EUR_CR_BIF_TILE9_CFG_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE1 */ +#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38 +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE2 */ +#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE3 */ +#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40 +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE4 */ +#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44 +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE5 */ +#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48 +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 
0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE6 */ +#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE7 */ +#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50 +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE8 */ +#define EUR_CR_BIF_DIR_LIST_BASE8 0x0C54 +#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE9 */ +#define EUR_CR_BIF_DIR_LIST_BASE9 0x0C58 +#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE10 */ +#define EUR_CR_BIF_DIR_LIST_BASE10 0x0C5C +#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE11 */ +#define EUR_CR_BIF_DIR_LIST_BASE11 0x0C60 +#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE12 */ +#define EUR_CR_BIF_DIR_LIST_BASE12 0x0C64 +#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE13 */ +#define EUR_CR_BIF_DIR_LIST_BASE13 0x0C68 +#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_SHIFT 8 +#define 
EUR_CR_BIF_DIR_LIST_BASE13_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE14 */ +#define EUR_CR_BIF_DIR_LIST_BASE14 0x0C6C +#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE15 */ +#define EUR_CR_BIF_DIR_LIST_BASE15 0x0C70 +#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_BANK_SET */ +#define EUR_CR_BIF_BANK_SET 0x0C74 +#define EUR_CR_BIF_BANK_SET_SELECT_2D_MASK 0x00000001U +#define EUR_CR_BIF_BANK_SET_SELECT_2D_SHIFT 0 +#define EUR_CR_BIF_BANK_SET_SELECT_2D_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_3D_MASK 0x0000000CU +#define EUR_CR_BIF_BANK_SET_SELECT_3D_SHIFT 2 +#define EUR_CR_BIF_BANK_SET_SELECT_3D_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_HOST_MASK 0x00000010U +#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SHIFT 4 +#define EUR_CR_BIF_BANK_SET_SELECT_HOST_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_TA_MASK 0x000000C0U +#define EUR_CR_BIF_BANK_SET_SELECT_TA_SHIFT 6 +#define EUR_CR_BIF_BANK_SET_SELECT_TA_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_EDM_MASK 0x00000100U +#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SHIFT 8 +#define EUR_CR_BIF_BANK_SET_SELECT_EDM_SIGNED 0 +#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_MASK 0x00000200U +#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SHIFT 9 +#define EUR_CR_BIF_BANK_SET_SELECT_DPM_LSS_SIGNED 0 +/* Register EUR_CR_BIF_BANK0 */ +#define EUR_CR_BIF_BANK0 0x0C78 +#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FU +#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0 +#define EUR_CR_BIF_BANK0_INDEX_EDM_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0U +#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4 +#define EUR_CR_BIF_BANK0_INDEX_TA_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_HOST_MASK 0x00000F00U +#define EUR_CR_BIF_BANK0_INDEX_HOST_SHIFT 8 +#define 
EUR_CR_BIF_BANK0_INDEX_HOST_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000U +#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12 +#define EUR_CR_BIF_BANK0_INDEX_3D_SIGNED 0 +#define EUR_CR_BIF_BANK0_INDEX_2D_MASK 0x000F0000U +#define EUR_CR_BIF_BANK0_INDEX_2D_SHIFT 16 +#define EUR_CR_BIF_BANK0_INDEX_2D_SIGNED 0 +/* Register EUR_CR_BIF_BANK1 */ +#define EUR_CR_BIF_BANK1 0x0C7C +#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FU +#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0 +#define EUR_CR_BIF_BANK1_INDEX_EDM_SIGNED 0 +#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0U +#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4 +#define EUR_CR_BIF_BANK1_INDEX_TA_SIGNED 0 +#define EUR_CR_BIF_BANK1_INDEX_HOST_MASK 0x00000F00U +#define EUR_CR_BIF_BANK1_INDEX_HOST_SHIFT 8 +#define EUR_CR_BIF_BANK1_INDEX_HOST_SIGNED 0 +#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000U +#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12 +#define EUR_CR_BIF_BANK1_INDEX_3D_SIGNED 0 +#define EUR_CR_BIF_BANK1_INDEX_2D_MASK 0x000F0000U +#define EUR_CR_BIF_BANK1_INDEX_2D_SHIFT 16 +#define EUR_CR_BIF_BANK1_INDEX_2D_SIGNED 0 +/* Register EUR_CR_BIF_DIR_LIST_BASE0 */ +#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFFF00U +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 8 +#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_TA_REQ_BASE */ +#define EUR_CR_BIF_TA_REQ_BASE 0x0C90 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20 +#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_MEM_REQ_STAT */ +#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000007FFU +#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0 +#define EUR_CR_BIF_MEM_REQ_STAT_READS_SIGNED 0 +/* Register EUR_CR_BIF_3D_REQ_BASE */ +#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20 +#define 
EUR_CR_BIF_3D_REQ_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_ZLS_REQ_BASE */ +#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000U +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20 +#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SIGNED 0 +/* Register EUR_CR_BIF_BANK_STATUS */ +#define EUR_CR_BIF_BANK_STATUS 0x0CB4 +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001U +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0 +#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SIGNED 0 +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002U +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1 +#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SIGNED 0 +/* Register EUR_CR_BIF_36BIT_ADDRESSING */ +#define EUR_CR_BIF_36BIT_ADDRESSING 0x0CCC +#define EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK 0x00000001U +#define EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_SHIFT 0 +#define EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_SIGNED 0 +/* Register EUR_CR_BIF_TILE0_ADDR_EXT */ +#define EUR_CR_BIF_TILE0_ADDR_EXT 0x0CD0 +#define EUR_CR_BIF_TILE0_ADDR_EXT_MIN_MASK 0x000000FFU +#define EUR_CR_BIF_TILE0_ADDR_EXT_MIN_SHIFT 0 +#define EUR_CR_BIF_TILE0_ADDR_EXT_MIN_SIGNED 0 +#define EUR_CR_BIF_TILE0_ADDR_EXT_MAX_MASK 0x0000FF00U +#define EUR_CR_BIF_TILE0_ADDR_EXT_MAX_SHIFT 8 +#define EUR_CR_BIF_TILE0_ADDR_EXT_MAX_SIGNED 0 +/* Register EUR_CR_BIF_TILE1_ADDR_EXT */ +#define EUR_CR_BIF_TILE1_ADDR_EXT 0x0CD4 +#define EUR_CR_BIF_TILE1_ADDR_EXT_MIN_MASK 0x000000FFU +#define EUR_CR_BIF_TILE1_ADDR_EXT_MIN_SHIFT 0 +#define EUR_CR_BIF_TILE1_ADDR_EXT_MIN_SIGNED 0 +#define EUR_CR_BIF_TILE1_ADDR_EXT_MAX_MASK 0x0000FF00U +#define EUR_CR_BIF_TILE1_ADDR_EXT_MAX_SHIFT 8 +#define EUR_CR_BIF_TILE1_ADDR_EXT_MAX_SIGNED 0 +/* Register EUR_CR_BIF_TILE2_ADDR_EXT */ +#define EUR_CR_BIF_TILE2_ADDR_EXT 0x0CD8 +#define EUR_CR_BIF_TILE2_ADDR_EXT_MIN_MASK 0x000000FFU +#define EUR_CR_BIF_TILE2_ADDR_EXT_MIN_SHIFT 0 +#define EUR_CR_BIF_TILE2_ADDR_EXT_MIN_SIGNED 0 +#define 
EUR_CR_BIF_TILE2_ADDR_EXT_MAX_MASK 0x0000FF00U +#define EUR_CR_BIF_TILE2_ADDR_EXT_MAX_SHIFT 8 +#define EUR_CR_BIF_TILE2_ADDR_EXT_MAX_SIGNED 0 +/* Register EUR_CR_BIF_TILE3_ADDR_EXT */ +#define EUR_CR_BIF_TILE3_ADDR_EXT 0x0CDC +#define EUR_CR_BIF_TILE3_ADDR_EXT_MIN_MASK 0x000000FFU +#define EUR_CR_BIF_TILE3_ADDR_EXT_MIN_SHIFT 0 +#define EUR_CR_BIF_TILE3_ADDR_EXT_MIN_SIGNED 0 +#define EUR_CR_BIF_TILE3_ADDR_EXT_MAX_MASK 0x0000FF00U +#define EUR_CR_BIF_TILE3_ADDR_EXT_MAX_SHIFT 8 +#define EUR_CR_BIF_TILE3_ADDR_EXT_MAX_SIGNED 0 +/* Register EUR_CR_BIF_TILE4_ADDR_EXT */ +#define EUR_CR_BIF_TILE4_ADDR_EXT 0x0CE0 +#define EUR_CR_BIF_TILE4_ADDR_EXT_MIN_MASK 0x000000FFU +#define EUR_CR_BIF_TILE4_ADDR_EXT_MIN_SHIFT 0 +#define EUR_CR_BIF_TILE4_ADDR_EXT_MIN_SIGNED 0 +#define EUR_CR_BIF_TILE4_ADDR_EXT_MAX_MASK 0x0000FF00U +#define EUR_CR_BIF_TILE4_ADDR_EXT_MAX_SHIFT 8 +#define EUR_CR_BIF_TILE4_ADDR_EXT_MAX_SIGNED 0 +/* Register EUR_CR_BIF_TILE5_ADDR_EXT */ +#define EUR_CR_BIF_TILE5_ADDR_EXT 0x0CE4 +#define EUR_CR_BIF_TILE5_ADDR_EXT_MIN_MASK 0x000000FFU +#define EUR_CR_BIF_TILE5_ADDR_EXT_MIN_SHIFT 0 +#define EUR_CR_BIF_TILE5_ADDR_EXT_MIN_SIGNED 0 +#define EUR_CR_BIF_TILE5_ADDR_EXT_MAX_MASK 0x0000FF00U +#define EUR_CR_BIF_TILE5_ADDR_EXT_MAX_SHIFT 8 +#define EUR_CR_BIF_TILE5_ADDR_EXT_MAX_SIGNED 0 +/* Register EUR_CR_BIF_TILE6_ADDR_EXT */ +#define EUR_CR_BIF_TILE6_ADDR_EXT 0x0CE8 +#define EUR_CR_BIF_TILE6_ADDR_EXT_MIN_MASK 0x000000FFU +#define EUR_CR_BIF_TILE6_ADDR_EXT_MIN_SHIFT 0 +#define EUR_CR_BIF_TILE6_ADDR_EXT_MIN_SIGNED 0 +#define EUR_CR_BIF_TILE6_ADDR_EXT_MAX_MASK 0x0000FF00U +#define EUR_CR_BIF_TILE6_ADDR_EXT_MAX_SHIFT 8 +#define EUR_CR_BIF_TILE6_ADDR_EXT_MAX_SIGNED 0 +/* Register EUR_CR_BIF_TILE7_ADDR_EXT */ +#define EUR_CR_BIF_TILE7_ADDR_EXT 0x0CEC +#define EUR_CR_BIF_TILE7_ADDR_EXT_MIN_MASK 0x000000FFU +#define EUR_CR_BIF_TILE7_ADDR_EXT_MIN_SHIFT 0 +#define EUR_CR_BIF_TILE7_ADDR_EXT_MIN_SIGNED 0 +#define EUR_CR_BIF_TILE7_ADDR_EXT_MAX_MASK 0x0000FF00U +#define 
EUR_CR_BIF_TILE7_ADDR_EXT_MAX_SHIFT 8 +#define EUR_CR_BIF_TILE7_ADDR_EXT_MAX_SIGNED 0 +/* Register EUR_CR_BIF_TILE8_ADDR_EXT */ +#define EUR_CR_BIF_TILE8_ADDR_EXT 0x0CF0 +#define EUR_CR_BIF_TILE8_ADDR_EXT_MIN_MASK 0x000000FFU +#define EUR_CR_BIF_TILE8_ADDR_EXT_MIN_SHIFT 0 +#define EUR_CR_BIF_TILE8_ADDR_EXT_MIN_SIGNED 0 +#define EUR_CR_BIF_TILE8_ADDR_EXT_MAX_MASK 0x0000FF00U +#define EUR_CR_BIF_TILE8_ADDR_EXT_MAX_SHIFT 8 +#define EUR_CR_BIF_TILE8_ADDR_EXT_MAX_SIGNED 0 +/* Register EUR_CR_BIF_TILE9_ADDR_EXT */ +#define EUR_CR_BIF_TILE9_ADDR_EXT 0x0CF4 +#define EUR_CR_BIF_TILE9_ADDR_EXT_MIN_MASK 0x000000FFU +#define EUR_CR_BIF_TILE9_ADDR_EXT_MIN_SHIFT 0 +#define EUR_CR_BIF_TILE9_ADDR_EXT_MIN_SIGNED 0 +#define EUR_CR_BIF_TILE9_ADDR_EXT_MAX_MASK 0x0000FF00U +#define EUR_CR_BIF_TILE9_ADDR_EXT_MAX_SHIFT 8 +#define EUR_CR_BIF_TILE9_ADDR_EXT_MAX_SIGNED 0 +/* Register EUR_CR_BIF_CTRL_RDATA */ +#define EUR_CR_BIF_CTRL_RDATA 0x0CF8 +#define EUR_CR_BIF_CTRL_RDATA_LIMIT_MASK 0x000003FFU +#define EUR_CR_BIF_CTRL_RDATA_LIMIT_SHIFT 0 +#define EUR_CR_BIF_CTRL_RDATA_LIMIT_SIGNED 0 +/* Table EUR_CR_USE_CODE_BASE */ +/* Register EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X))) +#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x01FFFFFFU +#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0 +#define EUR_CR_USE_CODE_BASE_ADDR_SIGNED 0 +#define EUR_CR_USE_CODE_BASE_DM_MASK 0x06000000U +#define EUR_CR_USE_CODE_BASE_DM_SHIFT 25 +#define EUR_CR_USE_CODE_BASE_DM_SIGNED 0 +/* Number of entries in table EUR_CR_USE_CODE_BASE */ +#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16 +#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16 + +#endif /* _SGX545DEFS_KM_H_ */ + diff --git a/pvr-source/services4/srvkm/hwdefs/sgxdefs.h b/pvr-source/services4/srvkm/hwdefs/sgxdefs.h new file mode 100644 index 0000000..ed24647 --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgxdefs.h @@ -0,0 +1,112 @@ +/*************************************************************************/ /*! 
+@Title SGX hw definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _SGXDEFS_H_ +#define _SGXDEFS_H_ + +#include "sgxerrata.h" +#include "sgxfeaturedefs.h" + +#if defined(SGX520) +#include "sgx520defs.h" +#else +#if defined(SGX530) +#include "sgx530defs.h" +#else +#if defined(SGX535) +#include "sgx535defs.h" +#else +#if defined(SGX535_V1_1) +#include "sgx535defs.h" +#else +#if defined(SGX540) +#include "sgx540defs.h" +#else +#if defined(SGX543) +#if defined(FIX_HW_BRN_29954) +#include "sgx543_v1.164defs.h" +#else +#include "sgx543defs.h" +#endif +#else +#if defined(SGX544) +#include "sgx544defs.h" +#else +#if defined(SGX545) +#include "sgx545defs.h" +#else +#if defined(SGX531) +#include "sgx531defs.h" +#else +#if defined(SGX554) +#include "sgx554defs.h" +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif + +#if defined(SGX_FEATURE_MP) +#if defined(SGX554) +#include "sgxmpplusdefs.h" +#else +#include "sgxmpdefs.h" +#endif /* SGX554 */ +#else /* SGX_FEATURE_MP */ +#if defined(SGX_FEATURE_SYSTEM_CACHE) +#include "mnemedefs.h" +#endif +#endif /* SGX_FEATURE_MP */ + +/***************************************************************************** + Core specific defines. 
+*****************************************************************************/ + +#endif /* _SGXDEFS_H_ */ + +/***************************************************************************** + End of file (sgxdefs.h) +*****************************************************************************/ diff --git a/pvr-source/services4/srvkm/hwdefs/sgxerrata.h b/pvr-source/services4/srvkm/hwdefs/sgxerrata.h new file mode 100644 index 0000000..711e356 --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgxerrata.h @@ -0,0 +1,518 @@ +/*************************************************************************/ /*! +@Title SGX HW errata definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Specifies associations between SGX core revisions + and SW workarounds required to fix HW errata that exist + in specific core revisions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef _SGXERRATA_KM_H_ +#define _SGXERRATA_KM_H_ + +/* ignore warnings about unrecognised preprocessing directives in conditional inclusion directives */ +/* PRQA S 3115 ++ */ + +#if defined(SGX520) && !defined(SGX_CORE_DEFINED) + /* define the _current_ SGX520 RTL head revision */ + #define SGX_CORE_REV_HEAD 0 + #if defined(USE_SGX_CORE_REV_HEAD) + /* build config selects Core Revision to be the Head */ + #define SGX_CORE_REV SGX_CORE_REV_HEAD + #endif + + #if SGX_CORE_REV == 111 + #else + #if SGX_CORE_REV == SGX_CORE_REV_HEAD + /* RTL head - no BRNs to apply */ + #else + #error "sgxerrata.h: SGX520 Core Revision unspecified" + #endif + #endif + /* signal that the Core Version has a valid definition */ + #define SGX_CORE_DEFINED +#endif + +#if defined(SGX530) && !defined(SGX_CORE_DEFINED) + /* define the _current_ SGX530 RTL head revision */ + #define SGX_CORE_REV_HEAD 0 + #if defined(USE_SGX_CORE_REV_HEAD) + /* build config selects Core Revision to be the Head */ + #define SGX_CORE_REV SGX_CORE_REV_HEAD + #endif + + #if SGX_CORE_REV == 120 + #define FIX_HW_BRN_22934/* Workaround in sgx featuredefs */ + #define FIX_HW_BRN_28889/* Workaround in services (srvkm) */ + #else + #if SGX_CORE_REV == 121 + #define FIX_HW_BRN_22934/* Workaround in sgx featuredefs */ + #define FIX_HW_BRN_28889/* Workaround in services (srvkm) */ + #else + #if SGX_CORE_REV == 125 + #define FIX_HW_BRN_22934/* Workaround in sgx featuredefs */ + #define FIX_HW_BRN_28889/* Workaround in services (srvkm) */ + #else + #if SGX_CORE_REV == 130 + #define FIX_HW_BRN_22934/* Workaround in sgx featuredefs */ + #define FIX_HW_BRN_28889/* Workaround in services (srvkm) */ + #else + #if SGX_CORE_REV == SGX_CORE_REV_HEAD + /* RTL head - no BRNs to apply */ + #else + #error "sgxerrata.h: SGX530 Core Revision unspecified" + #endif + #endif + #endif +#endif + #endif + /* signal that the Core Version has a valid definition */ + 
#define SGX_CORE_DEFINED +#endif + +#if defined(SGX531) && !defined(SGX_CORE_DEFINED) + /* define the _current_ SGX531 RTL head revision */ + #define SGX_CORE_REV_HEAD 0 + #if defined(USE_SGX_CORE_REV_HEAD) + /* build config selects Core Revision to be the Head */ + #define SGX_CORE_REV SGX_CORE_REV_HEAD + #endif + + #if SGX_CORE_REV == 101 + #define FIX_HW_BRN_26620/* Workaround in services (srvkm) */ + #define FIX_HW_BRN_28011/* Workaround in services (srvkm) */ + #define FIX_HW_BRN_34028/* Workaround in services (srvkm) */ + #else + #if SGX_CORE_REV == 110 + #define FIX_HW_BRN_34028/* Workaround in services (srvkm) */ + #else + #if SGX_CORE_REV == SGX_CORE_REV_HEAD + /* RTL head - no BRNs to apply */ + #else + #error "sgxerrata.h: SGX531 Core Revision unspecified" + #endif + #endif + #endif + /* signal that the Core Version has a valid definition */ + #define SGX_CORE_DEFINED +#endif + +#if (defined(SGX535) || defined(SGX535_V1_1)) && !defined(SGX_CORE_DEFINED) + /* define the _current_ SGX535 RTL head revision */ + #define SGX_CORE_REV_HEAD 0 + #if defined(USE_SGX_CORE_REV_HEAD) + /* build config selects Core Revision to be the Head */ + #define SGX_CORE_REV SGX_CORE_REV_HEAD + #endif + + #if SGX_CORE_REV == 121 + #define FIX_HW_BRN_22934/* Workaround in sgx featuredefs */ + #define FIX_HW_BRN_23944/* Workaround in code (services) */ + #define FIX_HW_BRN_23410/* Workaround in code (services) and ucode */ + #else + #if SGX_CORE_REV == 126 + #define FIX_HW_BRN_22934/* Workaround in sgx featuredefs */ + #else + #if SGX_CORE_REV == SGX_CORE_REV_HEAD + /* RTL head - no BRNs to apply */ + #else + #error "sgxerrata.h: SGX535 Core Revision unspecified" + + #endif + #endif + #endif + /* signal that the Core Version has a valid definition */ + #define SGX_CORE_DEFINED +#endif + +#if defined(SGX540) && !defined(SGX_CORE_DEFINED) + /* define the _current_ SGX540 RTL head revision */ + #define SGX_CORE_REV_HEAD 0 + #if defined(USE_SGX_CORE_REV_HEAD) + /* build config 
selects Core Revision to be the Head */ + #define SGX_CORE_REV SGX_CORE_REV_HEAD + #endif + + #if SGX_CORE_REV == 101 + #define FIX_HW_BRN_25499/* Workaround in sgx featuredefs */ + #define FIX_HW_BRN_25503/* Workaround in code (services) */ + #define FIX_HW_BRN_26620/* Workaround in services (srvkm) */ + #define FIX_HW_BRN_28011/* Workaround in services (srvkm) */ + #define FIX_HW_BRN_34028/* Workaround in services (srvkm) */ + #else + #if SGX_CORE_REV == 110 + #define FIX_HW_BRN_25503/* Workaround in code (services) */ + #define FIX_HW_BRN_26620/* Workaround in services (srvkm) */ + #define FIX_HW_BRN_28011/* Workaround in services (srvkm) */ + #define FIX_HW_BRN_34028/* Workaround in services (srvkm) */ + #else + #if SGX_CORE_REV == 120 + #define FIX_HW_BRN_26620/* Workaround in services (srvkm) */ + #define FIX_HW_BRN_28011/* Workaround in services (srvkm) */ + #define FIX_HW_BRN_34028/* Workaround in services (srvkm) */ + #else + #if SGX_CORE_REV == 121 + #define FIX_HW_BRN_28011/* Workaround in services (srvkm) */ + #define FIX_HW_BRN_34028/* Workaround in services (srvkm) */ + #else + #if SGX_CORE_REV == 130 + #define FIX_HW_BRN_34028/* Workaround in services (srvkm) */ + #else + #if SGX_CORE_REV == SGX_CORE_REV_HEAD + /* RTL head - no BRNs to apply */ + #else + #error "sgxerrata.h: SGX540 Core Revision unspecified" + #endif + #endif + #endif + #endif + #endif + #endif + /* signal that the Core Version has a valid definition */ + #define SGX_CORE_DEFINED +#endif + + +#if defined(SGX543) && !defined(SGX_CORE_DEFINED) + /* define the _current_ SGX543 RTL head revision */ + #define SGX_CORE_REV_HEAD 0 + #if defined(USE_SGX_CORE_REV_HEAD) + /* build config selects Core Revision to be the Head */ + #define SGX_CORE_REV SGX_CORE_REV_HEAD + #endif + + #if SGX_CORE_REV == 122 + #define FIX_HW_BRN_29954/* turns off regbank split feature */ + #define FIX_HW_BRN_29997/* workaround in services */ + #define FIX_HW_BRN_30954/* workaround in services */ + #define 
FIX_HW_BRN_31093/* workaround in services */ + #define FIX_HW_BRN_31195/* workaround in services */ + #define FIX_HW_BRN_31272/* workaround in services (srvclient) and uKernel */ + #define FIX_HW_BRN_31278/* disabled prefetching in MMU */ + #define FIX_HW_BRN_31542/* workaround in uKernel and Services */ + #if defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_31559/* workaround in services and uKernel */ + #endif + #define FIX_HW_BRN_31620/* workaround in services */ + #define FIX_HW_BRN_31780/* workaround in uKernel */ + #define FIX_HW_BRN_32044 /* workaround in uKernel, services and client drivers */ + #define FIX_HW_BRN_32085 /* workaround in services: prefetch fix applied, investigating PT based fix */ + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_33920/* workaround in ukernel */ + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + /* add BRNs here */ + #else + #if SGX_CORE_REV == 1221 + #define FIX_HW_BRN_29954/* turns off regbank split feature */ + #define FIX_HW_BRN_31195/* workaround in services */ + #define FIX_HW_BRN_31272/* workaround in services (srvclient) and uKernel */ + #define FIX_HW_BRN_31278/* disabled prefetching in MMU */ + #if defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_31559/* workaround in services and uKernel */ + #endif + #define FIX_HW_BRN_31542/* workaround in uKernel and Services */ + #define FIX_HW_BRN_31671/* workaround in uKernel */ + #define FIX_HW_BRN_31780/* workaround in uKernel */ + #define FIX_HW_BRN_32044/* workaround in uKernel, services and client drivers */ + #define FIX_HW_BRN_32085 /* workaround in services: prefetch fix applied, investigating PT based fix */ + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_33920/* workaround in ukernel */ + #define FIX_HW_BRN_36513 /* workaround in 
uKernel and Services */ + /* add BRNs here */ + #else + #if SGX_CORE_REV == 141 + #define FIX_HW_BRN_29954/* turns off regbank split feature */ + #if defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_31559/* workaround in services and uKernel */ + #endif + #define FIX_HW_BRN_31671 /* workaround in uKernel */ + #define FIX_HW_BRN_31780/* workaround in uKernel */ + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + /* add BRNs here */ + #else + #if SGX_CORE_REV == 142 + #define FIX_HW_BRN_29954/* turns off regbank split feature */ + #if defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_31559/* workaround in services and uKernel */ + #endif + #define FIX_HW_BRN_31671 /* workaround in uKernel */ + #define FIX_HW_BRN_31780/* workaround in uKernel */ + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + /* add BRNs here */ + #else + #if SGX_CORE_REV == 2111 + #define FIX_HW_BRN_30982 /* workaround in uKernel and services */ + #define FIX_HW_BRN_31093/* workaround in services */ + #define FIX_HW_BRN_31195/* workaround in services */ + #define FIX_HW_BRN_31272/* workaround in services (srvclient) and uKernel */ + #define FIX_HW_BRN_31278/* disabled prefetching in MMU */ + #define FIX_HW_BRN_31542/* workaround in uKernel and Services */ + #if defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_31559/* workaround in services and uKernel */ + #endif + #define FIX_HW_BRN_31620/* workaround in services */ + #define FIX_HW_BRN_31780/* workaround in uKernel */ + #define FIX_HW_BRN_32044 /* workaround in uKernel, services and client drivers */ + #define FIX_HW_BRN_32085 /* workaround in services: prefetch fix applied, investigating PT based fix */ + #if 
defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_33920/* workaround in ukernel */ + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + /* add BRNs here */ + #else + #if SGX_CORE_REV == 213 + #define FIX_HW_BRN_31272/* workaround in services (srvclient) and uKernel */ + #if defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_31559/* workaround in services and uKernel */ + #endif + #define FIX_HW_BRN_31671 /* workaround in uKernel */ + #define FIX_HW_BRN_31780/* workaround in uKernel */ + #define FIX_HW_BRN_32085 /* workaround in services: prefetch fix applied, investigating PT based fix */ + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_33920/* workaround in ukernel */ + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + /* add BRNs here */ + #else + #if SGX_CORE_REV == 216 + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + #else + #if SGX_CORE_REV == 302 + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + #else + #if SGX_CORE_REV == 303 + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + #else + #if SGX_CORE_REV == SGX_CORE_REV_HEAD + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #else + #error "sgxerrata.h: SGX543 Core Revision unspecified" + #endif + #endif + #endif + 
#endif + #endif + #endif + #endif + #endif + #endif + #endif + /* signal that the Core Version has a valid definition */ + #define SGX_CORE_DEFINED +#endif + +#if defined(SGX544) && !defined(SGX_CORE_DEFINED) + /* define the _current_ SGX544 RTL head revision */ + #define SGX_CORE_REV_HEAD 0 + #if defined(USE_SGX_CORE_REV_HEAD) + /* build config selects Core Revision to be the Head */ + #define SGX_CORE_REV SGX_CORE_REV_HEAD + #endif + + #if SGX_CORE_REV == 104 + #define FIX_HW_BRN_29954/* turns off regbank split feature */ + #define FIX_HW_BRN_31093/* workaround in services */ + #define FIX_HW_BRN_31195/* workaround in services */ + #define FIX_HW_BRN_31272/* workaround in services (srvclient) and uKernel */ + #define FIX_HW_BRN_31278/* disabled prefetching in MMU */ + #if defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_31559/* workaround in services and uKernel */ + #endif + #define FIX_HW_BRN_31542 /* workaround in uKernel and Services */ + #define FIX_HW_BRN_31620/* workaround in services */ + #define FIX_HW_BRN_31671 /* workaround in uKernel */ + #define FIX_HW_BRN_31780/* workaround in uKernel */ + #define FIX_HW_BRN_32044 /* workaround in uKernel, services and client drivers */ + #define FIX_HW_BRN_32085 /* workaround in services: prefetch fix applied, investigating PT based fix */ + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_33920/* workaround in ukernel */ + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + #else + #if SGX_CORE_REV == 105 + #if defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_31559/* workaround in services and uKernel */ + #endif + #define FIX_HW_BRN_31780/* workaround in uKernel */ + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_33920/* workaround in ukernel */ + #define FIX_HW_BRN_36513 /* workaround in 
uKernel and Services */ + #else + #if SGX_CORE_REV == 112 + #define FIX_HW_BRN_31272/* workaround in services (srvclient) and uKernel */ + #define FIX_HW_BRN_33920/* workaround in ukernel */ + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + #else + #if SGX_CORE_REV == 114 + #define FIX_HW_BRN_31780/* workaround in uKernel */ + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + #else + #if SGX_CORE_REV == 115 + #define FIX_HW_BRN_31780/* workaround in uKernel */ + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + #else + #if SGX_CORE_REV == 116 + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel */ + #endif + #define FIX_HW_BRN_33809/* workaround in kernel (enable burst combiner) */ + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + #else + #if SGX_CORE_REV == SGX_CORE_REV_HEAD + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #else + #error "sgxerrata.h: SGX544 Core Revision unspecified" + #endif + #endif + #endif + #endif + #endif + #endif + #endif + /* signal that the Core Version has a valid definition */ + #define SGX_CORE_DEFINED +#endif + +#if defined(SGX545) && !defined(SGX_CORE_DEFINED) + /* define the _current_ SGX545 RTL head revision */ + #define SGX_CORE_REV_HEAD 0 + #if defined(USE_SGX_CORE_REV_HEAD) + /* build config selects Core Revision to be the Head */ + #define SGX_CORE_REV SGX_CORE_REV_HEAD + #endif + + #if SGX_CORE_REV == 109 + #define FIX_HW_BRN_29702/* Workaround in services */ + #define FIX_HW_BRN_29823/* Workaround in services */ + 
#define FIX_HW_BRN_31939/* workaround in uKernel */ + #else + #if SGX_CORE_REV == 10131 + #else + #if SGX_CORE_REV == 1014 + #else + #if SGX_CORE_REV == 10141 + #else + #if SGX_CORE_REV == SGX_CORE_REV_HEAD + /* RTL head - no BRNs to apply */ + #else + #error "sgxerrata.h: SGX545 Core Revision unspecified" + #endif + #endif + #endif + #endif + #endif + /* signal that the Core Version has a valid definition */ + #define SGX_CORE_DEFINED +#endif + +#if defined(SGX554) && !defined(SGX_CORE_DEFINED) + /* define the _current_ SGX554 RTL head revision */ + #define SGX_CORE_REV_HEAD 0 + #if defined(USE_SGX_CORE_REV_HEAD) + /* build config selects Core Revision to be the Head */ + #define SGX_CORE_REV SGX_CORE_REV_HEAD + #endif + + #if SGX_CORE_REV == 1251 + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #define FIX_HW_BRN_36513 /* workaround in uKernel and Services */ + /* add BRNs here */ + #else + #if SGX_CORE_REV == SGX_CORE_REV_HEAD + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && defined(SGX_FEATURE_MP) + #define FIX_HW_BRN_33657/* workaround in ukernel*/ + #endif + #else + #error "sgxerrata.h: SGX554 Core Revision unspecified" + #endif + #endif + /* signal that the Core Version has a valid definition */ + #define SGX_CORE_DEFINED +#endif + +#if !defined(SGX_CORE_DEFINED) +#if defined (__GNUC__) + #warning "sgxerrata.h: SGX Core Version unspecified" +#else + #pragma message("sgxerrata.h: SGX Core Version unspecified") +#endif +#endif + +/* restore warning */ +/* PRQA S 3115 -- */ + +#endif /* _SGXERRATA_KM_H_ */ + +/****************************************************************************** + End of file (sgxerrata.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/hwdefs/sgxfeaturedefs.h b/pvr-source/services4/srvkm/hwdefs/sgxfeaturedefs.h new file mode 100644 index 0000000..3e3a116 --- /dev/null 
+++ b/pvr-source/services4/srvkm/hwdefs/sgxfeaturedefs.h @@ -0,0 +1,274 @@ +/*************************************************************************/ /*! +@Title SGX fexture definitions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#if defined(SGX520) + #define SGX_CORE_FRIENDLY_NAME "SGX520" + #define SGX_CORE_ID SGX_CORE_ID_520 + #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28) + #define SGX_FEATURE_NUM_USE_PIPES (1) + #define SGX_FEATURE_AUTOCLOCKGATING +#else +#if defined(SGX530) + #define SGX_CORE_FRIENDLY_NAME "SGX530" + #define SGX_CORE_ID SGX_CORE_ID_530 + #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28) + #define SGX_FEATURE_NUM_USE_PIPES (2) + #define SGX_FEATURE_AUTOCLOCKGATING +#else +#if defined(SGX531) + #define SGX_CORE_FRIENDLY_NAME "SGX531" + #define SGX_CORE_ID SGX_CORE_ID_531 + #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28) + #define SGX_FEATURE_NUM_USE_PIPES (2) + #define SGX_FEATURE_AUTOCLOCKGATING + #define SGX_FEATURE_MULTI_EVENT_KICK +#else +#if defined(SGX535) + #define SGX_CORE_FRIENDLY_NAME "SGX535" + #define SGX_CORE_ID SGX_CORE_ID_535 + #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32) + #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS + #define SGX_FEATURE_BIF_NUM_DIRLISTS (16) + #define SGX_FEATURE_2D_HARDWARE + #define SGX_FEATURE_NUM_USE_PIPES (2) + #define SGX_FEATURE_AUTOCLOCKGATING + #define SUPPORT_SGX_GENERAL_MAPPING_HEAP + #define SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE +#else +#if defined(SGX540) + #define SGX_CORE_FRIENDLY_NAME "SGX540" + #define SGX_CORE_ID SGX_CORE_ID_540 + #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28) + #define SGX_FEATURE_NUM_USE_PIPES (4) + #define 
SGX_FEATURE_AUTOCLOCKGATING + #define SGX_FEATURE_MULTI_EVENT_KICK +#else +#if defined(SGX543) + #define SGX_CORE_FRIENDLY_NAME "SGX543" + #define SGX_CORE_ID SGX_CORE_ID_543 + #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING + #define SGX_FEATURE_USE_UNLIMITED_PHASES + #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32) + #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS + #define SGX_FEATURE_BIF_NUM_DIRLISTS (8) + #define SGX_FEATURE_NUM_USE_PIPES (4) + #define SGX_FEATURE_AUTOCLOCKGATING + #define SGX_FEATURE_MONOLITHIC_UKERNEL + #define SGX_FEATURE_MULTI_EVENT_KICK + #define SGX_FEATURE_DATA_BREAKPOINTS + #define SGX_FEATURE_PERPIPE_BKPT_REGS + #define SGX_FEATURE_PERPIPE_BKPT_REGS_NUMPIPES (2) + #define SGX_FEATURE_2D_HARDWARE + #define SGX_FEATURE_PTLA + #define SGX_FEATURE_EXTENDED_PERF_COUNTERS + #define SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) + #if defined(SGX_FEATURE_MP) + #define SGX_FEATURE_MASTER_VDM_CONTEXT_SWITCH + #endif + #define SGX_FEATURE_SLAVE_VDM_CONTEXT_SWITCH + #define SGX_FEATURE_SW_ISP_CONTEXT_SWITCH + #endif +#else +#if defined(SGX544) + #define SGX_CORE_FRIENDLY_NAME "SGX544" + #define SGX_CORE_ID SGX_CORE_ID_544 + #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING + #define SGX_FEATURE_USE_UNLIMITED_PHASES + #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32) + #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS + #define SGX_FEATURE_BIF_NUM_DIRLISTS (8) + #define SGX_FEATURE_NUM_USE_PIPES (4) + #define SGX_FEATURE_AUTOCLOCKGATING + #define SGX_FEATURE_MONOLITHIC_UKERNEL + #define SGX_FEATURE_MULTI_EVENT_KICK +// #define SGX_FEATURE_DATA_BREAKPOINTS +// #define SGX_FEATURE_PERPIPE_BKPT_REGS +// #define SGX_FEATURE_PERPIPE_BKPT_REGS_NUMPIPES (2) +// #define SGX_FEATURE_2D_HARDWARE +// #define SGX_FEATURE_PTLA + #define SGX_FEATURE_EXTENDED_PERF_COUNTERS + #define SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) + #if defined(SGX_FEATURE_MP) + #define 
SGX_FEATURE_MASTER_VDM_CONTEXT_SWITCH + #define SGX_FEATURE_SLAVE_VDM_CONTEXT_SWITCH + #endif + #define SGX_FEATURE_SW_ISP_CONTEXT_SWITCH + #endif +#else +#if defined(SGX545) + #define SGX_CORE_FRIENDLY_NAME "SGX545" + #define SGX_CORE_ID SGX_CORE_ID_545 + #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32) + #define SGX_FEATURE_AUTOCLOCKGATING + #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING + #define SGX_FEATURE_USE_UNLIMITED_PHASES + #define SGX_FEATURE_VOLUME_TEXTURES + #define SGX_FEATURE_HOST_ALLOC_FROM_DPM + #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS + #define SGX_FEATURE_BIF_NUM_DIRLISTS (16) + #define SGX_FEATURE_NUM_USE_PIPES (4) + #define SGX_FEATURE_TEXTURESTRIDE_EXTENSION + #define SGX_FEATURE_PDS_DATA_INTERLEAVE_2DWORDS + #define SGX_FEATURE_MONOLITHIC_UKERNEL + #define SGX_FEATURE_ZLS_EXTERNALZ + #define SGX_FEATURE_NUM_PDS_PIPES (2) + #define SGX_FEATURE_NATIVE_BACKWARD_BLIT + #define SGX_FEATURE_MAX_TA_RENDER_TARGETS (512) + #define SGX_FEATURE_SECONDARY_REQUIRES_USE_KICK + #define SGX_FEATURE_WRITEBACK_DCU + //FIXME: this is defined in the build config for now + //#define SGX_FEATURE_36BIT_MMU + #define SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS + #define SGX_FEATURE_MULTI_EVENT_KICK + #define SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) + #define SGX_FEATURE_SW_ISP_CONTEXT_SWITCH + #endif +#else +#if defined(SGX554) + #define SGX_CORE_FRIENDLY_NAME "SGX554" + #define SGX_CORE_ID SGX_CORE_ID_554 + #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING + #define SGX_FEATURE_USE_UNLIMITED_PHASES + #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32) + #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS + #define SGX_FEATURE_BIF_NUM_DIRLISTS (8) + #define SGX_FEATURE_NUM_USE_PIPES (8) + #define SGX_FEATURE_AUTOCLOCKGATING + #define SGX_FEATURE_MONOLITHIC_UKERNEL + #define SGX_FEATURE_MULTI_EVENT_KICK +// #define SGX_FEATURE_DATA_BREAKPOINTS +// #define SGX_FEATURE_PERPIPE_BKPT_REGS +// #define SGX_FEATURE_PERPIPE_BKPT_REGS_NUMPIPES (2) + 
#define SGX_FEATURE_2D_HARDWARE + #define SGX_FEATURE_PTLA + #define SGX_FEATURE_EXTENDED_PERF_COUNTERS + #define SGX_FEATURE_EDM_VERTEX_PDSADDR_FULL_RANGE + #if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) + #if defined(SGX_FEATURE_MP) + #define SGX_FEATURE_MASTER_VDM_CONTEXT_SWITCH + #endif + #define SGX_FEATURE_SLAVE_VDM_CONTEXT_SWITCH + #define SGX_FEATURE_SW_ISP_CONTEXT_SWITCH + #endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif +#endif + +#if defined(SGX_FEATURE_SLAVE_VDM_CONTEXT_SWITCH) \ + || defined(SGX_FEATURE_MASTER_VDM_CONTEXT_SWITCH) +/* Enable the define so common code for HW VDMCS code is compiled */ +#define SGX_FEATURE_VDM_CONTEXT_SWITCH +#endif + +/* + 'switch-off' features if defined BRNs affect the feature +*/ + +#if defined(FIX_HW_BRN_27266) +#undef SGX_FEATURE_36BIT_MMU +#endif + +#if defined(FIX_HW_BRN_22934) \ + || defined(FIX_HW_BRN_25499) +#undef SGX_FEATURE_MULTI_EVENT_KICK +#endif + +#if defined(SGX_FEATURE_SYSTEM_CACHE) + #if defined(SGX_FEATURE_36BIT_MMU) + #error SGX_FEATURE_SYSTEM_CACHE is incompatible with SGX_FEATURE_36BIT_MMU + #endif + #if defined(FIX_HW_BRN_26620) && !defined(SGX_FEATURE_MULTI_EVENT_KICK) + #define SGX_BYPASS_SYSTEM_CACHE + #endif +#endif + +#if defined(FIX_HW_BRN_29954) +#undef SGX_FEATURE_PERPIPE_BKPT_REGS +#endif + +#if defined(FIX_HW_BRN_31620) +#undef SGX_FEATURE_MULTIPLE_MEM_CONTEXTS +#undef SGX_FEATURE_BIF_NUM_DIRLISTS +#endif + +/* + Derive other definitions: +*/ + +/* define default MP core count */ +#if defined(SGX_FEATURE_MP) +#if defined(SGX_FEATURE_MP_CORE_COUNT_TA) && defined(SGX_FEATURE_MP_CORE_COUNT_3D) +#if (SGX_FEATURE_MP_CORE_COUNT_TA > SGX_FEATURE_MP_CORE_COUNT_3D) +#error Number of TA cores larger than number of 3D cores not supported in current driver +#endif /* (SGX_FEATURE_MP_CORE_COUNT_TA > SGX_FEATURE_MP_CORE_COUNT_3D) */ +#else +#if defined(SGX_FEATURE_MP_CORE_COUNT) +#define SGX_FEATURE_MP_CORE_COUNT_TA (SGX_FEATURE_MP_CORE_COUNT) +#define 
SGX_FEATURE_MP_CORE_COUNT_3D (SGX_FEATURE_MP_CORE_COUNT) +#else +#error Either SGX_FEATURE_MP_CORE_COUNT or \ +both SGX_FEATURE_MP_CORE_COUNT_TA and SGX_FEATURE_MP_CORE_COUNT_3D \ +must be defined when SGX_FEATURE_MP is defined +#endif /* SGX_FEATURE_MP_CORE_COUNT */ +#endif /* defined(SGX_FEATURE_MP_CORE_COUNT_TA) && defined(SGX_FEATURE_MP_CORE_COUNT_3D) */ +#else +#define SGX_FEATURE_MP_CORE_COUNT (1) +#define SGX_FEATURE_MP_CORE_COUNT_TA (1) +#define SGX_FEATURE_MP_CORE_COUNT_3D (1) +#endif /* SGX_FEATURE_MP */ + +#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && !defined(SUPPORT_SGX_PRIORITY_SCHEDULING) +#define SUPPORT_SGX_PRIORITY_SCHEDULING +#endif + +#include "img_types.h" + +/****************************************************************************** + End of file (sgxfeaturedefs.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/hwdefs/sgxmmu.h b/pvr-source/services4/srvkm/hwdefs/sgxmmu.h new file mode 100644 index 0000000..a6a907a --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgxmmu.h @@ -0,0 +1,99 @@ +/*************************************************************************/ /*! +@Title SGX MMU defines +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provides SGX MMU declarations and macros +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#if !defined(__SGXMMU_KM_H__) +#define __SGXMMU_KM_H__ + +/* to be implemented */ + +/* SGX MMU maps 4Kb pages */ +#define SGX_MMU_PAGE_SHIFT (12) +#define SGX_MMU_PAGE_SIZE (1U<<SGX_MMU_PAGE_SHIFT) +#define SGX_MMU_PAGE_MASK (SGX_MMU_PAGE_SIZE - 1U) + +/* PD details */ +#define SGX_MMU_PD_SHIFT (10) +#define SGX_MMU_PD_SIZE (1U<<SGX_MMU_PD_SHIFT) +#define SGX_MMU_PD_MASK (0xFFC00000U) + +/* PD Entry details */ +#if defined(SGX_FEATURE_36BIT_MMU) + #define SGX_MMU_PDE_ADDR_MASK (0xFFFFFF00U) + #define SGX_MMU_PDE_ADDR_ALIGNSHIFT (4) +#else + #define SGX_MMU_PDE_ADDR_MASK (0xFFFFF000U) + #define SGX_MMU_PDE_ADDR_ALIGNSHIFT (0) +#endif +#define SGX_MMU_PDE_VALID (0x00000001U) +/* variable page size control field */ +#define SGX_MMU_PDE_PAGE_SIZE_4K (0x00000000U) +#define SGX_MMU_PDE_PAGE_SIZE_16K (0x00000002U) +#define SGX_MMU_PDE_PAGE_SIZE_64K (0x00000004U) +#define SGX_MMU_PDE_PAGE_SIZE_256K (0x00000006U) +#define SGX_MMU_PDE_PAGE_SIZE_1M (0x00000008U) +#define SGX_MMU_PDE_PAGE_SIZE_4M (0x0000000AU) +#define SGX_MMU_PDE_PAGE_SIZE_MASK (0x0000000EU) + +/* PT details */ +#define SGX_MMU_PT_SHIFT (10) +#define SGX_MMU_PT_SIZE (1U<<SGX_MMU_PT_SHIFT) +#define SGX_MMU_PT_MASK (0x003FF000U) + +/* PT Entry details */ +#if defined(SGX_FEATURE_36BIT_MMU) + #define SGX_MMU_PTE_ADDR_MASK (0xFFFFFF00U) + #define SGX_MMU_PTE_ADDR_ALIGNSHIFT (4) +#else + #define SGX_MMU_PTE_ADDR_MASK (0xFFFFF000U) + #define SGX_MMU_PTE_ADDR_ALIGNSHIFT (0) +#endif +#define SGX_MMU_PTE_VALID (0x00000001U) +#define SGX_MMU_PTE_WRITEONLY (0x00000002U) +#define SGX_MMU_PTE_READONLY (0x00000004U) +#define SGX_MMU_PTE_CACHECONSISTENT (0x00000008U) +#define SGX_MMU_PTE_EDMPROTECT (0x00000010U) + +#endif /* __SGXMMU_KM_H__ */ + +/***************************************************************************** + End of file (sgxmmu.h) +*****************************************************************************/ diff --git 
a/pvr-source/services4/srvkm/hwdefs/sgxmpdefs.h b/pvr-source/services4/srvkm/hwdefs/sgxmpdefs.h new file mode 100644 index 0000000..4b9649f --- /dev/null +++ b/pvr-source/services4/srvkm/hwdefs/sgxmpdefs.h @@ -0,0 +1,365 @@ +/*************************************************************************/ /*! +@Title Hardware defs for SGXMP. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _SGXMPDEFS_KM_H_ +#define _SGXMPDEFS_KM_H_ + +/* Register EUR_CR_MASTER_BIF_CTRL */ +#define EUR_CR_MASTER_BIF_CTRL 0x4C00 +#define EUR_CR_MASTER_BIF_CTRL_NOREORDER_MASK 0x00000001U +#define EUR_CR_MASTER_BIF_CTRL_NOREORDER_SHIFT 0 +#define EUR_CR_MASTER_BIF_CTRL_NOREORDER_SIGNED 0 +#define EUR_CR_MASTER_BIF_CTRL_PAUSE_MASK 0x00000002U +#define EUR_CR_MASTER_BIF_CTRL_PAUSE_SHIFT 1 +#define EUR_CR_MASTER_BIF_CTRL_PAUSE_SIGNED 0 +#define EUR_CR_MASTER_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010U +#define EUR_CR_MASTER_BIF_CTRL_CLEAR_FAULT_SHIFT 4 +#define EUR_CR_MASTER_BIF_CTRL_CLEAR_FAULT_SIGNED 0 +#define EUR_CR_MASTER_BIF_CTRL_MMU_BYPASS_PTLA_MASK 0x00010000U +#define EUR_CR_MASTER_BIF_CTRL_MMU_BYPASS_PTLA_SHIFT 16 +#define EUR_CR_MASTER_BIF_CTRL_MMU_BYPASS_PTLA_SIGNED 0 +#define EUR_CR_MASTER_BIF_CTRL_MMU_BYPASS_MASTER_VDM_MASK 0x00020000U +#define EUR_CR_MASTER_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SHIFT 17 +#define EUR_CR_MASTER_BIF_CTRL_MMU_BYPASS_MASTER_VDM_SIGNED 0 +#define EUR_CR_MASTER_BIF_CTRL_MMU_BYPASS_MASTER_IPF_MASK 0x00040000U +#define EUR_CR_MASTER_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SHIFT 18 +#define EUR_CR_MASTER_BIF_CTRL_MMU_BYPASS_MASTER_IPF_SIGNED 0 +#define EUR_CR_MASTER_BIF_CTRL_MMU_BYPASS_MASTER_DPM_MASK 0x00080000U +#define EUR_CR_MASTER_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SHIFT 19 +#define EUR_CR_MASTER_BIF_CTRL_MMU_BYPASS_MASTER_DPM_SIGNED 0 +/* 
Register EUR_CR_MASTER_BIF_CTRL_INVAL */ +#define EUR_CR_MASTER_BIF_CTRL_INVAL 0x4C34 +#define EUR_CR_MASTER_BIF_CTRL_INVAL_PTE_MASK 0x00000004U +#define EUR_CR_MASTER_BIF_CTRL_INVAL_PTE_SHIFT 2 +#define EUR_CR_MASTER_BIF_CTRL_INVAL_PTE_SIGNED 0 +#define EUR_CR_MASTER_BIF_CTRL_INVAL_ALL_MASK 0x00000008U +#define EUR_CR_MASTER_BIF_CTRL_INVAL_ALL_SHIFT 3 +#define EUR_CR_MASTER_BIF_CTRL_INVAL_ALL_SIGNED 0 +/* Register EUR_CR_MASTER_BIF_MMU_CTRL */ +#define EUR_CR_MASTER_BIF_MMU_CTRL 0x4CD0 +#define EUR_CR_MASTER_BIF_MMU_CTRL_PREFETCHING_ON_MASK 0x00000001U +#define EUR_CR_MASTER_BIF_MMU_CTRL_PREFETCHING_ON_SHIFT 0 +#define EUR_CR_MASTER_BIF_MMU_CTRL_PREFETCHING_ON_SIGNED 0 +#define EUR_CR_MASTER_BIF_MMU_CTRL_ADDR_HASH_MODE_MASK 0x00000006U +#define EUR_CR_MASTER_BIF_MMU_CTRL_ADDR_HASH_MODE_SHIFT 1 +#define EUR_CR_MASTER_BIF_MMU_CTRL_ADDR_HASH_MODE_SIGNED 0 +#define EUR_CR_MASTER_BIF_MMU_CTRL_ENABLE_DC_TLB_MASK 0x00000010U +#define EUR_CR_MASTER_BIF_MMU_CTRL_ENABLE_DC_TLB_SHIFT 4 +#define EUR_CR_MASTER_BIF_MMU_CTRL_ENABLE_DC_TLB_SIGNED 0 +/* Register EUR_CR_MASTER_SLC_CTRL */ +#define EUR_CR_MASTER_SLC_CTRL 0x4D00 +#define EUR_CR_MASTER_SLC_CTRL_DISABLE_REORDERING_MASK 0x00800000U +#define EUR_CR_MASTER_SLC_CTRL_DISABLE_REORDERING_SHIFT 23 +#define EUR_CR_MASTER_SLC_CTRL_DISABLE_REORDERING_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_DISABLE_BURST_EXP_MASK 0x00400000U +#define EUR_CR_MASTER_SLC_CTRL_DISABLE_BURST_EXP_SHIFT 22 +#define EUR_CR_MASTER_SLC_CTRL_DISABLE_BURST_EXP_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ3_MASK 0x00200000U +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ3_SHIFT 21 +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ3_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ2_MASK 0x00100000U +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ2_SHIFT 20 +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ2_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ1_MASK 0x00080000U +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ1_SHIFT 19 +#define 
EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ1_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_MASK 0x00040000U +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_SHIFT 18 +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_DM_REF_SET_ALL_MASK 0x00010000U +#define EUR_CR_MASTER_SLC_CTRL_DM_REF_SET_ALL_SHIFT 16 +#define EUR_CR_MASTER_SLC_CTRL_DM_REF_SET_ALL_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_MASK 0x0000F000U +#define EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_SHIFT 12 +#define EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_ADDR_DECODE_MODE_MASK 0x00000E00U +#define EUR_CR_MASTER_SLC_CTRL_ADDR_DECODE_MODE_SHIFT 9 +#define EUR_CR_MASTER_SLC_CTRL_ADDR_DECODE_MODE_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_PAUSE_MASK 0x00000100U +#define EUR_CR_MASTER_SLC_CTRL_PAUSE_SHIFT 8 +#define EUR_CR_MASTER_SLC_CTRL_PAUSE_SIGNED 0 +/* Register EUR_CR_MASTER_SLC_CTRL_BYPASS */ +#define EUR_CR_MASTER_SLC_CTRL_BYPASS 0x4D04 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_N_MASK 0x08000000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT 27 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_N_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_MASK 0x04000000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_SHIFT 26 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE4_MASK 0x02000000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE4_SHIFT 25 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE4_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE3_MASK 0x01000000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE3_SHIFT 24 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE3_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE2_MASK 0x00800000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE2_SHIFT 23 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE2_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE1_MASK 0x00400000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE1_SHIFT 22 +#define 
EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE1_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE0_MASK 0x00200000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE0_SHIFT 21 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_CORE0_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_PTLA_MASK 0x00100000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_PTLA_SHIFT 20 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_PTLA_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_ISP2_RCIF_MASK 0x00080000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_ISP2_RCIF_SHIFT 19 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_ISP2_RCIF_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_ZLS_MASK 0x00040000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_ZLS_SHIFT 18 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_ZLS_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_PBE_MASK 0x00020000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_PBE_SHIFT 17 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_PBE_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_VDM_MASK 0x00010000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_VDM_SHIFT 16 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_VDM_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_IPF_MASK 0x00008000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_IPF_SHIFT 15 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_IPF_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_PDS_MASK 0x00004000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_PDS_SHIFT 14 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_PDS_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USEC_MASK 0x00002000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USEC_SHIFT 13 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USEC_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE3_MASK 0x00001000U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE3_SHIFT 12 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE3_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE2_MASK 0x00000800U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE2_SHIFT 11 +#define 
EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE2_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE1_MASK 0x00000400U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE1_SHIFT 10 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE1_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE0_MASK 0x00000200U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE0_SHIFT 9 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE0_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_IPF_OBJ_MASK 0x00000100U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT 8 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_TPF_MASK 0x00000080U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_TPF_SHIFT 7 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_TPF_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_TA_MASK 0x00000040U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_TA_SHIFT 6 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_TA_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_CACHE_MASK 0x00000020U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_CACHE_SHIFT 5 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_CACHE_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_MMU_MASK 0x00000010U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_MMU_SHIFT 4 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_MMU_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_DM_EVENT_MASK 0x00000008U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_DM_EVENT_SHIFT 3 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_DM_EVENT_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_DM_PIXEL_MASK 0x00000004U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT 2 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_DM_PIXEL_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_DM_VERTEX_MASK 0x00000002U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_DM_VERTEX_SHIFT 1 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_DM_VERTEX_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_ALL_MASK 0x00000001U +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_ALL_SHIFT 0 +#define EUR_CR_MASTER_SLC_CTRL_BYPASS_ALL_SIGNED 0 +/* 
Register EUR_CR_MASTER_SLC_CTRL_USSE_INVAL */ +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL 0x4D08 +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_ADDR_MASK 0xFFFFFFFFU +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_ADDR_SHIFT 0 +#define EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_ADDR_SIGNED 0 +/* Register EUR_CR_MASTER_SLC_CTRL_INVAL */ +#define EUR_CR_MASTER_SLC_CTRL_INVAL 0x4D28 +#define EUR_CR_MASTER_SLC_CTRL_INVAL_DM_EVENT_MASK 0x00000008U +#define EUR_CR_MASTER_SLC_CTRL_INVAL_DM_EVENT_SHIFT 3 +#define EUR_CR_MASTER_SLC_CTRL_INVAL_DM_EVENT_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_INVAL_DM_PIXEL_MASK 0x00000004U +#define EUR_CR_MASTER_SLC_CTRL_INVAL_DM_PIXEL_SHIFT 2 +#define EUR_CR_MASTER_SLC_CTRL_INVAL_DM_PIXEL_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_INVAL_DM_VERTEX_MASK 0x00000002U +#define EUR_CR_MASTER_SLC_CTRL_INVAL_DM_VERTEX_SHIFT 1 +#define EUR_CR_MASTER_SLC_CTRL_INVAL_DM_VERTEX_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_INVAL_ALL_MASK 0x00000001U +#define EUR_CR_MASTER_SLC_CTRL_INVAL_ALL_SHIFT 0 +#define EUR_CR_MASTER_SLC_CTRL_INVAL_ALL_SIGNED 0 +/* Register EUR_CR_MASTER_SLC_CTRL_FLUSH */ +#define EUR_CR_MASTER_SLC_CTRL_FLUSH 0x4D2C +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_DM_EVENT_MASK 0x00000080U +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_DM_EVENT_SHIFT 7 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_DM_EVENT_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_DM_PIXEL_MASK 0x00000040U +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_DM_PIXEL_SHIFT 6 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_DM_PIXEL_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_DM_VERTEX_MASK 0x00000020U +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_DM_VERTEX_SHIFT 5 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_DM_VERTEX_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_ALL_MASK 0x00000010U +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_ALL_SHIFT 4 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_ALL_SIGNED 0 +/* Register EUR_CR_MASTER_SLC_CTRL_FLUSH_INV */ +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV 0x4D34 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV_DM_EVENT_MASK 0x00000080U 
+#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV_DM_EVENT_SHIFT 7 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV_DM_EVENT_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV_DM_PIXEL_MASK 0x00000040U +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV_DM_PIXEL_SHIFT 6 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV_DM_PIXEL_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV_DM_VERTEX_MASK 0x00000020U +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV_DM_VERTEX_SHIFT 5 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV_DM_VERTEX_SIGNED 0 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV_ALL_MASK 0x00000010U +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV_ALL_SHIFT 4 +#define EUR_CR_MASTER_SLC_CTRL_FLUSH_INV_ALL_SIGNED 0 +/* Register EUR_CR_MASTER_BREAKPOINT_READ */ +#define EUR_CR_MASTER_BREAKPOINT_READ 0x4F18 +#define EUR_CR_MASTER_BREAKPOINT_READ_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_MASTER_BREAKPOINT_READ_ADDRESS_SHIFT 4 +#define EUR_CR_MASTER_BREAKPOINT_READ_ADDRESS_SIGNED 0 +/* Register EUR_CR_MASTER_BREAKPOINT_TRAP */ +#define EUR_CR_MASTER_BREAKPOINT_TRAP 0x4F1C +#define EUR_CR_MASTER_BREAKPOINT_TRAP_CONTINUE_MASK 0x00000002U +#define EUR_CR_MASTER_BREAKPOINT_TRAP_CONTINUE_SHIFT 1 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_CONTINUE_SIGNED 0 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_WRNOTIFY_MASK 0x00000001U +#define EUR_CR_MASTER_BREAKPOINT_TRAP_WRNOTIFY_SHIFT 0 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_WRNOTIFY_SIGNED 0 +/* Register EUR_CR_MASTER_BREAKPOINT */ +#define EUR_CR_MASTER_BREAKPOINT 0x4F20 +#define EUR_CR_MASTER_BREAKPOINT_ID_MASK 0x00000030U +#define EUR_CR_MASTER_BREAKPOINT_ID_SHIFT 4 +#define EUR_CR_MASTER_BREAKPOINT_ID_SIGNED 0 +#define EUR_CR_MASTER_BREAKPOINT_UNTRAPPED_MASK 0x00000008U +#define EUR_CR_MASTER_BREAKPOINT_UNTRAPPED_SHIFT 3 +#define EUR_CR_MASTER_BREAKPOINT_UNTRAPPED_SIGNED 0 +#define EUR_CR_MASTER_BREAKPOINT_TRAPPED_MASK 0x00000004U +#define EUR_CR_MASTER_BREAKPOINT_TRAPPED_SHIFT 2 +#define EUR_CR_MASTER_BREAKPOINT_TRAPPED_SIGNED 0 +/* Register EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0 */ +#define 
EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0 0x4F24 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0_ADDRESS_MASK 0xFFFFFFF0U +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0_ADDRESS_SHIFT 4 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO0_ADDRESS_SIGNED 0 +/* Register EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1 */ +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1 0x4F28 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_SIZE_MASK 0x00007C00U +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_SIZE_SHIFT 10 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_SIZE_SIGNED 0 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_NUMBER_MASK 0x00000300U +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_NUMBER_SHIFT 8 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_NUMBER_SIGNED 0 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_TAG_MASK 0x000000F8U +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_TAG_SHIFT 3 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_TAG_SIGNED 0 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_DATA_MASTER_MASK 0x00000006U +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SHIFT 1 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_DATA_MASTER_SIGNED 0 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_RNW_MASK 0x00000001U +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_RNW_SHIFT 0 +#define EUR_CR_MASTER_BREAKPOINT_TRAP_INFO1_RNW_SIGNED 0 +/* Register EUR_CR_MASTER_CORE */ +#define EUR_CR_MASTER_CORE 0x4000 +#define EUR_CR_MASTER_CORE_ENABLE_MASK 0x00000003U +#define EUR_CR_MASTER_CORE_ENABLE_SHIFT 0 +#define EUR_CR_MASTER_CORE_ENABLE_SIGNED 0 +/* Register EUR_CR_MASTER_CORE_ID */ +#define EUR_CR_MASTER_CORE_ID 0x4010 +#define EUR_CR_MASTER_CORE_ID_CONFIG_MULTI_MASK 0x00000001U +#define EUR_CR_MASTER_CORE_ID_CONFIG_MULTI_SHIFT 0 +#define EUR_CR_MASTER_CORE_ID_CONFIG_MULTI_SIGNED 0 +#define EUR_CR_MASTER_CORE_ID_CONFIG_BASE_MASK 0x00000002U +#define EUR_CR_MASTER_CORE_ID_CONFIG_BASE_SHIFT 1 +#define EUR_CR_MASTER_CORE_ID_CONFIG_BASE_SIGNED 0 +#define EUR_CR_MASTER_CORE_ID_CONFIG_MASK 0x000000FCU +#define EUR_CR_MASTER_CORE_ID_CONFIG_SHIFT 2 
+#define EUR_CR_MASTER_CORE_ID_CONFIG_SIGNED 0 +#define EUR_CR_MASTER_CORE_ID_CONFIG_CORES_MASK 0x00000F00U +#define EUR_CR_MASTER_CORE_ID_CONFIG_CORES_SHIFT 8 +#define EUR_CR_MASTER_CORE_ID_CONFIG_CORES_SIGNED 0 +#define EUR_CR_MASTER_CORE_ID_CONFIG_SLC_MASK 0x0000F000U +#define EUR_CR_MASTER_CORE_ID_CONFIG_SLC_SHIFT 12 +#define EUR_CR_MASTER_CORE_ID_CONFIG_SLC_SIGNED 0 +#define EUR_CR_MASTER_CORE_ID_ID_MASK 0xFFFF0000U +#define EUR_CR_MASTER_CORE_ID_ID_SHIFT 16 +#define EUR_CR_MASTER_CORE_ID_ID_SIGNED 0 +/* Register EUR_CR_MASTER_CORE_REVISION */ +#define EUR_CR_MASTER_CORE_REVISION 0x4014 +#define EUR_CR_MASTER_CORE_REVISION_MAINTENANCE_MASK 0x000000FFU +#define EUR_CR_MASTER_CORE_REVISION_MAINTENANCE_SHIFT 0 +#define EUR_CR_MASTER_CORE_REVISION_MAINTENANCE_SIGNED 0 +#define EUR_CR_MASTER_CORE_REVISION_MINOR_MASK 0x0000FF00U +#define EUR_CR_MASTER_CORE_REVISION_MINOR_SHIFT 8 +#define EUR_CR_MASTER_CORE_REVISION_MINOR_SIGNED 0 +#define EUR_CR_MASTER_CORE_REVISION_MAJOR_MASK 0x00FF0000U +#define EUR_CR_MASTER_CORE_REVISION_MAJOR_SHIFT 16 +#define EUR_CR_MASTER_CORE_REVISION_MAJOR_SIGNED 0 +#define EUR_CR_MASTER_CORE_REVISION_DESIGNER_MASK 0xFF000000U +#define EUR_CR_MASTER_CORE_REVISION_DESIGNER_SHIFT 24 +#define EUR_CR_MASTER_CORE_REVISION_DESIGNER_SIGNED 0 +/* Register EUR_CR_MASTER_SOFT_RESET */ +#define EUR_CR_MASTER_SOFT_RESET 0x4080 +#define EUR_CR_MASTER_SOFT_RESET_CORE_RESET_MASK(i) (0x00000001U << (0 + ((i) * 1))) +#define EUR_CR_MASTER_SOFT_RESET_CORE_RESET_SHIFT(i) (0 + ((i) * 1)) +#define EUR_CR_MASTER_SOFT_RESET_CORE_RESET_REGNUM(i) 0x4080 +#define EUR_CR_MASTER_SOFT_RESET_IPF_RESET_MASK 0x00000010U +#define EUR_CR_MASTER_SOFT_RESET_IPF_RESET_SHIFT 4 +#define EUR_CR_MASTER_SOFT_RESET_IPF_RESET_SIGNED 0 +#define EUR_CR_MASTER_SOFT_RESET_DPM_RESET_MASK 0x00000020U +#define EUR_CR_MASTER_SOFT_RESET_DPM_RESET_SHIFT 5 +#define EUR_CR_MASTER_SOFT_RESET_DPM_RESET_SIGNED 0 +#define EUR_CR_MASTER_SOFT_RESET_VDM_RESET_MASK 0x00000040U +#define 
EUR_CR_MASTER_SOFT_RESET_VDM_RESET_SHIFT 6 +#define EUR_CR_MASTER_SOFT_RESET_VDM_RESET_SIGNED 0 +#define EUR_CR_MASTER_SOFT_RESET_SLC_RESET_MASK 0x00000080U +#define EUR_CR_MASTER_SOFT_RESET_SLC_RESET_SHIFT 7 +#define EUR_CR_MASTER_SOFT_RESET_SLC_RESET_SIGNED 0 +#define EUR_CR_MASTER_SOFT_RESET_BIF_RESET_MASK 0x00000100U +#define EUR_CR_MASTER_SOFT_RESET_BIF_RESET_SHIFT 8 +#define EUR_CR_MASTER_SOFT_RESET_BIF_RESET_SIGNED 0 +#define EUR_CR_MASTER_SOFT_RESET_MCI_RESET_MASK 0x00000200U +#define EUR_CR_MASTER_SOFT_RESET_MCI_RESET_SHIFT 9 +#define EUR_CR_MASTER_SOFT_RESET_MCI_RESET_SIGNED 0 +#define EUR_CR_MASTER_SOFT_RESET_PTLA_RESET_MASK 0x00000400U +#define EUR_CR_MASTER_SOFT_RESET_PTLA_RESET_SHIFT 10 +#define EUR_CR_MASTER_SOFT_RESET_PTLA_RESET_SIGNED 0 + +#endif /* _SGXMPDEFS_KM_H_ */ + diff --git a/pvr-source/services4/srvkm/include/buffer_manager.h b/pvr-source/services4/srvkm/include/buffer_manager.h new file mode 100644 index 0000000..c16efaa --- /dev/null +++ b/pvr-source/services4/srvkm/include/buffer_manager.h @@ -0,0 +1,674 @@ +/*************************************************************************/ /*! +@Title Buffer Management. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Manages buffers mapped into two virtual memory spaces, host and + device and referenced by handles. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _BUFFER_MANAGER_H_ +#define _BUFFER_MANAGER_H_ + +#include "img_types.h" +#include "ra.h" +#include "perproc.h" + +#if defined(__cplusplus) +extern "C"{ +#endif + +/* forward reference */ +typedef struct _BM_HEAP_ BM_HEAP; + +/* + * The mapping structure is used to record relations between CPU virtual, + * CPU physical and device virtual addresses for large chunks of memory + * from which we have resource-allocator draw our buffers. + * + * There is one per contiguous pool and one per import from the host OS. + */ +struct _BM_MAPPING_ +{ + enum + { + hm_wrapped = 1, /*!< wrapped user supplied contiguous*/ + hm_wrapped_scatter, /*!< wrapped user supplied scattered */ + hm_wrapped_virtaddr, /*!< wrapped user supplied contiguous with virtual address*/ + hm_wrapped_scatter_virtaddr, /*!< wrapped user supplied scattered with virtual address*/ + hm_env, /*!< obtained from environment */ + hm_contiguous /*!< contigous arena */ + } eCpuMemoryOrigin; + + BM_HEAP *pBMHeap; /* which BM heap */ + RA_ARENA *pArena; /* whence the memory comes */ + + IMG_CPU_VIRTADDR CpuVAddr; + IMG_CPU_PHYADDR CpuPAddr; + IMG_DEV_VIRTADDR DevVAddr; + IMG_SYS_PHYADDR *psSysAddr; + IMG_SIZE_T uSize; + IMG_SIZE_T uSizeVM; + IMG_HANDLE hOSMemHandle; + IMG_UINT32 ui32Flags; + + /* Sparse mapping data */ + IMG_UINT32 ui32ChunkSize; + IMG_UINT32 ui32NumVirtChunks; + IMG_UINT32 ui32NumPhysChunks; + IMG_BOOL *pabMapChunk; + + /* GPU mapping reference count + * When goes down to 0 GPU mapping + * gets removed */ + IMG_UINT32 ui32MappingCount; + + /* need to track the original required alignment to make sure + * that an unmapped buffer which is later remapped to device + * is remapped with the original alignment restrictions. 
+ */ + IMG_UINT32 ui32DevVAddrAlignment; +}; + +/* + * The buffer structure handles individual allocations from the user; thus + * there is one allocated per call to BM_Alloc and one per call to BM_Wrap. + * We record a mapping reference so we know where to return allocated + * resources at BM_Free time. + */ +typedef struct _BM_BUF_ +{ + IMG_CPU_VIRTADDR *CpuVAddr; + IMG_VOID *hOSMemHandle; + IMG_CPU_PHYADDR CpuPAddr; + IMG_DEV_VIRTADDR DevVAddr; + + BM_MAPPING *pMapping; + IMG_UINT32 ui32RefCount; + IMG_UINT32 ui32ExportCount; +} BM_BUF; + +struct _BM_HEAP_ +{ + IMG_UINT32 ui32Attribs; + BM_CONTEXT *pBMContext; + RA_ARENA *pImportArena; + RA_ARENA *pLocalDevMemArena; + RA_ARENA *pVMArena; + DEV_ARENA_DESCRIPTOR sDevArena; + MMU_HEAP *pMMUHeap; + PDUMP_MMU_ATTRIB *psMMUAttrib; + + struct _BM_HEAP_ *psNext; + struct _BM_HEAP_ **ppsThis; + /* BIF tile stride for this heap */ + IMG_UINT32 ui32XTileStride; +}; + +/* + * The bm-context structure + */ +struct _BM_CONTEXT_ +{ + MMU_CONTEXT *psMMUContext; + + /* + * Resource allocation arena of dual mapped pages. For devices + * where the hardware imposes different constraints on the valid + * device virtual address range depending on the use of the buffer + * we maintain two allocation arenas, one low address range, the + * other high. For devices without such a constrain we do not + * create the high arena, instead all allocations come from the + * low arena. + */ + BM_HEAP *psBMHeap; + + /* + * The Shared Heaps + */ + BM_HEAP *psBMSharedHeap; + + PVRSRV_DEVICE_NODE *psDeviceNode; + + /* + * Hash table management. 
+ */ + HASH_TABLE *pBufferHash; + + /* + * Resman item handle + */ + IMG_HANDLE hResItem; + + IMG_UINT32 ui32RefCount; + + /* + linked list next pointer + */ + struct _BM_CONTEXT_ *psNext; + struct _BM_CONTEXT_ **ppsThis; +}; + +/* refcount.c needs to know the internals of this structure */ +typedef struct _XPROC_DATA_{ + IMG_UINT32 ui32RefCount; + IMG_UINT32 ui32AllocFlags; + IMG_UINT32 ui32Size; + IMG_UINT32 ui32PageSize; + RA_ARENA *psArena; + IMG_SYS_PHYADDR sSysPAddr; + IMG_VOID *pvCpuVAddr; + IMG_HANDLE hOSMemHandle; +} XPROC_DATA; + +extern XPROC_DATA gXProcWorkaroundShareData[]; +/* + Buffer handle. +*/ +typedef IMG_VOID *BM_HANDLE; + +/** Buffer manager allocation flags. + * + * Flags passed to BM_Alloc to specify buffer capabilities. + * + * @defgroup BP Buffer Manager Allocation Flags + * @{ + */ + +/** Pool number mask. */ +#define BP_POOL_MASK 0x7 + +/* Request physically contiguous pages of memory */ +#define BP_CONTIGUOUS (1 << 3) +#define BP_PARAMBUFFER (1 << 4) + +#define BM_MAX_DEVMEM_ARENAS 2 + +/** @} */ + +/** + * @Function BM_CreateContext + * + * @Description + * + * @Input + + * @Return + */ + +IMG_HANDLE +BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_DEV_PHYADDR *psPDDevPAddr, + PVRSRV_PER_PROCESS_DATA *psPerProc, + IMG_BOOL *pbCreated); + + +/** + * @Function BM_DestroyContext + * + * @Description + * + * @Input + * + * @Return PVRSRV_ERROR + */ +PVRSRV_ERROR +BM_DestroyContext (IMG_HANDLE hBMContext, + IMG_BOOL *pbCreated); + + +/** + * @Function BM_CreateHeap + * + * @Description + * + * @Input + * + * @Return + */ +IMG_HANDLE +BM_CreateHeap (IMG_HANDLE hBMContext, + DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo); + +/** + * @Function BM_DestroyHeap + * + * @Description + * + * @Input + * + * @Return + */ +IMG_VOID +BM_DestroyHeap (IMG_HANDLE hDevMemHeap); + + +/** + * @Function BM_Reinitialise + * + * @Description + * + * Reinitialises the buffer manager after a power event. 
Calling this + * function will reprogram MMU registers and renable the MMU. + * + * @Input None + * @Return None + */ + +IMG_BOOL +BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode); + +/** + * @Function BM_Alloc + * + * @Description + * + * Allocate a buffer mapped into both host and device virtual memory + * maps. + * + * @Input uSize - require size in bytes of the buffer. + * @Input/Output pui32Flags - bit mask of buffer property flags + recieves heap flags. + * @Input uDevVAddrAlignment - required alignment in bytes, or 0. + * @Input pvPrivData - private data passed to OS allocator + * @Input ui32PrivDataLength - length of private data + * @Input ui32ChunkSize - Chunk size + * @Input ui32NumVirtChunks - Number of virtual chunks + * @Input ui32NumPhysChunks - Number of physical chunks + * @Input pabMapChunk - Chunk mapping array + * @Output phBuf - receives the buffer handle. + * @Return IMG_TRUE - Success, IMG_FALSE - Failed. + */ +IMG_BOOL +BM_Alloc (IMG_HANDLE hDevMemHeap, + IMG_DEV_VIRTADDR *psDevVAddr, + IMG_SIZE_T uSize, + IMG_UINT32 *pui32Flags, + IMG_UINT32 uDevVAddrAlignment, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + BM_HANDLE *phBuf); + +/** + * @Function BM_Wrap + * + * @Description + * + * Create a buffer which wraps user provided host physical memory. + * The wrapped memory must be page aligned. BM_Wrap will roundup the + * size to a multiple of host pages. + * + * @Input ui32Size - size of memory to wrap. + * @Input ui32Offset - Offset into page of memory to wrap. + * @Input bPhysContig - Is the wrap physically contiguous. + * @Input psSysAddr - list of system physical page addresses of memory to wrap. + * @Input pvCPUVAddr - optional CPU kernel virtual address (Page aligned) of memory to wrap. + * @Input uFlags - bit mask of buffer property flags. + * @Input phBuf - receives the buffer handle. 
+ * @Return IMG_TRUE - Success, IMG_FALSE - Failed + */ +IMG_BOOL +BM_Wrap ( IMG_HANDLE hDevMemHeap, + IMG_SIZE_T ui32Size, + IMG_SIZE_T ui32Offset, + IMG_BOOL bPhysContig, + IMG_SYS_PHYADDR *psSysAddr, + IMG_VOID *pvCPUVAddr, + IMG_UINT32 *pui32Flags, + BM_HANDLE *phBuf); + +/** + * @Function BM_Free + * + * @Description + * + * Free a buffer previously allocated via BM_Alloc. + * + * @Input hBuf - buffer handle. + * @Return None. + */ +IMG_VOID +BM_Free (BM_HANDLE hBuf, + IMG_UINT32 ui32Flags); + + +/** + * @Function BM_HandleToCpuVaddr + * + * @Description + * + * Retrieve the host virtual address associated with a buffer. + * + * @Input hBuf - buffer handle. + * + * @Return buffers host virtual address. + */ +IMG_CPU_VIRTADDR +BM_HandleToCpuVaddr (BM_HANDLE hBuf); + +/** + * @Function BM_HandleToDevVaddr + * + * @Description + * + * Retrieve the device virtual address associated with a buffer. + * + * @Input hBuf - buffer handle. + * @Return buffers device virtual address. + */ +IMG_DEV_VIRTADDR +BM_HandleToDevVaddr (BM_HANDLE hBuf); + +/** + * @Function BM_HandleToSysPaddr + * + * @Description + * + * Retrieve the system physical address associated with a buffer. + * + * @Input hBuf - buffer handle. + * @Return buffers device virtual address. + */ +IMG_SYS_PHYADDR +BM_HandleToSysPaddr (BM_HANDLE hBuf); + +/** + * @Function BM_HandleToMemOSHandle + * + * @Description + * + * Retrieve the underlying memory handle associated with a buffer. + * + * @Input hBuf - buffer handle. + * @Return An OS Specific memory handle + */ +IMG_HANDLE +BM_HandleToOSMemHandle (BM_HANDLE hBuf); + +/** + * @Function BM_RemapToDev + * + * @Description + * + * Remaps the device Virtual Mapping. + * + * @Input hBuf - buffer handle. + * @Return ref count on success + */ +IMG_INT32 +BM_RemapToDev(BM_HANDLE hBuf); + +/** + * @Function BM_UnmapFromDev + * + * @Description + * + * Removes the device Virtual Mapping. + * + * @Input hBuf - buffer handle. 
+ * @Return Ref count on success + */ +IMG_INT32 +BM_UnmapFromDev(BM_HANDLE hBuf); + +/** + * @Function BM_GetPhysPageAddr + * + * @Description + * + * Retreive physical address backing dev V address + * + * @Input psMemInfo + * @Input sDevVPageAddr + * @Output psDevPAddr + * @Return PVRSRV_ERROR + */ +IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_DEV_VIRTADDR sDevVPageAddr, + IMG_DEV_PHYADDR *psDevPAddr); + +/*! +****************************************************************************** + @Function BM_GetMMUContext + + @Description + utility function to return the MMU context + + @inputs hDevMemHeap - the Dev mem heap handle + + @Return MMU context, else NULL +**************************************************************************/ +MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap); + +/*! +****************************************************************************** + @Function BM_GetMMUContextFromMemContext + + @Description + utility function to return the MMU context + + @inputs hDevMemHeap - the Dev mem heap handle + + @Return MMU context, else NULL +**************************************************************************/ +MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext); + +/*! +****************************************************************************** + @Function BM_GetMMUHeap + + @Description + utility function to return the MMU heap handle + + @inputs hDevMemHeap - the Dev mem heap handle + + @Return MMU heap handle, else NULL +**************************************************************************/ +IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap); + +/*! 
+****************************************************************************** + @Function BM_GetDeviceNode + + @Description utility function to return the devicenode from the BM Context + + @inputs hDevMemContext - the Dev Mem Context + + @Return MMU heap handle, else NULL +**************************************************************************/ +PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext); + + +/*! +****************************************************************************** + @Function BM_GetMappingHandle + + @Description utility function to return the mapping handle from a meminfo + + @inputs psMemInfo - kernel meminfo + + @Return mapping handle, else NULL +**************************************************************************/ +IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo); + +/*! +****************************************************************************** + @Function BM_Export + + @Description Export a buffer previously allocated via BM_Alloc. + + @inputs hBuf - buffer handle. + + @Return None. +**************************************************************************/ +IMG_VOID BM_Export(BM_HANDLE hBuf); + +/*! +****************************************************************************** + @Function BM_FreeExport + + @Description Free a buffer previously exported via BM_Export. + + @inputs hBuf - buffer handle. + ui32Flags - flags + + @Return None. +**************************************************************************/ +IMG_VOID BM_FreeExport(BM_HANDLE hBuf, IMG_UINT32 ui32Flags); + +/*! +****************************************************************************** + @Function BM_MappingHandleFromBuffer + + @Description utility function to get the BM mapping handle from a BM buffer + + @Input hBuffer - Handle to BM buffer + + @Return BM mapping handle +**************************************************************************/ +IMG_HANDLE BM_MappingHandleFromBuffer(IMG_HANDLE hBuffer); + +/*! 
+****************************************************************************** + @Function BM_GetVirtualSize + + @Description utility function to get the VM size of a BM mapping + + @Input hBMHandle - Handle to BM mapping + + @Return VM size of mapping +**************************************************************************/ +IMG_UINT32 BM_GetVirtualSize(IMG_HANDLE hBMHandle); + +/*! +****************************************************************************** + @Function BM_MapPageAtOffset + + @Description utility function check if the specificed offset in a BM mapping + is a page that needs tp be mapped + + @Input hBMHandle - Handle to BM mapping + + @Input ui32Offset - Offset into import + + @Return IMG_TRUE if the page should be mapped +**************************************************************************/ +IMG_BOOL BM_MapPageAtOffset(IMG_HANDLE hBMHandle, IMG_UINT32 ui32Offset); + +/*! +****************************************************************************** + @Function BM_VirtOffsetToPhyscial + + @Description utility function find of physical offset of a sparse allocation + from it's virtual offset. + + @Input hBMHandle - Handle to BM mapping + + @Input ui32VirtOffset - Virtual offset into allocation + + @Output pui32PhysOffset - Physical offset + + @Return IMG_TRUE if the virtual offset is physically backed +**************************************************************************/ +IMG_BOOL BM_VirtOffsetToPhysical(IMG_HANDLE hBMHandle, + IMG_UINT32 ui32VirtOffset, + IMG_UINT32 *pui32PhysOffset); + +/* The following are present for the "share mem" workaround for + cross-process mapping. This is only valid for a specific + use-case, and only tested on Linux (Android) and only + superficially at that. Do not rely on this API! */ +/* The two "Set" functions set a piece of "global" state in the buffer + manager, and "Unset" removes this global state. 
Therefore, there + is no thread-safety here and it's the caller's responsibility to + ensure that a mutex is acquired before using these functions or any + device memory allocation functions, including, especially, + callbacks from RA. */ +/* Once a "Share Index" is set by this means, any requests from the RA + to import a block of physical memory shall cause the physical + memory allocation to be refcounted, and shared iff the IDs chosen + match */ +/* This API is difficult to use, but saves a lot of plumbing in other + APIs. The next generation of this library should have this functionality + plumbed in properly */ +PVRSRV_ERROR BM_XProcWorkaroundSetShareIndex(IMG_UINT32 ui32Index); +PVRSRV_ERROR BM_XProcWorkaroundUnsetShareIndex(IMG_UINT32 ui32Index); +PVRSRV_ERROR BM_XProcWorkaroundFindNewBufferAndSetShareIndex(IMG_UINT32 *pui32Index); +IMG_INT32 BM_XProcGetShareDataRefCount(IMG_UINT32 ui32Index); + +#if defined(PVRSRV_REFCOUNT_DEBUG) +IMG_VOID _BM_XProcIndexAcquireDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index); +IMG_VOID _BM_XProcIndexReleaseDebug(const IMG_CHAR *pszFile, IMG_INT iLine, IMG_UINT32 ui32Index); + +#define BM_XProcIndexAcquire(x...) \ + _BM_XProcIndexAcquireDebug(__FILE__, __LINE__, x) +#define BM_XProcIndexRelease(x...) 
\ + _BM_XProcIndexReleaseDebug(__FILE__, __LINE__, x) + +#else +IMG_VOID _BM_XProcIndexAcquire(IMG_UINT32 ui32Index); +IMG_VOID _BM_XProcIndexRelease(IMG_UINT32 ui32Index); + +#define BM_XProcIndexAcquire(x) \ + _BM_XProcIndexAcquire( x) +#define BM_XProcIndexRelease(x) \ + _BM_XProcIndexRelease( x) +#endif + +static INLINE IMG_CHAR * +_BMMappingType (IMG_INT eCpuMemoryOrigin) +{ + switch (eCpuMemoryOrigin) + { + case hm_wrapped: return "hm_wrapped"; + case hm_wrapped_scatter: return "hm_wrapped_scatter"; + case hm_wrapped_virtaddr: return "hm_wrapped_virtaddr"; + case hm_wrapped_scatter_virtaddr: return "hm_wrapped_scatter_virtaddr"; + case hm_env: return "hm_env"; + case hm_contiguous: return "hm_contiguous"; + } + return "junk"; +} + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/pvr-source/services4/srvkm/include/device.h b/pvr-source/services4/srvkm/include/device.h new file mode 100644 index 0000000..6ddee5d --- /dev/null +++ b/pvr-source/services4/srvkm/include/device.h @@ -0,0 +1,409 @@ +/*************************************************************************/ /*! +@Title Common Device header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Device related function templates and defines +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __DEVICE_H__ +#define __DEVICE_H__ + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "ra.h" /* RA_ARENA */ +#include "resman.h" /* PRESMAN_ITEM */ + +/* BM context forward reference */ +typedef struct _BM_CONTEXT_ BM_CONTEXT; + +/* pre-defined MMU structure forward references */ +typedef struct _MMU_HEAP_ MMU_HEAP; +typedef struct _MMU_CONTEXT_ MMU_CONTEXT; + +/* physical resource types: */ +/* contiguous system memory */ +#define PVRSRV_BACKINGSTORE_SYSMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+0)) +/* non-contiguous system memory */ +#define PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+1)) +/* contiguous local device memory */ +#define PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+2)) +/* non-contiguous local device memory */ +#define PVRSRV_BACKINGSTORE_LOCALMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+3)) + +/* heap types: */ +typedef IMG_UINT32 DEVICE_MEMORY_HEAP_TYPE; +#define DEVICE_MEMORY_HEAP_PERCONTEXT 0 +#define DEVICE_MEMORY_HEAP_KERNEL 1 +#define DEVICE_MEMORY_HEAP_SHARED 2 +#define DEVICE_MEMORY_HEAP_SHARED_EXPORTED 3 + +#define PVRSRV_DEVICE_NODE_FLAGS_PORT80DISPLAY 1 +#define PVRSRV_DEVICE_NODE_FLAGS_MMU_OPT_INV 2 /* FIXME : Optimal Invalidation is not default */ + +typedef struct _DEVICE_MEMORY_HEAP_INFO_ +{ + /* heap identifier */ + IMG_UINT32 ui32HeapID; + + /* heap identifier string */ + IMG_CHAR *pszName; + + /* backing store identifier string */ + IMG_CHAR *pszBSName; + + /* Device virtual address of base of heap */ + IMG_DEV_VIRTADDR sDevVAddrBase; + + /* heapsize in bytes */ + IMG_UINT32 ui32HeapSize; + + /* Flags, includes physical resource (backing store type). 
Must be available to SOC */ + IMG_UINT32 ui32Attribs; + + /* Heap type: per device, kernel only, shared, shared_exported */ + DEVICE_MEMORY_HEAP_TYPE DevMemHeapType; + + /* kernel heap handle */ + IMG_HANDLE hDevMemHeap; + + /* ptr to local memory allocator for this heap */ + RA_ARENA *psLocalDevMemArena; + + /* MMU data page size (4kb, 16kb, 256kb, 1Mb, 4Mb) */ + IMG_UINT32 ui32DataPageSize; + + IMG_UINT32 ui32XTileStride; + +} DEVICE_MEMORY_HEAP_INFO; + +typedef struct _DEVICE_MEMORY_INFO_ +{ + /* size of address space, as log2 */ + IMG_UINT32 ui32AddressSpaceSizeLog2; + + /* + flags, includes physical memory resource types available to the system. + Allows for validation at heap creation, define PVRSRV_BACKINGSTORE_XXX + */ + IMG_UINT32 ui32Flags; + + /* heap count. Doesn't include additional heaps from PVRSRVCreateDeviceMemHeap */ + IMG_UINT32 ui32HeapCount; + + /* the sync heap id - common code needs to know */ + IMG_UINT32 ui32SyncHeapID; + + /* heap for buffer mappings */ + IMG_UINT32 ui32MappingHeapID; + + /* heap for ion buffers */ + IMG_UINT32 ui32IonHeapID; + + /* device memory heap info about each heap in a device address space */ + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap; + + /* BM kernel context for the device */ + BM_CONTEXT *pBMKernelContext; + + /* BM context list for the device*/ + BM_CONTEXT *pBMContext; + +} DEVICE_MEMORY_INFO; + + +/*! + **************************************************************************** + Device memory descriptor for a given system + ****************************************************************************/ +typedef struct DEV_ARENA_DESCRIPTOR_TAG +{ + IMG_UINT32 ui32HeapID; /*!< memory pool has a unique id for diagnostic purposes */ + + IMG_CHAR *pszName; /*!< memory pool has a unique string for diagnostic purposes */ + + IMG_DEV_VIRTADDR BaseDevVAddr; /*!< Device virtual base address of the managed memory pool. */ + + IMG_UINT32 ui32Size; /*!< Size in bytes of the managed memory pool. 
*/ + + DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;/*!< heap type */ + + /* MMU data page size (4kb, 16kb, 256kb, 1Mb, 4Mb) */ + IMG_UINT32 ui32DataPageSize; + + DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeapInfo; + +} DEV_ARENA_DESCRIPTOR; + + +/* + PDUMP MMU atttributes +*/ +typedef struct _PDUMP_MMU_ATTRIB_ +{ + PVRSRV_DEVICE_IDENTIFIER sDevId; + + IMG_CHAR *pszPDRegRegion; + + /* data page info */ + IMG_UINT32 ui32DataPageMask; + + /* page table info */ + IMG_UINT32 ui32PTEValid; + IMG_UINT32 ui32PTSize; + IMG_UINT32 ui32PTEAlignShift; + + /* page directory info */ + IMG_UINT32 ui32PDEMask; + IMG_UINT32 ui32PDEAlignShift; + +} PDUMP_MMU_ATTRIB; + +/* forward reference to _SYS_DATA_ */ +typedef struct _SYS_DATA_TAG_ *PSYS_DATA; + +typedef struct _PVRSRV_DEVICE_NODE_ +{ + PVRSRV_DEVICE_IDENTIFIER sDevId; + IMG_UINT32 ui32RefCount; + + /* + callbacks the device must support: + */ + /* device initialiser */ + PVRSRV_ERROR (*pfnInitDevice) (IMG_VOID*); + /* device deinitialiser */ + PVRSRV_ERROR (*pfnDeInitDevice) (IMG_VOID*); + + /* device post-finalise compatibility check */ + PVRSRV_ERROR (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*); + + /* device MMU interface */ + PVRSRV_ERROR (*pfnMMUInitialise)(struct _PVRSRV_DEVICE_NODE_*, MMU_CONTEXT**, IMG_DEV_PHYADDR*); + IMG_VOID (*pfnMMUFinalise)(MMU_CONTEXT*); + IMG_VOID (*pfnMMUInsertHeap)(MMU_CONTEXT*, MMU_HEAP*); + MMU_HEAP* (*pfnMMUCreate)(MMU_CONTEXT*,DEV_ARENA_DESCRIPTOR*,RA_ARENA**,PDUMP_MMU_ATTRIB **ppsMMUAttrib); + IMG_VOID (*pfnMMUDelete)(MMU_HEAP*); + IMG_BOOL (*pfnMMUAlloc)(MMU_HEAP*pMMU, + IMG_SIZE_T uSize, + IMG_SIZE_T *pActualSize, + IMG_UINT32 uFlags, + IMG_UINT32 uDevVAddrAlignment, + IMG_DEV_VIRTADDR *pDevVAddr); + IMG_VOID (*pfnMMUFree)(MMU_HEAP*,IMG_DEV_VIRTADDR,IMG_UINT32); + IMG_VOID (*pfnMMUEnable)(MMU_HEAP*); + IMG_VOID (*pfnMMUDisable)(MMU_HEAP*); + IMG_VOID (*pfnMMUMapPages)(MMU_HEAP *pMMU, + IMG_DEV_VIRTADDR devVAddr, + IMG_SYS_PHYADDR SysPAddr, + IMG_SIZE_T uSize, + IMG_UINT32 
ui32MemFlags, + IMG_HANDLE hUniqueTag); + IMG_VOID (*pfnMMUMapPagesSparse)(MMU_HEAP *pMMU, + IMG_DEV_VIRTADDR devVAddr, + IMG_SYS_PHYADDR SysPAddr, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + + IMG_VOID (*pfnMMUMapShadow)(MMU_HEAP *pMMU, + IMG_DEV_VIRTADDR MapBaseDevVAddr, + IMG_SIZE_T uSize, + IMG_CPU_VIRTADDR CpuVAddr, + IMG_HANDLE hOSMemHandle, + IMG_DEV_VIRTADDR *pDevVAddr, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + IMG_VOID (*pfnMMUMapShadowSparse)(MMU_HEAP *pMMU, + IMG_DEV_VIRTADDR MapBaseDevVAddr, + IMG_UINT32 ui32ChunkSize, + IMG_UINT32 ui32NumVirtChunks, + IMG_UINT32 ui32NumPhysChunks, + IMG_BOOL *pabMapChunk, + IMG_CPU_VIRTADDR CpuVAddr, + IMG_HANDLE hOSMemHandle, + IMG_DEV_VIRTADDR *pDevVAddr, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); + + IMG_VOID (*pfnMMUUnmapPages)(MMU_HEAP *pMMU, + IMG_DEV_VIRTADDR dev_vaddr, + IMG_UINT32 ui32PageCount, + IMG_HANDLE hUniqueTag); + + IMG_VOID (*pfnMMUMapScatter)(MMU_HEAP *pMMU, + IMG_DEV_VIRTADDR DevVAddr, + IMG_SYS_PHYADDR *psSysAddr, + IMG_SIZE_T uSize, + IMG_UINT32 ui32MemFlags, + IMG_HANDLE hUniqueTag); +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + IMG_BOOL (*pfnMMUIsHeapShared)(MMU_HEAP *); +#endif + IMG_DEV_PHYADDR (*pfnMMUGetPhysPageAddr)(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr); + IMG_DEV_PHYADDR (*pfnMMUGetPDDevPAddr)(MMU_CONTEXT *pMMUContext); + IMG_VOID (*pfnMMUGetCacheFlushRange)(MMU_CONTEXT *pMMUContext, IMG_UINT32 *pui32RangeMask); + IMG_VOID (*pfnMMUGetPDPhysAddr)(MMU_CONTEXT *pMMUContext, IMG_DEV_PHYADDR *psDevPAddr); + + /* tiling range control functions */ + PVRSRV_ERROR (*pfnAllocMemTilingRange)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32TilingStride, + IMG_UINT32 *pui32RangeIndex); + PVRSRV_ERROR (*pfnFreeMemTilingRange)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, + IMG_UINT32 
ui32RangeIndex); + + /* LISR handler for device */ + IMG_BOOL (*pfnDeviceISR)(IMG_VOID*); + /* ISR data */ + IMG_VOID *pvISRData; + /* System/SOC specific interrupt bit relating to this device */ + IMG_UINT32 ui32SOCInterruptBit; + /* MISR handler for device */ + IMG_VOID (*pfnDeviceMISR)(IMG_VOID*); + + /* Software command complete callback for device */ + IMG_VOID (*pfnDeviceCommandComplete)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + /* Flag indicating that command complete callback needs to be reprocessed */ + IMG_BOOL bReProcessDeviceCommandComplete; + + IMG_VOID (*pfnCacheInvalidate)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + + /* information about the device's address space and heaps */ + DEVICE_MEMORY_INFO sDevMemoryInfo; + + /* private device information */ + IMG_VOID *pvDevice; + IMG_UINT32 ui32pvDeviceSize; /* required by GetClassDeviceInfo API */ + + /* Resource Manager Context */ + PRESMAN_CONTEXT hResManContext; + + /* pointer back to parent sysdata */ + PSYS_DATA psSysData; + + /* default MMU PT/PD backing store to use for the device */ + RA_ARENA *psLocalDevMemArena; + + IMG_UINT32 ui32Flags; + + struct _PVRSRV_DEVICE_NODE_ *psNext; + struct _PVRSRV_DEVICE_NODE_ **ppsThis; + +#if defined(PDUMP) + /* device-level callback which is called when pdump.exe starts. + * Should be implemented in device-specific init code, e.g. 
sgxinit.c + */ + PVRSRV_ERROR (*pfnPDumpInitDevice)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); + /* device-level callback to return pdump ID associated to a memory context */ + IMG_UINT32 (*pfnMMUGetContextID)(IMG_HANDLE hDevMemContext); +#endif +} PVRSRV_DEVICE_NODE; + +PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData, + PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*), + IMG_UINT32 ui32SOCInterruptBit, + IMG_UINT32 *pui32DeviceIndex ); + +PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice(IMG_UINT32 ui32DevIndex); +PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccesful); + +PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); + +PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex); + +#if !defined(USE_CODE) + +/*! +****************************************************************************** + + @Function PollForValueKM + + @Description + Polls for a value to match a masked read of sysmem + + @Input pui32LinMemAddr : CPU linear address of the mem to poll + @Input ui32Value : req'd value + @Input ui32Mask : Mask + @Input ui32Timeoutus : maximum total time to wait (us) + @Input ui32PollPeriodus : minimum delay between consecutive polls (us) + @Input bAllowPreemption : allow the polling loop to be preempted + + @Return PVRSRV_ERROR : + +******************************************************************************/ +IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM(volatile IMG_UINT32* pui32LinMemAddr, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Timeoutus, + IMG_UINT32 ui32PollPeriodus, + IMG_BOOL bAllowPreemption); + +#endif /* !defined(USE_CODE) */ + + +#if defined (USING_ISR_INTERRUPTS) +PVRSRV_ERROR IMG_CALLCONV PollForInterruptKM(IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Waitus, + IMG_UINT32 ui32Tries); + +#endif /* #if defined (USING_ISR_INTERRUPTS) */ + +/* The following functions don't really belong here (srvkm.h might be 
a better + * place), but as they use the device data structures, this is the most convenient + * place for them. */ +PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData); +IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData); +IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode); +IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData); +IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData); + +#if defined(__cplusplus) +} +#endif + +#endif /* __DEVICE_H__ */ + +/****************************************************************************** + End of file (device.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/include/handle.h b/pvr-source/services4/srvkm/include/handle.h new file mode 100644 index 0000000..157d209 --- /dev/null +++ b/pvr-source/services4/srvkm/include/handle.h @@ -0,0 +1,567 @@ +/*************************************************************************/ /*! +@Title Handle Manager API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide handle management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef __HANDLE_H__ +#define __HANDLE_H__ + +/* + * Handle API + * ---------- + * The handle API is intended to provide handles for kernel resources, + * which can then be passed back to user space processes. + * + * The following functions comprise the API. Each function takes a + * pointer to a PVRSRV_HANDLE_BASE strcture, one of which is allocated + * for each process, and stored in the per-process data area. 
Use + * KERNEL_HANDLE_BASE for handles not allocated for a particular process, + * or for handles that need to be allocated before the PVRSRV_HANDLE_BASE + * structure for the process is available. + * + * PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, + * PVRSRV_HANDLE_ALLOC_FLAG eFlag); + * + * Allocate a handle phHandle, for the resource of type eType pointed to by + * pvData. + * + * For handles that have a definite lifetime, where the corresponding + * resource is explicitly created and destroyed, eFlag should be zero. + * + * If the resource is not explicitly created and destroyed, eFlag should be + * set to PVRSRV_HANDLE_ALLOC_FLAG_SHARED. For a given process, the same + * handle will be returned each time a handle for the resource is allocated + * with the PVRSRV_HANDLE_ALLOC_FLAG_SHARED flag. + * + * If a particular resource may be referenced multiple times by a + * given process, setting eFlag to PVRSRV_HANDLE_ALLOC_FLAG_MULTI + * will allow multiple handles to be allocated for the resource. + * Such handles cannot be found with PVRSRVFindHandle. + * + * PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, + * PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); + * + * This function is similar to PVRSRVAllocHandle, except that the allocated + * handles are associated with a parent handle, hParent, that has been + * allocated previously. Subhandles are automatically deallocated when their + * parent handle is dealloacted. + * Subhandles can be treated as ordinary handles. For example, they may + * have subhandles of their own, and may be explicity deallocated using + * PVRSRVReleaseHandle (see below). 
+ * + * PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType); + * + * Find the handle previously allocated for the resource pointed to by + * pvData, of type eType. Handles allocated with the flag + * PVRSRV_HANDLE_ALLOC_FLAG_MULTI cannot be found using this + * function. + * + * PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + * + * Given a handle for a resource of type eType, return the pointer to the + * resource. + * + * PVRSRV_ERROR PVRSRVLookuSubHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, + * IMH_HANDLE hAncestor); + * + * Similar to PVRSRVLookupHandle, but checks the handle is a descendent + * of hAncestor. + * + * PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, + * IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle); + * + * This function returns the resource pointer corresponding to the + * given handle, and the resource type in peType. This function is + * intended for situations where a handle may be one of several types, + * but the type isn't known beforehand. + * + * PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + * + * Deallocate a handle of given type. + * + * PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + * + * This function combines the functionality of PVRSRVLookupHandle and + * PVRSRVReleaseHandle, deallocating the handle after looking it up. + * + * PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + * + * Return the parent of a handle in *phParent, or IMG_NULL if the handle has + * no parent. 
+ * + * PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, + * IMG_UINT32 ui32BatchSize) + * + * Allocate a new handle batch. This preallocates ui32BatchSize handles. + * Batch mode simplifies the handling of handle allocation failures. + * The handle API is unchanged in batch mode, except that handles freed + * in batch mode will not be available for reallocation until the batch + * is committed or released (see below). + * + * PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase) + * void PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase) + * + * When handle allocation from a handle batch is complete, the + * batch must be committed by calling PVRSRVCommitHandleBatch. If + * an error occurred, and none of the handles in the batch are no + * longer needed, PVRSRVReleaseHandleBatch must be called. + * The macros PVRSRVAllocHandleNR, and PVRSRVAllocSubHandleNR + * are defined for use in batch mode. These work the same way + * as PVRSRVAllocHandle and PVRSRVAllocSubHandle, except that + * they don't return a value, relying on the fact that + * PVRSRVCommitHandleBatch will not commit any of the handles + * in a batch if there was an error allocating one of the + * handles in the batch. + * + * PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, + * IMG_UINT32 ui32MaxHandle) + * Set the maximum handle number. This is intended to restrict the + * handle range so that it will fit within a given field width. For + * example, setting the maximum handle number to 0x7fffffff, would + * ensure the handles would fit within a 31 bit width field. This + * facility should be used with caution, as it restricts the number of + * handles that can be allocated. + * + * IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase) + * Return the maximum handle number, or 0 if the setting of a limit + * is not supported. 
+ * + * PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase) + * Allows unused handle space to be reclaimed, by calling + * PVRSRVPurgeHandles. Note that allocating handles may have a + * higher overhead if purging is enabled. + * + * PVRSRV_ERROR PVRSRVPurgeHandles((PVRSRV_HANDLE_BASE *psBase) + * Purge handles for a handle base that has purging enabled. + */ + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "img_types.h" +#include "hash.h" +#include "resman.h" + +typedef enum +{ + PVRSRV_HANDLE_TYPE_NONE = 0, + PVRSRV_HANDLE_TYPE_PERPROC_DATA, + PVRSRV_HANDLE_TYPE_DEV_NODE, + PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT, + PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP, + PVRSRV_HANDLE_TYPE_MEM_INFO, + PVRSRV_HANDLE_TYPE_SYNC_INFO, + PVRSRV_HANDLE_TYPE_DISP_INFO, + PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN, + PVRSRV_HANDLE_TYPE_BUF_INFO, + PVRSRV_HANDLE_TYPE_DISP_BUFFER, + PVRSRV_HANDLE_TYPE_BUF_BUFFER, + PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT, + PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT, + PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT, + PVRSRV_HANDLE_TYPE_SHARED_PB_DESC, + PVRSRV_HANDLE_TYPE_MEM_INFO_REF, + PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO, + PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, + PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, + PVRSRV_HANDLE_TYPE_MMAP_INFO, + PVRSRV_HANDLE_TYPE_SOC_TIMER, + PVRSRV_HANDLE_TYPE_SYNC_INFO_MOD_OBJ, + PVRSRV_HANDLE_TYPE_RESITEM_INFO +} PVRSRV_HANDLE_TYPE; + +typedef enum +{ + /* No flags */ + PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0, + /* Share a handle that already exists for a given data pointer */ + PVRSRV_HANDLE_ALLOC_FLAG_SHARED = 0x01, + /* Muliple handles can point at the given data pointer */ + PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x02, + /* Subhandles are allocated in a private handle space */ + PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x04 +} PVRSRV_HANDLE_ALLOC_FLAG; + +struct _PVRSRV_HANDLE_BASE_; +typedef struct _PVRSRV_HANDLE_BASE_ PVRSRV_HANDLE_BASE; + +#if defined (PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE) +extern 
PVRSRV_HANDLE_BASE *gpsKernelHandleBase; + +#define KERNEL_HANDLE_BASE (gpsKernelHandleBase) + +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag); + +PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_SID hParent); + +PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_SID hHandle); + +PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType, IMG_SID hAncestor); + +PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID *phParent, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_SID hHandle, PVRSRV_HANDLE_TYPE eType); +#else +PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag); + +PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); + +PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle); + 
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor); + +PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); + +PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); +#endif /* #if defined (SUPPORT_SID_INTERFACE) */ + +PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize); + +PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase); + +IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle); + +IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase); + +PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase); + +PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID); + +PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID); + +#else /* #if defined (PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)*/ + +#define KERNEL_HANDLE_BASE IMG_NULL + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVAllocHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag) +{ + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(eFlag); + PVR_UNREFERENCED_PARAMETER(psBase); + + *phHandle = pvData; + return PVRSRV_OK; +} + +#ifdef 
INLINE_IS_PRAGMA +#pragma inline(PVRSRVAllocSubHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent) +{ + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(eFlag); + PVR_UNREFERENCED_PARAMETER(hParent); + PVR_UNREFERENCED_PARAMETER(psBase); + + *phHandle = pvData; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVFindHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType) +{ + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(psBase); + + *phHandle = pvData; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVLookupHandleAnyType) +#endif +static INLINE +PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + /* + * Unlike the other functions here, the returned results will need + * to be handled differently for the secure and non-secure cases. 
+ */ + *peType = PVRSRV_HANDLE_TYPE_NONE; + + *ppvData = hHandle; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVLookupHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_UNREFERENCED_PARAMETER(eType); + + *ppvData = hHandle; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVLookupSubHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(hAncestor); + + *ppvData = hHandle; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVGetParentHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(hHandle); + + *phParent = IMG_NULL; + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVLookupAndReleaseHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +{ + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(psBase); + + *ppvData = hHandle; + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVReleaseHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType) +{ + PVR_UNREFERENCED_PARAMETER(hHandle); + PVR_UNREFERENCED_PARAMETER(eType); + PVR_UNREFERENCED_PARAMETER(psBase); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVNewHandleBatch) +#endif +static 
INLINE +PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_UNREFERENCED_PARAMETER(ui32BatchSize); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVCommitHandleBatch) +#endif +static INLINE +PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVReleaseHandleBatch) +#endif +static INLINE +IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVSetMaxHandle) +#endif +static INLINE +PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + PVR_UNREFERENCED_PARAMETER(ui32MaxHandle); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVGetMaxHandle) +#endif +static INLINE +IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + return 0; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVEnableHandlePurging) +#endif +static INLINE +PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVPurgeHandles) +#endif +static INLINE +PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVAllocHandleBase) +#endif +static INLINE +PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase) +{ + *ppsBase = IMG_NULL; + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVFreeHandleBase) +#endif +static INLINE +PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase) +{ + PVR_UNREFERENCED_PARAMETER(psBase); + + return PVRSRV_OK; +} + 
+#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVHandleInit) +#endif +static INLINE +PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID) +{ + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVHandleDeInit) +#endif +static INLINE +PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID) +{ + return PVRSRV_OK; +} + +#endif /* #if defined (PVR_SECURE_HANDLES) || defined (SUPPORT_SID_INTERFACE)*/ + +/* + * Versions of PVRSRVAllocHandle and PVRSRVAllocSubHandle with no return + * values. Intended for use with batched handle allocation, relying on + * CommitHandleBatch to detect handle allocation errors. + */ +#define PVRSRVAllocHandleNR(psBase, phHandle, pvData, eType, eFlag) \ + (IMG_VOID)PVRSRVAllocHandle(psBase, phHandle, pvData, eType, eFlag) + +#define PVRSRVAllocSubHandleNR(psBase, phHandle, pvData, eType, eFlag, hParent) \ + (IMG_VOID)PVRSRVAllocSubHandle(psBase, phHandle, pvData, eType, eFlag, hParent) + +#if defined (__cplusplus) +} +#endif + +#endif /* __HANDLE_H__ */ + +/****************************************************************************** + End of file (handle.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/include/hash.h b/pvr-source/services4/srvkm/include/hash.h new file mode 100644 index 0000000..1ed6fd0 --- /dev/null +++ b/pvr-source/services4/srvkm/include/hash.h @@ -0,0 +1,277 @@ +/*************************************************************************/ /*! +@Title Self scaling hash tables +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Implements simple self scaling hash tables. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef _HASH_H_ +#define _HASH_H_ + +#include "img_types.h" +#include "osfunc.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +/* + * Keys passed to the comparsion function are only guaranteed to + * be aligned on an IMG_UINTPTR_T boundary. + */ +typedef IMG_UINT32 HASH_FUNC(IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen); +typedef IMG_BOOL HASH_KEY_COMP(IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2); + +typedef struct _HASH_TABLE_ HASH_TABLE; + +typedef PVRSRV_ERROR (*HASH_pfnCallback) ( + IMG_UINTPTR_T k, + IMG_UINTPTR_T v +); + +/*! +****************************************************************************** + @Function HASH_Func_Default + + @Description Hash function intended for hashing keys composed of + IMG_UINTPTR_T arrays. + + @Input uKeySize - the size of the hash key, in bytes. + @Input pKey - a pointer to the key to hash. + @Input uHashTabLen - the length of the hash table. + + @Return The hash value. +******************************************************************************/ +IMG_UINT32 HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen); + +/*! +****************************************************************************** + @Function HASH_Key_Comp_Default + + @Description Compares keys composed of IMG_UINTPTR_T arrays. + + @Input uKeySize - the size of the hash key, in bytes. + @Input pKey1 - pointer to first hash key to compare. + @Input pKey2 - pointer to second hash key to compare. + + @Return IMG_TRUE - the keys match. + IMG_FALSE - the keys don't match. +******************************************************************************/ +IMG_BOOL HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2); + +/*! 
+****************************************************************************** + @Function HASH_Create_Extended + + @Description Create a self scaling hash table, using the supplied + key size, and the supllied hash and key comparsion + functions. + + @Input uInitialLen - initial and minimum length of the + hash table, where the length refers to the number + of entries in the hash table, not its size in + bytes. + @Input uKeySize - the size of the key, in bytes. + @Input pfnHashFunc - pointer to hash function. + @Input pfnKeyComp - pointer to key comparsion function. + + @Return IMG_NULL or hash table handle. +******************************************************************************/ +HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp); + +/*! +****************************************************************************** + @Function HASH_Create + + @Description Create a self scaling hash table with a key + consisting of a single IMG_UINTPTR_T, and using + the default hash and key comparison functions. + + @Input uInitialLen - initial and minimum length of the + hash table, where the length refers to the + number of entries in the hash table, not its size + in bytes. + + @Return IMG_NULL or hash table handle. +******************************************************************************/ +HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen); + +/*! +****************************************************************************** + @Function HASH_Delete + + @Description Delete a hash table created by HASH_Create_Extended or + HASH_Create. All entries in the table must have been + removed before calling this function. + + @Input pHash - hash table + + @Return None +******************************************************************************/ +IMG_VOID HASH_Delete (HASH_TABLE *pHash); + +/*! 
+****************************************************************************** + @Function HASH_Insert_Extended + + @Description Insert a key value pair into a hash table created + with HASH_Create_Extended. + + @Input pHash - the hash table. + @Input pKey - pointer to the key. + @Input v - the value associated with the key. + + @Return IMG_TRUE - success. + IMG_FALSE - failure. +******************************************************************************/ +IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v); + +/*! +****************************************************************************** + @Function HASH_Insert + + @Description Insert a key value pair into a hash table created with + HASH_Create. + + @Input pHash - the hash table. + @Input k - the key value. + @Input v - the value associated with the key. + + @Return IMG_TRUE - success. + IMG_FALSE - failure. +******************************************************************************/ +IMG_BOOL HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v); + +/*! +****************************************************************************** + @Function HASH_Remove_Extended + + @Description Remove a key from a hash table created with + HASH_Create_Extended. + + @Input pHash - the hash table. + @Input pKey - pointer to key. + + @Return 0 if the key is missing, or the value associated + with the key. +******************************************************************************/ +IMG_UINTPTR_T HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey); + +/*! +****************************************************************************** + @Function HASH_Remove + + @Description Remove a key value pair from a hash table created + with HASH_Create. + + @Input pHash - the hash table + @Input k - the key + + @Return 0 if the key is missing, or the value associated + with the key. 
+******************************************************************************/ +IMG_UINTPTR_T HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k); + +/*! +****************************************************************************** + @Function HASH_Retrieve_Extended + + @Description Retrieve a value from a hash table created with + HASH_Create_Extended. + + @Input pHash - the hash table. + @Input pKey - pointer to the key. + + @Return 0 if the key is missing, or the value associated with + the key. +******************************************************************************/ +IMG_UINTPTR_T HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey); + +/*! +****************************************************************************** + @Function HASH_Retrieve + + @Description Retrieve a value from a hash table created with + HASH_Create. + + @Input pHash - the hash table + @Input k - the key + + @Return 0 if the key is missing, or the value associated with + the key. +******************************************************************************/ +IMG_UINTPTR_T HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k); + +/*! +****************************************************************************** + @Function HASH_Interate + + @Description Iterate over every entry in the hash table + + @Input pHash - the old hash table + @Input HASH_pfnCallback - the size of the old hash table + + @Return Callback error if any, otherwise PVRSRV_OK +******************************************************************************/ +PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback); + +#ifdef HASH_TRACE +/*! +****************************************************************************** + @Function HASH_Dump + + @Description Dump out some information about a hash table. 
+ + @Input pHash - the hash table + + @Return None +******************************************************************************/ +IMG_VOID HASH_Dump (HASH_TABLE *pHash); +#endif + +#if defined (__cplusplus) +} +#endif + +#endif /* _HASH_H_ */ + +/****************************************************************************** + End of file (hash.h) +******************************************************************************/ + + diff --git a/pvr-source/services4/srvkm/include/lists.h b/pvr-source/services4/srvkm/include/lists.h new file mode 100644 index 0000000..81205de --- /dev/null +++ b/pvr-source/services4/srvkm/include/lists.h @@ -0,0 +1,349 @@ +/*************************************************************************/ /*! +@Title Linked list shared functions templates. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Definition of the linked list function templates. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __LISTS_UTILS__ +#define __LISTS_UTILS__ + +/* instruct QAC to ignore warnings about the following custom formatted macros */ +/* PRQA S 0881,3410 ++ */ +#include <stdarg.h> +#include "img_types.h" + +/* + - USAGE - + + The list functions work with any structure that provides the fields psNext and + ppsThis. In order to make a function available for a given type, it is required + to use the funcion template macro that creates the actual code. + + There are 4 main types of functions: + - INSERT : given a pointer to the head pointer of the list and a pointer to + the node, inserts it as the new head. + - REMOVE : given a pointer to a node, removes it from its list. + - FOR EACH : apply a function over all the elements of a list. 
+ - ANY : apply a function over the elements of a list, until one of them + return a non null value, and then returns it. + + The two last functions can have a variable argument form, with allows to pass + additional parameters to the callback function. In order to do this, the + callback function must take two arguments, the first is the current node and + the second is a list of variable arguments (va_list). + + The ANY functions have also another for wich specifies the return type of the + callback function and the default value returned by the callback function. + +*/ + +/*! +****************************************************************************** + @Function List_##TYPE##_ForEach + + @Description Apply a callback function to all the elements of a list. + + @Input psHead - the head of the list to be processed. + @Input pfnCallBack - the function to be applied to each element + of the list. + + @Return None +******************************************************************************/ +#define DECLARE_LIST_FOR_EACH(TYPE) \ +IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_FOR_EACH(TYPE) \ +IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))\ +{\ + while(psHead)\ + {\ + pfnCallBack(psHead);\ + psHead = psHead->psNext;\ + }\ +} + + +#define DECLARE_LIST_FOR_EACH_VA(TYPE) \ +IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...) + +#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \ +IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...) \ +{\ + va_list ap;\ + while(psHead)\ + {\ + va_start(ap, pfnCallBack);\ + pfnCallBack(psHead, ap);\ + psHead = psHead->psNext;\ + va_end(ap);\ + }\ +} + + +/*! 
+****************************************************************************** + @Function List_##TYPE##_Any + + @Description Applies a callback function to the elements of a list until + the function returns a non null value, then returns it. + + @Input psHead - the head of the list to be processed. + @Input pfnCallBack - the function to be applied to each element + of the list. + + @Return None +******************************************************************************/ +#define DECLARE_LIST_ANY(TYPE) \ +IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_ANY(TYPE) \ +IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode))\ +{ \ + IMG_VOID *pResult;\ + TYPE *psNextNode;\ + pResult = IMG_NULL;\ + psNextNode = psHead;\ + while(psHead && !pResult)\ + {\ + psNextNode = psNextNode->psNext;\ + pResult = pfnCallBack(psHead);\ + psHead = psNextNode;\ + }\ + return pResult;\ +} + + +/*with variable arguments, that will be passed as a va_list to the callback function*/ + +#define DECLARE_LIST_ANY_VA(TYPE) \ +IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...) 
+ +#define IMPLEMENT_LIST_ANY_VA(TYPE) \ +IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\ +{\ + va_list ap;\ + TYPE *psNextNode;\ + IMG_VOID* pResult = IMG_NULL;\ + while(psHead && !pResult)\ + {\ + psNextNode = psHead->psNext;\ + va_start(ap, pfnCallBack);\ + pResult = pfnCallBack(psHead, ap);\ + va_end(ap);\ + psHead = psNextNode;\ + }\ + return pResult;\ +} + +/*those ones are for extra type safety, so there's no need to use castings for the results*/ + +#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode)) + +#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\ +{ \ + RTYPE result;\ + TYPE *psNextNode;\ + result = CONTINUE;\ + psNextNode = psHead;\ + while(psHead && result == CONTINUE)\ + {\ + psNextNode = psNextNode->psNext;\ + result = pfnCallBack(psHead);\ + psHead = psNextNode;\ + }\ + return result;\ +} + + +#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...) + +#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \ +RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\ +{\ + va_list ap;\ + TYPE *psNextNode;\ + RTYPE result = CONTINUE;\ + while(psHead && result == CONTINUE)\ + {\ + psNextNode = psHead->psNext;\ + va_start(ap, pfnCallBack);\ + result = pfnCallBack(psHead, ap);\ + va_end(ap);\ + psHead = psNextNode;\ + }\ + return result;\ +} + + +/*! +****************************************************************************** + @Function List_##TYPE##_Remove + + @Description Removes a given node from the list. + + @Input psNode - the pointer to the node to be removed. 
+ + @Return None +******************************************************************************/ +#define DECLARE_LIST_REMOVE(TYPE) \ +IMG_VOID List_##TYPE##_Remove(TYPE *psNode) + +#define IMPLEMENT_LIST_REMOVE(TYPE) \ +IMG_VOID List_##TYPE##_Remove(TYPE *psNode)\ +{\ + (*psNode->ppsThis)=psNode->psNext;\ + if(psNode->psNext)\ + {\ + psNode->psNext->ppsThis = psNode->ppsThis;\ + }\ +} + +/*! +****************************************************************************** + @Function List_##TYPE##_Insert + + @Description Inserts a given node at the beginnning of the list. + + @Input psHead - The pointer to the pointer to the head node. + @Input psNode - The pointer to the node to be inserted. + + @Return None +******************************************************************************/ +#define DECLARE_LIST_INSERT(TYPE) \ +IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode) + +#define IMPLEMENT_LIST_INSERT(TYPE) \ +IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\ +{\ + psNewNode->ppsThis = ppsHead;\ + psNewNode->psNext = *ppsHead;\ + *ppsHead = psNewNode;\ + if(psNewNode->psNext)\ + {\ + psNewNode->psNext->ppsThis = &(psNewNode->psNext);\ + }\ +} + +/*! +****************************************************************************** + @Function List_##TYPE##_Reverse + + @Description Reverse a list in place + + @Input ppsHead - The pointer to the pointer to the head node. 
+ + @Return None +******************************************************************************/ +#define DECLARE_LIST_REVERSE(TYPE) \ +IMG_VOID List_##TYPE##_Reverse(TYPE **ppsHead) + +#define IMPLEMENT_LIST_REVERSE(TYPE) \ +IMG_VOID List_##TYPE##_Reverse(TYPE **ppsHead)\ +{\ + TYPE *psTmpNode1; \ + TYPE *psTmpNode2; \ + TYPE *psCurNode; \ + psTmpNode1 = IMG_NULL; \ + psCurNode = *ppsHead; \ + while(psCurNode) { \ + psTmpNode2 = psCurNode->psNext; \ + psCurNode->psNext = psTmpNode1; \ + psTmpNode1 = psCurNode; \ + psCurNode = psTmpNode2; \ + if(psCurNode) \ + { \ + psTmpNode1->ppsThis = &(psCurNode->psNext); \ + } \ + else \ + { \ + psTmpNode1->ppsThis = ppsHead; \ + } \ + } \ + *ppsHead = psTmpNode1; \ +} + +#define IS_LAST_ELEMENT(x) ((x)->psNext == IMG_NULL) + +#include "services_headers.h" + +DECLARE_LIST_ANY_VA(BM_HEAP); +DECLARE_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_FOR_EACH_VA(BM_HEAP); +DECLARE_LIST_REMOVE(BM_HEAP); +DECLARE_LIST_INSERT(BM_HEAP); + +DECLARE_LIST_ANY_VA(BM_CONTEXT); +DECLARE_LIST_ANY_VA_2(BM_CONTEXT, IMG_HANDLE, IMG_NULL); +DECLARE_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_FOR_EACH(BM_CONTEXT); +DECLARE_LIST_REMOVE(BM_CONTEXT); +DECLARE_LIST_INSERT(BM_CONTEXT); + +DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE); +DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE); +DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE); +DECLARE_LIST_INSERT(PVRSRV_DEVICE_NODE); +DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE); + +DECLARE_LIST_ANY_VA(PVRSRV_POWER_DEV); +DECLARE_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK); +DECLARE_LIST_INSERT(PVRSRV_POWER_DEV); +DECLARE_LIST_REMOVE(PVRSRV_POWER_DEV); + +#undef DECLARE_LIST_ANY_2 +#undef DECLARE_LIST_ANY_VA +#undef DECLARE_LIST_ANY_VA_2 +#undef DECLARE_LIST_FOR_EACH +#undef 
DECLARE_LIST_FOR_EACH_VA +#undef DECLARE_LIST_INSERT +#undef DECLARE_LIST_REMOVE + +IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va); +IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va); + +#endif + +/* re-enable warnings */ +/* PRQA S 0881,3410 -- */ diff --git a/pvr-source/services4/srvkm/include/metrics.h b/pvr-source/services4/srvkm/include/metrics.h new file mode 100644 index 0000000..18079cb --- /dev/null +++ b/pvr-source/services4/srvkm/include/metrics.h @@ -0,0 +1,146 @@ +/*************************************************************************/ /*! +@Title Time measurement interface. +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _METRICS_ +#define _METRICS_ + + +#if defined (__cplusplus) +extern "C" { +#endif + + +#if defined(DEBUG) || defined(TIMING) + + +typedef struct +{ + IMG_UINT32 ui32Start; + IMG_UINT32 ui32Stop; + IMG_UINT32 ui32Total; + IMG_UINT32 ui32Count; +} Temporal_Data; + +extern Temporal_Data asTimers[]; + +extern IMG_UINT32 PVRSRVTimeNow(IMG_VOID); +extern IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo); +extern IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID); + + +#define PVRSRV_TIMER_DUMMY 0 + +#define PVRSRV_TIMER_EXAMPLE_1 1 +#define PVRSRV_TIMER_EXAMPLE_2 2 + + +#define PVRSRV_NUM_TIMERS (PVRSRV_TIMER_EXAMPLE_2 + 1) + +#define PVRSRV_TIME_START(X) { \ + asTimers[X].ui32Count += 1; \ + asTimers[X].ui32Count |= 0x80000000L; \ + asTimers[X].ui32Start = PVRSRVTimeNow(); \ + asTimers[X].ui32Stop = 0; \ + } + +#define PVRSRV_TIME_SUSPEND(X) { \ + asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \ + } + +#define PVRSRV_TIME_RESUME(X) { \ + asTimers[X].ui32Start = PVRSRVTimeNow(); \ + } + +#define PVRSRV_TIME_STOP(X) { \ + asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \ + asTimers[X].ui32Total += 
asTimers[X].ui32Stop; \ + asTimers[X].ui32Count &= 0x7FFFFFFFL; \ + } + +#define PVRSRV_TIME_RESET(X) { \ + asTimers[X].ui32Start = 0; \ + asTimers[X].ui32Stop = 0; \ + asTimers[X].ui32Total = 0; \ + asTimers[X].ui32Count = 0; \ + } + + +#if defined(__sh__) + +#define TST_REG ((volatile IMG_UINT8 *) (psDevInfo->pvSOCRegsBaseKM)) // timer start register + +#define TCOR_2 ((volatile IMG_UINT *) (psDevInfo->pvSOCRegsBaseKM+28)) // timer constant register_2 +#define TCNT_2 ((volatile IMG_UINT *) (psDevInfo->pvSOCRegsBaseKM+32)) // timer counter register_2 +#define TCR_2 ((volatile IMG_UINT16 *)(psDevInfo->pvSOCRegsBaseKM+36)) // timer control register_2 + +#define TIMER_DIVISOR 4 + +#endif /* defined(__sh__) */ + + + +#else /* defined(DEBUG) || defined(TIMING) */ + + + +#define PVRSRV_TIME_START(X) +#define PVRSRV_TIME_SUSPEND(X) +#define PVRSRV_TIME_RESUME(X) +#define PVRSRV_TIME_STOP(X) +#define PVRSRV_TIME_RESET(X) + +#define PVRSRVSetupMetricTimers(X) +#define PVRSRVOutputMetricTotals() + + + +#endif /* defined(DEBUG) || defined(TIMING) */ + +#if defined(__cplusplus) +} +#endif + + +#endif /* _METRICS_ */ + +/************************************************************************** + End of file (metrics.h) +**************************************************************************/ diff --git a/pvr-source/services4/srvkm/include/osfunc.h b/pvr-source/services4/srvkm/include/osfunc.h new file mode 100644 index 0000000..dcaa58a --- /dev/null +++ b/pvr-source/services4/srvkm/include/osfunc.h @@ -0,0 +1,799 @@ +/*************************************************************************/ /*! +@Title OS functions header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description OS specific API definitions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifdef DEBUG_RELEASE_BUILD +#pragma optimize( "", off ) +#define DEBUG 1 +#endif + +#ifndef __OSFUNC_H__ +#define __OSFUNC_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +#if defined(__linux__) && defined(__KERNEL__) +#include <linux/hardirq.h> +#include <linux/string.h> +#include <asm/system.h> +#if defined(__arm__) +#include <asm/memory.h> +#endif +#endif + + +/* setup conditional pageable / non-pageable select */ + /* Other OSs only need pageable */ + #define PVRSRV_PAGEABLE_SELECT PVRSRV_OS_PAGEABLE_HEAP + +/****************************************************************************** + * Static defines + *****************************************************************************/ +#define KERNEL_ID 0xffffffffL +#define POWER_MANAGER_ID 0xfffffffeL +#define ISR_ID 0xfffffffdL +#define TIMER_ID 0xfffffffcL + + +#define HOST_PAGESIZE OSGetPageSize +#define HOST_PAGEMASK (HOST_PAGESIZE()-1) +#define HOST_PAGEALIGN(addr) (((addr) + HOST_PAGEMASK) & ~HOST_PAGEMASK) + +/****************************************************************************** + * Host memory heaps + *****************************************************************************/ +#define PVRSRV_OS_HEAP_MASK 0xf /* host heap flags mask */ +#define PVRSRV_OS_PAGEABLE_HEAP 0x1 /* allocation pageable */ +#define PVRSRV_OS_NON_PAGEABLE_HEAP 0x2 /* allocation non pageable */ + + +IMG_UINT32 OSClockus(IMG_VOID); +IMG_SIZE_T OSGetPageSize(IMG_VOID); +PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData, + IMG_UINT32 ui32Irq, + IMG_CHAR *pszISRName, + IMG_VOID *pvDeviceNode); +PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData); +PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq); +PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData); +PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData); +PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData); +IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_HANDLE, 
IMG_VOID* pvLinAddr); +IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_SIZE_T ui32Size); +IMG_VOID *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE *phOSMemHandle); +IMG_BOOL OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle); + +PVRSRV_ERROR OSReservePhys(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hBMHandle, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle); +PVRSRV_ERROR OSUnReservePhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle); + +/* Some terminology: + * + * FLUSH Flush w/ invalidate + * CLEAN Flush w/o invalidate + * INVALIDATE Invalidate w/o flush + */ + +#if defined(__linux__) && defined(__KERNEL__) + +IMG_VOID OSFlushCPUCacheKM(IMG_VOID); + +IMG_VOID OSCleanCPUCacheKM(IMG_VOID); + +IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length); +IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length); +IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length); + +#else /* defined(__linux__) && defined(__KERNEL__) */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSFlushCPUCacheKM) +#endif +static INLINE IMG_VOID OSFlushCPUCacheKM(IMG_VOID) {} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSCleanCPUCacheKM) +#endif +static INLINE IMG_VOID OSCleanCPUCacheKM(IMG_VOID) {} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSFlushCPUCacheRangeKM) +#endif +static INLINE IMG_BOOL OSFlushCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + PVR_UNREFERENCED_PARAMETER(ui32ByteOffset); + 
PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart); + PVR_UNREFERENCED_PARAMETER(ui32Length); + return IMG_FALSE; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSCleanCPUCacheRangeKM) +#endif +static INLINE IMG_BOOL OSCleanCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + PVR_UNREFERENCED_PARAMETER(ui32ByteOffset); + PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart); + PVR_UNREFERENCED_PARAMETER(ui32Length); + return IMG_FALSE; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSInvalidateCPUCacheRangeKM) +#endif +static INLINE IMG_BOOL OSInvalidateCPUCacheRangeKM(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32ByteOffset, + IMG_VOID *pvRangeAddrStart, + IMG_UINT32 ui32Length) +{ + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + PVR_UNREFERENCED_PARAMETER(ui32ByteOffset); + PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart); + PVR_UNREFERENCED_PARAMETER(ui32Length); + return IMG_FALSE; +} + +#endif /* defined(__linux__) && defined(__KERNEL__) */ + +#if defined(__linux__) || defined(__QNXNTO__) +PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr, + IMG_VOID *pvCpuVAddr, + IMG_SIZE_T ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE *phOSMemHandle); +PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr, + IMG_SIZE_T ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hOSMemHandle); +#else /* defined(__linux__) */ +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSRegisterDiscontigMem) +#endif +static INLINE PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr, + IMG_VOID *pvCpuVAddr, + IMG_SIZE_T ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE *phOSMemHandle) +{ + PVR_UNREFERENCED_PARAMETER(pBasePAddr); + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + PVR_UNREFERENCED_PARAMETER(ui32Bytes); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(phOSMemHandle); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +#ifdef INLINE_IS_PRAGMA +#pragma 
inline(OSUnRegisterDiscontigMem) +#endif +static INLINE PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr, + IMG_SIZE_T ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hOSMemHandle) +{ + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + PVR_UNREFERENCED_PARAMETER(ui32Bytes); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} +#endif /* defined(__linux__) */ + + +#if defined(__linux__) || defined(__QNXNTO__) +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSReserveDiscontigPhys) +#endif +static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle) +{ +#if defined(__linux__) || defined(__QNXNTO__) + *ppvCpuVAddr = IMG_NULL; + return OSRegisterDiscontigMem(pBasePAddr, *ppvCpuVAddr, ui32Bytes, ui32Flags, phOSMemHandle); +#else + extern IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr(IMG_SYS_PHYADDR SysPAddr); + + /* + * On uITRON we know: + * 1. We will only be called with a non-contig physical if we + * already have a contiguous CPU linear + * 2. There is a one->one mapping of CpuPAddr -> CpuVAddr + * 3. Looking up the first CpuPAddr will find the first CpuVAddr + * 4. 
We don't need to unmap + */ + + return OSReservePhys(SysSysPAddrToCpuPAddr(pBasePAddr[0]), ui32Bytes, ui32Flags, IMG_NULL, ppvCpuVAddr, phOSMemHandle); +#endif +} + +static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle) +{ +#if defined(__linux__) || defined(__QNXNTO__) + OSUnRegisterDiscontigMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle); +#endif + /* We don't need to unmap */ + return PVRSRV_OK; +} +#else /* defined(__linux__) */ + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSReserveDiscontigPhys) +#endif +static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle) +{ + PVR_UNREFERENCED_PARAMETER(pBasePAddr); + PVR_UNREFERENCED_PARAMETER(ui32Bytes); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(ppvCpuVAddr); + PVR_UNREFERENCED_PARAMETER(phOSMemHandle); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSUnReserveDiscontigPhys) +#endif +static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle) +{ + PVR_UNREFERENCED_PARAMETER(pvCpuVAddr); + PVR_UNREFERENCED_PARAMETER(ui32Bytes); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + + return PVRSRV_ERROR_NOT_SUPPORTED; +} +#endif /* defined(__linux__) */ + +PVRSRV_ERROR OSRegisterMem(IMG_CPU_PHYADDR BasePAddr, + IMG_VOID *pvCpuVAddr, + IMG_SIZE_T ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE *phOSMemHandle); +PVRSRV_ERROR OSUnRegisterMem(IMG_VOID *pvCpuVAddr, + IMG_SIZE_T ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hOSMemHandle); + + + +#if defined(__linux__) || defined(__QNXNTO__) +PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle, + IMG_UINTPTR_T ui32ByteOffset, + IMG_SIZE_T ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE 
*phOSMemHandleRet); +PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags); +#else +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSGetSubMemHandle) +#endif +static INLINE PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle, + IMG_UINTPTR_T ui32ByteOffset, + IMG_SIZE_T ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE *phOSMemHandleRet) +{ + PVR_UNREFERENCED_PARAMETER(ui32ByteOffset); + PVR_UNREFERENCED_PARAMETER(ui32Bytes); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + + *phOSMemHandleRet = hOSMemHandle; + return PVRSRV_OK; +} + +static INLINE PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags) +{ + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + PVR_UNREFERENCED_PARAMETER(ui32Flags); + return PVRSRV_OK; +} +#endif + +IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID); +int OSGetProcCmdline(IMG_UINT32 ui32PID, char * buffer, int buff_size); +const char* OSGetPathBaseName(char * buffer, int buff_size); +IMG_UINTPTR_T OSGetCurrentThreadID( IMG_VOID ); +IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T ui32Size); + +PVRSRV_ERROR OSAllocPages_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_UINT32 ui32PageSize, + IMG_PVOID pvPrivData, IMG_UINT32 ui32PrivDataLength, IMG_HANDLE hBMHandle, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phPageAlloc); +PVRSRV_ERROR OSFreePages(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hPageAlloc); + +IMG_INT32 +OSGetMemMultiPlaneInfo(IMG_HANDLE hOSMemHandle, IMG_UINT32* pui32AddressOffsets, + IMG_UINT32* ui32NumAddrOffsets); + + +/*--------------------- +The set of macros below follows this pattern: + +f(x) = if F -> f2(g(x)) + else -> g(x) + +g(x) = if G -> g2(h(x)) + else -> h(x) + +h(x) = ... 
+ +-----------------------*/ + +/*If level 3 wrapper is enabled, we add a PVR_TRACE and call the next level, else just call the next level*/ +#ifdef PVRSRV_LOG_MEMORY_ALLOCS + #define OSAllocMem(flags, size, linAddr, blockAlloc, logStr) \ + (PVR_TRACE(("OSAllocMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): " logStr " (size = 0x%lx)", size)), \ + OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)) + + #define OSAllocPages(flags, size, pageSize, privdata, privdatalength, bmhandle, linAddr, pageAlloc) \ + (PVR_TRACE(("OSAllocPages(" #flags ", " #size ", " #pageSize ", " #linAddr ", " #pageAlloc "): (size = 0x%lx)", size)), \ + OSAllocPages_Impl(flags, size, pageSize, linAddr, privdata, privdatalength, bmhandle, pageAlloc)) + + #define OSFreeMem(flags, size, linAddr, blockAlloc) \ + (PVR_TRACE(("OSFreeMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): (pointer = 0x%X)", linAddr)), \ + OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)) +#else + #define OSAllocMem(flags, size, linAddr, blockAlloc, logString) \ + OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__) + + #define OSAllocPages OSAllocPages_Impl + + #define OSFreeMem(flags, size, linAddr, blockAlloc) \ + OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__) +#endif + +/*If level 2 wrapper is enabled declare the function, +else alias to level 1 wrapper, else the wrapper function will be used*/ +#ifdef PVRSRV_DEBUG_OS_MEMORY + + PVRSRV_ERROR OSAllocMem_Debug_Wrapper(IMG_UINT32 ui32Flags, + IMG_UINT32 ui32Size, + IMG_PVOID *ppvCpuVAddr, + IMG_HANDLE *phBlockAlloc, + IMG_CHAR *pszFilename, + IMG_UINT32 ui32Line); + + PVRSRV_ERROR OSFreeMem_Debug_Wrapper(IMG_UINT32 ui32Flags, + IMG_UINT32 ui32Size, + IMG_PVOID pvCpuVAddr, + IMG_HANDLE hBlockAlloc, + IMG_CHAR *pszFilename, + IMG_UINT32 ui32Line); + + + typedef struct + { + IMG_UINT8 sGuardRegionBefore[8]; + IMG_CHAR sFileName[128]; + 
IMG_UINT32 uLineNo; + IMG_SIZE_T uSize; + IMG_SIZE_T uSizeParityCheck; + enum valid_tag + { isFree = 0x277260FF, + isAllocated = 0x260511AA + } eValid; + } OSMEM_DEBUG_INFO; + + #define TEST_BUFFER_PADDING_STATUS (sizeof(OSMEM_DEBUG_INFO)) + #define TEST_BUFFER_PADDING_AFTER (8) + #define TEST_BUFFER_PADDING (TEST_BUFFER_PADDING_STATUS + TEST_BUFFER_PADDING_AFTER) +#else + #define OSAllocMem_Debug_Wrapper OSAllocMem_Debug_Linux_Memory_Allocations + #define OSFreeMem_Debug_Wrapper OSFreeMem_Debug_Linux_Memory_Allocations +#endif + +/*If level 1 wrapper is enabled declare the functions with extra parameters +else alias to level 0 and declare the functions without the extra debugging parameters*/ +#if (defined(__linux__) || defined(__QNXNTO__)) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS) + PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line); + PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line); + + #define OSAllocMem_Debug_Linux_Memory_Allocations OSAllocMem_Impl + #define OSFreeMem_Debug_Linux_Memory_Allocations OSFreeMem_Impl +#else + PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc); + PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc); + + #define OSAllocMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \ + OSAllocMem_Impl(flags, size, addr, blockAlloc) + #define OSFreeMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \ + OSFreeMem_Impl(flags, size, addr, blockAlloc) +#endif + + +#if defined(__linux__) || defined(__QNXNTO__) +IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_SIZE_T ui32ByteOffset); +#else +#ifdef INLINE_IS_PRAGMA +#pragma 
inline(OSMemHandleToCpuPAddr) +#endif +static INLINE IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_HANDLE hOSMemHandle, IMG_SIZE_T ui32ByteOffset) +{ + IMG_CPU_PHYADDR sCpuPAddr; + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + PVR_UNREFERENCED_PARAMETER(ui32ByteOffset); + sCpuPAddr.uiAddr = 0; + return sCpuPAddr; +} +#endif + +#if defined(__linux__) +IMG_BOOL OSMemHandleIsPhysContig(IMG_VOID *hOSMemHandle); +#else +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSMemHandleIsPhysContig) +#endif +static INLINE IMG_BOOL OSMemHandleIsPhysContig(IMG_HANDLE hOSMemHandle) +{ + PVR_UNREFERENCED_PARAMETER(hOSMemHandle); + return IMG_FALSE; +} +#endif + +PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData); +PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData); +IMG_CHAR* OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc); +IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_SIZE_T ui32Size, const IMG_CHAR *pszFormat, ...) IMG_FORMAT_PRINTF(3, 4); +#define OSStringLength(pszString) strlen(pszString) + +#if defined (SUPPORT_SID_INTERFACE) +PVRSRV_ERROR OSEventObjectCreateKM(const IMG_CHAR *pszName, + PVRSRV_EVENTOBJECT_KM *psEventObject); +PVRSRV_ERROR OSEventObjectDestroyKM(PVRSRV_EVENTOBJECT_KM *psEventObject); +PVRSRV_ERROR OSEventObjectSignalKM(IMG_HANDLE hOSEventKM); +PVRSRV_ERROR OSEventObjectWaitKM(IMG_HANDLE hOSEventKM); +PVRSRV_ERROR OSEventObjectOpenKM(PVRSRV_EVENTOBJECT_KM *psEventObject, + IMG_HANDLE *phOSEvent); +PVRSRV_ERROR OSEventObjectCloseKM(PVRSRV_EVENTOBJECT_KM *psEventObject, + IMG_HANDLE hOSEventKM); +#else +PVRSRV_ERROR OSEventObjectCreateKM(const IMG_CHAR *pszName, + PVRSRV_EVENTOBJECT *psEventObject); +PVRSRV_ERROR OSEventObjectDestroyKM(PVRSRV_EVENTOBJECT *psEventObject); +PVRSRV_ERROR OSEventObjectSignalKM(IMG_HANDLE hOSEventKM); +PVRSRV_ERROR OSEventObjectWaitKM(IMG_HANDLE hOSEventKM); +PVRSRV_ERROR OSEventObjectOpenKM(PVRSRV_EVENTOBJECT *psEventObject, + IMG_HANDLE *phOSEvent); +PVRSRV_ERROR OSEventObjectCloseKM(PVRSRV_EVENTOBJECT *psEventObject, + 
IMG_HANDLE hOSEventKM); +#endif /* #if defined (SUPPORT_SID_INTERFACE) */ + + +PVRSRV_ERROR OSBaseAllocContigMemory(IMG_SIZE_T ui32Size, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr); +PVRSRV_ERROR OSBaseFreeContigMemory(IMG_SIZE_T ui32Size, IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr); + +IMG_PVOID MapUserFromKernel(IMG_PVOID pvLinAddrKM,IMG_SIZE_T ui32Size,IMG_HANDLE *phMemBlock); +IMG_PVOID OSMapHWRegsIntoUserSpace(IMG_HANDLE hDevCookie, IMG_SYS_PHYADDR sRegAddr, IMG_UINT32 ulSize, IMG_PVOID *ppvProcess); +IMG_VOID OSUnmapHWRegsFromUserSpace(IMG_HANDLE hDevCookie, IMG_PVOID pvUserAddr, IMG_PVOID pvProcess); + +IMG_VOID UnmapUserFromKernel(IMG_PVOID pvLinAddrUM, IMG_SIZE_T ui32Size, IMG_HANDLE hMemBlock); + +PVRSRV_ERROR OSMapPhysToUserSpace(IMG_HANDLE hDevCookie, + IMG_SYS_PHYADDR sCPUPhysAddr, + IMG_SIZE_T uiSizeInBytes, + IMG_UINT32 ui32CacheFlags, + IMG_PVOID *ppvUserAddr, + IMG_SIZE_T *puiActualSize, + IMG_HANDLE hMappingHandle); + +PVRSRV_ERROR OSUnmapPhysToUserSpace(IMG_HANDLE hDevCookie, + IMG_PVOID pvUserAddr, + IMG_PVOID pvProcess); + +PVRSRV_ERROR OSLockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID); +PVRSRV_ERROR OSUnlockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID); +IMG_BOOL OSIsResourceLocked(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID); +PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource); +PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE *psResource); +IMG_VOID OSBreakResourceLock(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID); + +#if defined(SYS_CUSTOM_POWERLOCK_WRAP) +#define OSPowerLockWrap SysPowerLockWrap +#define OSPowerLockUnwrap SysPowerLockUnwrap +#else +/****************************************************************************** + @Function OSPowerLockWrap + + @Description OS-specific wrapper around the power lock + + @Input bTryLock - don't block on lock contention + + @Return PVRSRV_ERROR +******************************************************************************/ +PVRSRV_ERROR 
OSPowerLockWrap(IMG_BOOL bTryLock); + +/****************************************************************************** + @Function OSPowerLockUnwrap + + @Description OS-specific wrapper around the power unlock + + @Return IMG_VOID +******************************************************************************/ +IMG_VOID OSPowerLockUnwrap(IMG_VOID); +#endif /* SYS_CUSTOM_POWERLOCK_WRAP */ + +/*! +****************************************************************************** + + @Function OSWaitus + + @Description + This function implements a busy wait of the specified microseconds + This function does NOT release thread quanta + + @Input ui32Timeus - (us) + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus); + +/*! +****************************************************************************** + + @Function OSSleepms + + @Description + This function implements a sleep of the specified milliseconds + This function may allow pre-emption if implemented + + @Input ui32Timems - (ms) + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID OSSleepms(IMG_UINT32 ui32Timems); + +IMG_HANDLE OSFuncHighResTimerCreate(IMG_VOID); +IMG_UINT32 OSFuncHighResTimerGetus(IMG_HANDLE hTimer); +IMG_VOID OSFuncHighResTimerDestroy(IMG_HANDLE hTimer); +IMG_VOID OSReleaseThreadQuanta(IMG_VOID); +IMG_UINT32 OSPCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg); +IMG_VOID OSPCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value); + +IMG_IMPORT +IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset); + +IMG_IMPORT +IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); + +IMG_IMPORT IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs); + +#ifndef 
OSReadHWReg +IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset); +#endif +#ifndef OSWriteHWReg +IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); +#endif + +typedef IMG_VOID (*PFN_TIMER_FUNC)(IMG_VOID*); +IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout); +PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer); +PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer); +PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer); + +PVRSRV_ERROR OSGetSysMemSize(IMG_SIZE_T *pui32Bytes); + +typedef enum _HOST_PCI_INIT_FLAGS_ +{ + HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001, + HOST_PCI_INIT_FLAG_MSI = 0x00000002, + HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff +} HOST_PCI_INIT_FLAGS; + +struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_; +typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE; + +PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags); +PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags); +PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ); +IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index); +PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); +PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI); + +PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData); + +/****************************************************************************** + + @Function OSPanic + + @Description Take action in response to an 
unrecoverable driver error + + @Input IMG_VOID + + @Return IMG_VOID + +******************************************************************************/ +IMG_VOID OSPanic(IMG_VOID); + +IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID); + +typedef enum _img_verify_test +{ + PVR_VERIFY_WRITE = 0, + PVR_VERIFY_READ +} IMG_VERIFY_TEST; + +IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_SIZE_T ui32Bytes); + +PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_SIZE_T ui32Bytes); +PVRSRV_ERROR OSCopyFromUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_SIZE_T ui32Bytes); + +#if defined(__linux__) || defined(__QNXNTO__) +PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr, + IMG_SIZE_T ui32Bytes, + IMG_SYS_PHYADDR *psSysPAddr, + IMG_HANDLE *phOSWrapMem); +PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem); +#else +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSAcquirePhysPageAddr) +#endif +static INLINE PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr, + IMG_SIZE_T ui32Bytes, + IMG_SYS_PHYADDR *psSysPAddr, + IMG_HANDLE *phOSWrapMem) +{ + PVR_UNREFERENCED_PARAMETER(pvCPUVAddr); + PVR_UNREFERENCED_PARAMETER(ui32Bytes); + PVR_UNREFERENCED_PARAMETER(psSysPAddr); + PVR_UNREFERENCED_PARAMETER(phOSWrapMem); + return PVRSRV_OK; +} +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSReleasePhysPageAddr) +#endif +static INLINE PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem) +{ + PVR_UNREFERENCED_PARAMETER(hOSWrapMem); + return PVRSRV_OK; +} +#endif + +#if defined(__linux__) && defined(__KERNEL__) + +#define OS_SUPPORTS_IN_LISR + +static inline IMG_BOOL OSInLISR(IMG_VOID unref__ *pvSysData) +{ + PVR_UNREFERENCED_PARAMETER(pvSysData); + return (in_irq()) ? 
IMG_TRUE : IMG_FALSE; +} + +static inline IMG_VOID OSWriteMemoryBarrier(IMG_VOID) +{ + wmb(); +} + +static inline IMG_VOID OSMemoryBarrier(IMG_VOID) +{ + mb(); +} + +#else /* defined(__linux__) && defined(__KERNEL__) */ + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSWriteMemoryBarrier) +#endif +static INLINE IMG_VOID OSWriteMemoryBarrier(IMG_VOID) { } + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSMemoryBarrier) +#endif +static INLINE IMG_VOID OSMemoryBarrier(IMG_VOID) { } + +#endif /* defined(__linux__) && defined(__KERNEL__) */ + +/* Atomic functions */ +PVRSRV_ERROR OSAtomicAlloc(IMG_PVOID *ppvRefCount); +IMG_VOID OSAtomicFree(IMG_PVOID pvRefCount); +IMG_VOID OSAtomicInc(IMG_PVOID pvRefCount); +IMG_BOOL OSAtomicDecAndTest(IMG_PVOID pvRefCount); +IMG_UINT32 OSAtomicRead(IMG_PVOID pvRefCount); + +PVRSRV_ERROR OSTimeCreateWithUSOffset(IMG_PVOID *pvRet, IMG_UINT32 ui32MSOffset); +IMG_BOOL OSTimeHasTimePassed(IMG_PVOID pvData); +IMG_VOID OSTimeDestroy(IMG_PVOID pvData); + +#if defined(__linux__) +IMG_VOID OSReleaseBridgeLock(IMG_VOID); +IMG_VOID OSReacquireBridgeLock(IMG_VOID); +#else + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSReleaseBridgeLock) +#endif +static INLINE IMG_VOID OSReleaseBridgeLock(IMG_VOID) { } + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSReacquireBridgeLock) +#endif +static INLINE IMG_VOID OSReacquireBridgeLock(IMG_VOID) { } + +#endif + +#if defined(__linux__) +IMG_VOID OSGetCurrentProcessNameKM(IMG_CHAR *pszName, IMG_UINT32 ui32Size); +#else + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSGetCurrentProcessNameKM) +#endif +static INLINE IMG_VOID OSGetCurrentProcessNameKM(IMG_CHAR *pszName, IMG_UINT32 ui32Size) +{ + PVR_UNREFERENCED_PARAMETER(pszName); + PVR_UNREFERENCED_PARAMETER(ui32Size); +} + +#endif + +#if defined (__cplusplus) +} +#endif + +#endif /* __OSFUNC_H__ */ + +/****************************************************************************** + End of file (osfunc.h) 
+******************************************************************************/ + diff --git a/pvr-source/services4/srvkm/include/osperproc.h b/pvr-source/services4/srvkm/include/osperproc.h new file mode 100644 index 0000000..0b962b4 --- /dev/null +++ b/pvr-source/services4/srvkm/include/osperproc.h @@ -0,0 +1,94 @@ +/*************************************************************************/ /*! +@Title OS specific per process data interface +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description OS specific per process data interface +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef __OSPERPROC_H__ +#define __OSPERPROC_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +#if defined(__linux__) || defined(__QNXNTO__) +PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData); +PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData); + +PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase); +#else /* defined(__linux__) */ +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSPerProcessPrivateDataInit) +#endif +static INLINE PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData) +{ + PVR_UNREFERENCED_PARAMETER(phOsPrivateData); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSPerProcessPrivateDataDeInit) +#endif +static INLINE PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData) +{ + PVR_UNREFERENCED_PARAMETER(hOsPrivateData); + + return PVRSRV_OK; +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(OSPerProcessSetHandleOptions) +#endif +static INLINE PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase) +{ + PVR_UNREFERENCED_PARAMETER(psHandleBase); + + return PVRSRV_OK; +} +#endif /* defined(__linux__) */ + +#if defined (__cplusplus) +} +#endif + +#endif /* __OSPERPROC_H__ */ + 
+/****************************************************************************** + End of file (osperproc.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/include/pdump_int.h b/pvr-source/services4/srvkm/include/pdump_int.h new file mode 100644 index 0000000..a76fed0 --- /dev/null +++ b/pvr-source/services4/srvkm/include/pdump_int.h @@ -0,0 +1,100 @@ +/*************************************************************************/ /*! +@Title Parameter dump internal common functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. 
If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + + +#ifndef __PDUMP_INT_H__ +#define __PDUMP_INT_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +/* + * This file contains internal pdump utility functions which may be accessed + * from OS-specific code. The header should not be included outside of srvkm + * pdump files. + */ + +#if !defined(_UITRON) +/* + * No dbgdriver on uitron, so ignore any common functions for communicating + * with dbgdriver. + */ +#include "dbgdrvif.h" + +/* Callbacks which are registered with the debug driver. */ +IMG_EXPORT IMG_VOID PDumpConnectionNotify(IMG_VOID); + +#endif /* !defined(_UITRON) */ + +typedef enum +{ + /* Continuous writes are always captured in the dbgdrv; the buffer will + * expand if no client/sink process is running. 
+ */ + PDUMP_WRITE_MODE_CONTINUOUS = 0, + /* Last frame capture */ + PDUMP_WRITE_MODE_LASTFRAME, + /* Capture frame, binary data */ + PDUMP_WRITE_MODE_BINCM, + /* Persistent capture, append data to init phase */ + PDUMP_WRITE_MODE_PERSISTENT +} PDUMP_DDWMODE; + + +IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags); + +IMG_UINT32 PDumpOSDebugDriverWrite( PDBG_STREAM psStream, + PDUMP_DDWMODE eDbgDrvWriteMode, + IMG_UINT8 *pui8Data, + IMG_UINT32 ui32BCount, + IMG_UINT32 ui32Level, + IMG_UINT32 ui32DbgDrvFlags); + +#if defined (__cplusplus) +} +#endif +#endif /* __PDUMP_INT_H__ */ + +/****************************************************************************** + End of file (pdump_int.h) +******************************************************************************/ + diff --git a/pvr-source/services4/srvkm/include/pdump_km.h b/pvr-source/services4/srvkm/include/pdump_km.h new file mode 100644 index 0000000..e4325cc --- /dev/null +++ b/pvr-source/services4/srvkm/include/pdump_km.h @@ -0,0 +1,441 @@ +/*************************************************************************/ /*! +@Title pdump functions +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Main APIs for pdump functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef _PDUMP_KM_H_ +#define _PDUMP_KM_H_ + + +/* + * Include the OS abstraction APIs + */ +#include "pdump_osfunc.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/* + * Pull in pdump flags from services include + */ +#include "pdump.h" + +#define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0 +#define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0 + +/* + * PDump streams (common to all OSes) + */ +#define PDUMP_STREAM_PARAM2 0 +#define PDUMP_STREAM_SCRIPT2 1 +#define PDUMP_STREAM_DRIVERINFO 2 +#define PDUMP_NUM_STREAMS 3 + +#if defined(PDUMP_DEBUG_OUTFILES) +/* counter increments each time debug write is called */ +extern IMG_UINT32 g_ui32EveryLineCounter; +#endif + +#ifndef PDUMP +#define MAKEUNIQUETAG(hMemInfo) (0) +#endif + +#ifdef PDUMP + +#define MAKEUNIQUETAG(hMemInfo) (((BM_BUF *)(((PVRSRV_KERNEL_MEM_INFO *)(hMemInfo))->sMemBlk.hBuffer))->pMapping) + + IMG_IMPORT PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Value, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + + IMG_IMPORT PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psProcData, + IMG_PVOID pvAltLinAddr, + IMG_PVOID pvLinAddr, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + + IMG_IMPORT PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + PVRSRV_ERROR PDumpMemPagesKM(PVRSRV_DEVICE_IDENTIFIER *psDevID, + IMG_DEV_PHYADDR *pPages, + IMG_UINT32 ui32NumPages, + IMG_DEV_VIRTADDR sDevAddr, + IMG_UINT32 ui32Start, + IMG_UINT32 ui32Length, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + + PVRSRV_ERROR PDumpMemPDEntriesKM(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_HANDLE hOSMemHandle, + IMG_CPU_VIRTADDR pvLinAddr, + 
IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_BOOL bInitialisePages, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hUniqueTag2); + + PVRSRV_ERROR PDumpMemPTEntriesKM(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_HANDLE hOSMemHandle, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32Bytes, + IMG_UINT32 ui32Flags, + IMG_BOOL bInitialisePages, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hUniqueTag2); + IMG_VOID PDumpInitCommon(IMG_VOID); + IMG_VOID PDumpDeInitCommon(IMG_VOID); + IMG_VOID PDumpInit(IMG_VOID); + IMG_VOID PDumpDeInit(IMG_VOID); + IMG_BOOL PDumpIsSuspended(IMG_VOID); + PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID); + PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID); + IMG_IMPORT PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame); + IMG_IMPORT PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags); + IMG_IMPORT PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags); + + PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Flags); + PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + IMG_UINT32 ui32Flags, + PDUMP_POLL_OPERATOR eOperator); + PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegAddr, + IMG_UINT32 ui32RegValue, + IMG_UINT32 ui32Mask, + PDUMP_POLL_OPERATOR eOperator); + + IMG_IMPORT PVRSRV_ERROR PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Width, + IMG_UINT32 ui32Height, + IMG_UINT32 ui32StrideInBytes, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_HANDLE hDevMemContext, + IMG_UINT32 ui32Size, + PDUMP_PIXEL_FORMAT ePixelFormat, + PDUMP_MEM_FORMAT eMemFormat, + IMG_UINT32 ui32PDumpFlags); + IMG_IMPORT PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszPDumpRegName, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_UINT32 ui32Address, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags); 
+ + PVRSRV_ERROR PDumpRegKM(IMG_CHAR* pszPDumpRegName, + IMG_UINT32 dwReg, + IMG_UINT32 dwData); + + PVRSRV_ERROR PDumpComment(IMG_CHAR* pszFormat, ...) IMG_FORMAT_PRINTF(1, 2); + PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, + IMG_CHAR* pszFormat, + ...) IMG_FORMAT_PRINTF(2, 3); + + PVRSRV_ERROR PDumpPDReg(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32dwData, + IMG_HANDLE hUniqueTag); + PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_UINT32 ui32Reg, + IMG_UINT32 ui32Data, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + + IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID); + IMG_IMPORT IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID); + + IMG_VOID PDumpMallocPagesPhys(PVRSRV_DEVICE_IDENTIFIER *psDevID, + IMG_UINT32 ui32DevVAddr, + IMG_PUINT32 pui32PhysPages, + IMG_UINT32 ui32NumPages, + IMG_HANDLE hUniqueTag); + PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_CHAR *pszMemSpace, + IMG_UINT32 *pui32MMUContextID, + IMG_UINT32 ui32MMUType, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hOSMemHandle, + IMG_VOID *pvPDCPUAddr); + PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_CHAR *pszMemSpace, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32MMUType); + + PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 ui32Offset, + IMG_DEV_PHYADDR sPDDevPAddr, + IMG_HANDLE hUniqueTag1, + IMG_HANDLE hUniqueTag2); + + IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame); + + PVRSRV_ERROR PDumpSaveMemKM (PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32DataMaster, + IMG_UINT32 ui32PDumpFlags); + + PVRSRV_ERROR PDumpTASignatureRegisters(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_UINT32 ui32DumpFrameNum, + IMG_UINT32 ui32TAKickCount, + IMG_BOOL bLastFrame, + IMG_UINT32 *pui32Registers, + IMG_UINT32 ui32NumRegisters); + + PVRSRV_ERROR 
PDump3DSignatureRegisters(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_UINT32 ui32DumpFrameNum, + IMG_BOOL bLastFrame, + IMG_UINT32 *pui32Registers, + IMG_UINT32 ui32NumRegisters); + + PVRSRV_ERROR PDumpCounterRegisters(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_UINT32 ui32DumpFrameNum, + IMG_BOOL bLastFrame, + IMG_UINT32 *pui32Registers, + IMG_UINT32 ui32NumRegisters); + + PVRSRV_ERROR PDumpRegRead(IMG_CHAR *pszPDumpRegName, + const IMG_UINT32 dwRegOffset, + IMG_UINT32 ui32Flags); + + PVRSRV_ERROR PDumpCycleCountRegRead(PVRSRV_DEVICE_IDENTIFIER *psDevId, + const IMG_UINT32 dwRegOffset, + IMG_BOOL bLastFrame); + + PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags); + PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks); + + PVRSRV_ERROR PDumpMallocPages(PVRSRV_DEVICE_IDENTIFIER *psDevID, + IMG_UINT32 ui32DevVAddr, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32NumBytes, + IMG_UINT32 ui32PageSize, + IMG_BOOL bShared, + IMG_HANDLE hUniqueTag); + PVRSRV_ERROR PDumpMallocPageTable(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32Offset, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32NumBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + PVRSRV_ERROR PDumpFreePages(struct _BM_HEAP_ *psBMHeap, + IMG_DEV_VIRTADDR sDevVAddr, + IMG_UINT32 ui32NumBytes, + IMG_UINT32 ui32PageSize, + IMG_HANDLE hUniqueTag, + IMG_BOOL bInterleaved, + IMG_BOOL bSparse); + PVRSRV_ERROR PDumpFreePageTable(PVRSRV_DEVICE_IDENTIFIER *psDevID, + IMG_HANDLE hOSMemHandle, + IMG_CPU_VIRTADDR pvLinAddr, + IMG_UINT32 ui32NumBytes, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + + IMG_IMPORT PVRSRV_ERROR PDumpHWPerfCBKM(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32PDumpFlags); + + PVRSRV_ERROR PDumpSignatureBuffer(PVRSRV_DEVICE_IDENTIFIER *psDevId, + IMG_CHAR *pszFileName, + IMG_CHAR 
*pszBufferType, + IMG_UINT32 ui32FileOffset, + IMG_DEV_VIRTADDR sDevBaseAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32MMUContextID, + IMG_UINT32 ui32PDumpFlags); + + PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo, + IMG_UINT32 ui32ROffOffset, + IMG_UINT32 ui32WPosVal, + IMG_UINT32 ui32PacketSize, + IMG_UINT32 ui32BufferSize, + IMG_UINT32 ui32Flags, + IMG_HANDLE hUniqueTag); + + PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR *pszPDumpRegName, + IMG_UINT32 ui32RegOffset, + IMG_UINT32 ui32WPosVal, + IMG_UINT32 ui32PacketSize, + IMG_UINT32 ui32BufferSize, + IMG_UINT32 ui32Flags); + + IMG_VOID PDumpVGXMemToFile(IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 uiAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags, + IMG_HANDLE hUniqueTag); + + IMG_VOID PDumpSuspendKM(IMG_VOID); + IMG_VOID PDumpResumeKM(IMG_VOID); + + /* New pdump common functions */ + PVRSRV_ERROR PDumpStoreMemToFile(PDUMP_MMU_ATTRIB *psMMUAttrib, + IMG_CHAR *pszFileName, + IMG_UINT32 ui32FileOffset, + PVRSRV_KERNEL_MEM_INFO *psMemInfo, + IMG_UINT32 uiAddr, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32PDumpFlags, + IMG_HANDLE hUniqueTag); + + #define PDUMPMEMPOL PDumpMemPolKM + #define PDUMPMEM PDumpMemKM + #define PDUMPMEMPTENTRIES PDumpMemPTEntriesKM + #define PDUMPPDENTRIES PDumpMemPDEntriesKM + #define PDUMPMEMUM PDumpMemUM + #define PDUMPINIT PDumpInitCommon + #define PDUMPDEINIT PDumpDeInitCommon + #define PDUMPISLASTFRAME PDumpIsLastCaptureFrameKM + #define PDUMPTESTFRAME PDumpIsCaptureFrameKM + #define PDUMPTESTNEXTFRAME PDumpTestNextFrame + #define PDUMPREGWITHFLAGS PDumpRegWithFlagsKM + #define PDUMPREG PDumpRegKM + #define PDUMPCOMMENT PDumpComment + #define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags + #define PDUMPREGPOL PDumpRegPolKM + #define PDUMPREGPOLWITHFLAGS PDumpRegPolWithFlagsKM + #define PDUMPMALLOCPAGES PDumpMallocPages + #define PDUMPMALLOCPAGETABLE PDumpMallocPageTable + #define PDUMPSETMMUCONTEXT PDumpSetMMUContext + 
#define PDUMPCLEARMMUCONTEXT PDumpClearMMUContext + #define PDUMPPDDEVPADDR PDumpPDDevPAddrKM + #define PDUMPFREEPAGES PDumpFreePages + #define PDUMPFREEPAGETABLE PDumpFreePageTable + #define PDUMPPDREG PDumpPDReg + #define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags + #define PDUMPCBP PDumpCBP + #define PDUMPREGBASEDCBP PDumpRegBasedCBP + #define PDUMPMALLOCPAGESPHYS PDumpMallocPagesPhys + #define PDUMPENDINITPHASE PDumpStopInitPhaseKM + #define PDUMPBITMAPKM PDumpBitmapKM + #define PDUMPDRIVERINFO PDumpDriverInfoKM + #define PDUMPIDLWITHFLAGS PDumpIDLWithFlags + #define PDUMPIDL PDumpIDL + #define PDUMPSUSPEND PDumpSuspendKM + #define PDUMPRESUME PDumpResumeKM + +#else +#if defined LINUX || defined (__QNXNTO__) || defined GCC_IA32 || defined GCC_ARM + #define PDUMPMEMPOL(args...) + #define PDUMPMEM(args...) + #define PDUMPMEMPTENTRIES(args...) + #define PDUMPPDENTRIES(args...) + #define PDUMPMEMUM(args...) + #define PDUMPINIT(args...) + #define PDUMPDEINIT(args...) + #define PDUMPISLASTFRAME(args...) + #define PDUMPTESTFRAME(args...) + #define PDUMPTESTNEXTFRAME(args...) + #define PDUMPREGWITHFLAGS(args...) + #define PDUMPREG(args...) + #define PDUMPCOMMENT(args...) + #define PDUMPREGPOL(args...) + #define PDUMPREGPOLWITHFLAGS(args...) + #define PDUMPMALLOCPAGES(args...) + #define PDUMPMALLOCPAGETABLE(args...) + #define PDUMPSETMMUCONTEXT(args...) + #define PDUMPCLEARMMUCONTEXT(args...) + #define PDUMPPDDEVPADDR(args...) + #define PDUMPFREEPAGES(args...) + #define PDUMPFREEPAGETABLE(args...) + #define PDUMPPDREG(args...) + #define PDUMPPDREGWITHFLAGS(args...) + #define PDUMPSYNC(args...) + #define PDUMPCOPYTOMEM(args...) + #define PDUMPWRITE(args...) + #define PDUMPCBP(args...) + #define PDUMPREGBASEDCBP(args...) + #define PDUMPCOMMENTWITHFLAGS(args...) + #define PDUMPMALLOCPAGESPHYS(args...) + #define PDUMPENDINITPHASE(args...) + #define PDUMPMSVDXREG(args...) + #define PDUMPMSVDXREGWRITE(args...) + #define PDUMPMSVDXREGREAD(args...) 
+ #define PDUMPMSVDXPOLEQ(args...) + #define PDUMPMSVDXPOL(args...) + #define PDUMPBITMAPKM(args...) + #define PDUMPDRIVERINFO(args...) + #define PDUMPIDLWITHFLAGS(args...) + #define PDUMPIDL(args...) + #define PDUMPSUSPEND(args...) + #define PDUMPRESUME(args...) + #define PDUMPMSVDXWRITEREF(args...) + #else + #error Compiler not specified + #endif +#endif + +#if defined (__cplusplus) +} +#endif + +#endif /* _PDUMP_KM_H_ */ + +/****************************************************************************** + End of file (pdump_km.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/include/pdump_osfunc.h b/pvr-source/services4/srvkm/include/pdump_osfunc.h new file mode 100644 index 0000000..0f2e103 --- /dev/null +++ b/pvr-source/services4/srvkm/include/pdump_osfunc.h @@ -0,0 +1,337 @@ +/*************************************************************************/ /*! +@Title OS-independent interface to helper functions for pdump +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#include <stdarg.h> + +#if defined(__cplusplus) +extern "C" { +#endif + + +/* + * Some OSes (WinXP,CE) allocate the string on the stack, but some + * (Linux,Symbian) use a global variable/lock instead. + * Would be good to use the same across all OSes. + * + * A handle is returned which represents IMG_CHAR* type on all OSes except + * Symbian when it represents PDumpState* type. + * + * The allocated buffer length is also returned on OSes where it's + * supported (e.g. Linux). 
+ */ +#define MAX_PDUMP_STRING_LENGTH (256) +#if defined(__QNXNTO__) +#define PDUMP_GET_SCRIPT_STRING() \ + IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH]; \ + IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1; \ + IMG_HANDLE hScript = (IMG_HANDLE)pszScript; + +#define PDUMP_GET_MSG_STRING() \ + IMG_CHAR pszMsg[MAX_PDUMP_STRING_LENGTH]; \ + IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1; + +#define PDUMP_GET_FILE_STRING() \ + IMG_CHAR pszFileName[MAX_PDUMP_STRING_LENGTH]; \ + IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1; + +#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \ + IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH]; \ + IMG_CHAR pszFileName[MAX_PDUMP_STRING_LENGTH]; \ + IMG_UINT32 ui32MaxLenScript = MAX_PDUMP_STRING_LENGTH-1; \ + IMG_UINT32 ui32MaxLenFileName = MAX_PDUMP_STRING_LENGTH-1; \ + IMG_HANDLE hScript = (IMG_HANDLE)pszScript; + +#else /* WIN32 or QNX */ + + + /* + * Linux + */ +#define PDUMP_GET_SCRIPT_STRING() \ + IMG_HANDLE hScript; \ + IMG_UINT32 ui32MaxLen; \ + PVRSRV_ERROR eError; \ + eError = PDumpOSGetScriptString(&hScript, &ui32MaxLen);\ + if(eError != PVRSRV_OK) return eError; + +#define PDUMP_GET_MSG_STRING() \ + IMG_CHAR *pszMsg; \ + IMG_UINT32 ui32MaxLen; \ + PVRSRV_ERROR eError; \ + eError = PDumpOSGetMessageString(&pszMsg, &ui32MaxLen);\ + if(eError != PVRSRV_OK) return eError; + +#define PDUMP_GET_FILE_STRING() \ + IMG_CHAR *pszFileName; \ + IMG_UINT32 ui32MaxLen; \ + PVRSRV_ERROR eError; \ + eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLen);\ + if(eError != PVRSRV_OK) return eError; + +#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \ + IMG_HANDLE hScript; \ + IMG_CHAR *pszFileName; \ + IMG_UINT32 ui32MaxLenScript; \ + IMG_UINT32 ui32MaxLenFileName; \ + PVRSRV_ERROR eError; \ + eError = PDumpOSGetScriptString(&hScript, &ui32MaxLenScript);\ + if(eError != PVRSRV_OK) return eError; \ + eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLenFileName);\ + if(eError != PVRSRV_OK) return eError; + + /*! 
+ * @name PDumpOSGetScriptString + * @brief Get the "script" buffer + * @param phScript - buffer handle for pdump script + * @param pui32MaxLen - max length of the script buffer + * FIXME: the max length should be internal to the OS-specific code + * @return error (always PVRSRV_OK on some OSes) + */ + PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript, IMG_UINT32 *pui32MaxLen); + + /*! + * @name PDumpOSGetMessageString + * @brief Get the "message" buffer + * @param pszMsg - buffer pointer for pdump messages + * @param pui32MaxLen - max length of the message buffer + * FIXME: the max length should be internal to the OS-specific code + * @return error (always PVRSRV_OK on some OSes) + */ + PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg, IMG_UINT32 *pui32MaxLen); + + /*! + * @name PDumpOSGetFilenameString + * @brief Get the "filename" buffer + * @param ppszFile - buffer pointer for filename + * @param pui32MaxLen - max length of the filename buffer + * FIXME: the max length should be internal to the OS-specific code + * @return error (always PVRSRV_OK on some OSes) + */ + PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile, IMG_UINT32 *pui32MaxLen); + +#endif /* WIN32 or QNX */ + + +/* + * Define macro for processing variable args list in OS-independent + * manner. See e.g. PDumpComment(). + */ + +#define PDUMP_va_list va_list +#define PDUMP_va_start va_start +#define PDUMP_va_end va_end + + + +/*! + * @name PDumpOSGetStream + * @brief Get a handle to the labelled stream (cast the handle to PDBG_STREAM to use it) + * @param ePDumpStream - stream label + */ +IMG_HANDLE PDumpOSGetStream(IMG_UINT32 ePDumpStream); + +/*! + * @name PDumpOSGetStreamOffset + * @brief Return current offset within the labelled stream + * @param ePDumpStream - stream label + */ +IMG_UINT32 PDumpOSGetStreamOffset(IMG_UINT32 ePDumpStream); + +/*! 
+ * @name PDumpOSGetParamFileNum + * @brief Return file number of the 'script' stream, in the case that the file was split + * @param ePDumpStream - stream label + */ +IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID); + +/*! + * @name PDumpOSCheckForSplitting + * @brief Check if the requested pdump params are too large for a single file + * @param hStream - pdump stream + * @param ui32Size - size of params to dump (bytes) + * @param ui32Flags - pdump flags + */ +IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags); + +/*! + * @name PDumpOSIsSuspended + * @brief Is the pdump stream busy? + * @return IMG_BOOL + */ +IMG_BOOL PDumpOSIsSuspended(IMG_VOID); + +/*! + * @name PDumpOSIsSuspended + * @brief Is the pdump jump table initialised? + * @return IMG_BOOL + */ +IMG_BOOL PDumpOSJTInitialised(IMG_VOID); + +/*! + * @name PDumpOSWriteString + * @brief General function for writing to pdump stream. + * Usually more convenient to use PDumpOSWriteString2 below. + * @param hDbgStream - pdump stream handle + * @param psui8Data - data to write + * @param ui32Size - size of write + * @param ui32Flags - pdump flags + * @return error + */ +IMG_BOOL PDumpOSWriteString(IMG_HANDLE hDbgStream, + IMG_UINT8 *psui8Data, + IMG_UINT32 ui32Size, + IMG_UINT32 ui32Flags); + +/*! + * @name PDumpOSWriteString2 + * @brief Write a string to the "script" output stream + * @param pszScript - buffer to write (ptr to state structure on Symbian) + * @param ui32Flags - pdump flags + * @return error + */ +IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags); + +/*! 
+ * @name PDumpOSBufprintf + * @brief Printf to OS-specific pdump state buffer + * @param hBuf - buffer handle to write into (ptr to state structure on Symbian) + * @param ui32ScriptSizeMax - maximum size of data to write (not supported on all OSes) + * @param pszFormat - format string + */ +PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...) IMG_FORMAT_PRINTF(3, 4); + +/*! + * @name PDumpOSDebugPrintf + * @brief Debug message during pdumping + * @param pszFormat - format string + */ +IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...) IMG_FORMAT_PRINTF(1, 2); + +/* + * Write into a IMG_CHAR* on all OSes. Can be allocated on the stack or heap. + */ +/*! + * @name PDumpOSSprintf + * @brief Printf to IMG char array + * @param pszComment - char array to print into + * @param pszFormat - format string + */ +PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...) IMG_FORMAT_PRINTF(3, 4); + +/*! + * @name PDumpOSVSprintf + * @brief Printf to IMG string using variable args (see stdarg.h). This is necessary + * because the ... notation does not support nested function calls. + * @param pszMsg - char array to print into + * @param ui32ScriptSizeMax - maximum size of data to write (not supported on all OSes) + * @param pszFormat - format string + * @param vaArgs - variable args structure (from stdarg.h) + */ +PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszMsg, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs) IMG_FORMAT_PRINTF(3, 0); + +/*! + * @name PDumpOSBuflen + * @param hBuffer - handle to buffer (ptr to state structure on Symbian) + * @param ui32BuffeRSizeMax - max size of buffer (chars) + * @return length of buffer, will always be <= ui32BufferSizeMax + */ +IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax); + +/*! 
+ * @name PDumpOSVerifyLineEnding + * @brief Put \r\n sequence at the end if it isn't already there + * @param hBuffer - handle to buffer + * @param ui32BufferSizeMax - max size of buffer (chars) + */ +IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax); + +/*! + * @name PDumpOSCPUVAddrToDevPAddr + * @brief OS function to convert CPU virtual to device physical for dumping pages + * @param hOSMemHandle mem allocation handle (used if kernel virtual mem space is limited, e.g. linux) + * @param ui32Offset dword offset into allocation (for use with mem handle, e.g. linux) + * @param pui8LinAddr CPU linear addr (usually a kernel virtual address) + * @param ui32PageSize page size, used for assertion check + * @return psDevPAddr device physical addr + */ +IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, + IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32Offset, + IMG_UINT8 *pui8LinAddr, + IMG_UINT32 ui32PageSize, + IMG_DEV_PHYADDR *psDevPAddr); + +/*! + * @name PDumpOSCPUVAddrToPhysPages + * @brief OS function to convert CPU virtual to backing physical pages + * @param hOSMemHandle mem allocation handle (used if kernel virtual mem space is limited, e.g. linux) + * @param ui32Offset offset within mem allocation block + * @param pui8LinAddr CPU linear addr + * @param ui32DataPageMask mask for data page (= data page size -1) + * @return pui32PageOffset CPU page offset (same as device page offset if page sizes equal) + */ +IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle, + IMG_UINT32 ui32Offset, + IMG_PUINT8 pui8LinAddr, + IMG_UINT32 ui32DataPageMask, + IMG_UINT32 *pui32PageOffset); + +/*! + * @name PDumpOSReleaseExecution + * @brief OS function to switch to another process, to clear pdump buffers + */ +IMG_VOID PDumpOSReleaseExecution(IMG_VOID); + +/*! + * @name PDumpOSIsCaptureFrameKM + * @brief Is the current frame a capture frame? + */ +IMG_BOOL PDumpOSIsCaptureFrameKM(IMG_VOID); + +/*! 
+ * @name PDumpOSSetFrameKM + * @brief Set frame counter + */ +PVRSRV_ERROR PDumpOSSetFrameKM(IMG_UINT32 ui32Frame); + +#if defined (__cplusplus) +} +#endif diff --git a/pvr-source/services4/srvkm/include/perfkm.h b/pvr-source/services4/srvkm/include/perfkm.h new file mode 100644 index 0000000..458a29b --- /dev/null +++ b/pvr-source/services4/srvkm/include/perfkm.h @@ -0,0 +1,53 @@ +/*************************************************************************/ /*! +@Title Perf initialisation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. 
+ +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef _PERFKM_H_ +#define _PERFKM_H_ + +#include "img_types.h" + +#define PERFINIT() +#define PERFDEINIT() + +#endif /* _PERFKM_H_ */ + +/****************************************************************************** + End of file (perfkm.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/include/perproc.h b/pvr-source/services4/srvkm/include/perproc.h new file mode 100644 index 0000000..d603613 --- /dev/null +++ b/pvr-source/services4/srvkm/include/perproc.h @@ -0,0 +1,150 @@ +/*************************************************************************/ /*! +@Title Handle Manager API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Perprocess data +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#ifndef __PERPROC_H__ +#define __PERPROC_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "img_types.h" +#include "resman.h" + +#include "handle.h" + +typedef struct _PVRSRV_PER_PROCESS_DATA_ +{ + IMG_UINT32 ui32PID; + IMG_HANDLE hBlockAlloc; + PRESMAN_CONTEXT hResManContext; +#if defined (SUPPORT_SID_INTERFACE) + IMG_SID hPerProcData; +#else + IMG_HANDLE hPerProcData; +#endif + PVRSRV_HANDLE_BASE *psHandleBase; +#if defined (SUPPORT_SID_INTERFACE) + /* Handles are being allocated in batches */ + IMG_BOOL bHandlesBatched; +#else +#if defined (PVR_SECURE_HANDLES) + /* Handles are being allocated in batches */ + IMG_BOOL bHandlesBatched; +#endif /* PVR_SECURE_HANDLES */ +#endif /* SUPPORT_SID_INTERFACE */ + IMG_UINT32 ui32RefCount; + + /* True if the process is the initialisation server. */ + IMG_BOOL bInitProcess; +#if defined(PDUMP) + /* True if pdump data from the process is 'persistent' */ + IMG_BOOL bPDumpPersistent; +#if defined(SUPPORT_PDUMP_MULTI_PROCESS) + /* True if this process is marked for pdumping. This flag is + * significant in a multi-app environment. + */ + IMG_BOOL bPDumpActive; +#endif /* SUPPORT_PDUMP_MULTI_PROCESS */ +#endif + /* + * OS specific data can be stored via this handle. + * See osperproc.h for a generic mechanism for initialising + * this field. 
+ */ + IMG_HANDLE hOsPrivateData; +} PVRSRV_PER_PROCESS_DATA; + +PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID); + +PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID, IMG_UINT32 ui32Flags); +IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID); + +PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID); +PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID); + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVFindPerProcessData) +#endif +static INLINE +PVRSRV_PER_PROCESS_DATA *PVRSRVFindPerProcessData(IMG_VOID) +{ + return PVRSRVPerProcessData(OSGetCurrentProcessIDKM()); +} + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVProcessPrivateData) +#endif +static INLINE +IMG_HANDLE PVRSRVProcessPrivateData(PVRSRV_PER_PROCESS_DATA *psPerProc) +{ + return (psPerProc != IMG_NULL) ? psPerProc->hOsPrivateData : IMG_NULL; +} + + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVPerProcessPrivateData) +#endif +static INLINE +IMG_HANDLE PVRSRVPerProcessPrivateData(IMG_UINT32 ui32PID) +{ + return PVRSRVProcessPrivateData(PVRSRVPerProcessData(ui32PID)); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVFindPerProcessPrivateData) +#endif +static INLINE +IMG_HANDLE PVRSRVFindPerProcessPrivateData(IMG_VOID) +{ + return PVRSRVProcessPrivateData(PVRSRVFindPerProcessData()); +} + +#if defined (__cplusplus) +} +#endif + +#endif /* __PERPROC_H__ */ + +/****************************************************************************** + End of file (perproc.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/include/power.h b/pvr-source/services4/srvkm/include/power.h new file mode 100644 index 0000000..0abaf75 --- /dev/null +++ b/pvr-source/services4/srvkm/include/power.h @@ -0,0 +1,140 @@ +/*************************************************************************/ /*! +@Title Power Management Functions +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Main APIs for power management functions +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#ifndef POWER_H +#define POWER_H + +#if defined(__cplusplus) +extern "C" { +#endif + + +/*! + ***************************************************************************** + * Power management + *****************************************************************************/ + +typedef struct _PVRSRV_POWER_DEV_TAG_ +{ + PFN_PRE_POWER pfnPrePower; + PFN_POST_POWER pfnPostPower; + PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange; + PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange; + IMG_HANDLE hDevCookie; + IMG_UINT32 ui32DeviceIndex; + PVRSRV_DEV_POWER_STATE eDefaultPowerState; + PVRSRV_DEV_POWER_STATE eCurrentPowerState; + struct _PVRSRV_POWER_DEV_TAG_ *psNext; + struct _PVRSRV_POWER_DEV_TAG_ **ppsThis; + +} PVRSRV_POWER_DEV; + +typedef enum _PVRSRV_INIT_SERVER_STATE_ +{ + PVRSRV_INIT_SERVER_Unspecified = -1, + PVRSRV_INIT_SERVER_RUNNING = 0, + PVRSRV_INIT_SERVER_RAN = 1, + PVRSRV_INIT_SERVER_SUCCESSFUL = 2, + PVRSRV_INIT_SERVER_NUM = 3, + PVRSRV_INIT_SERVER_FORCE_I32 = 0x7fffffff + +} PVRSRV_INIT_SERVER_STATE, *PPVRSRV_INIT_SERVER_STATE; + +IMG_IMPORT +IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState); + + + +IMG_IMPORT +PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID, + IMG_BOOL bSystemPowerEvent); +IMG_IMPORT 
+IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex, + PVRSRV_DEV_POWER_STATE eNewPowerState); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState); +IMG_IMPORT +PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVSetPowerStateKM (PVRSRV_SYS_POWER_STATE ePVRState); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex, + PFN_PRE_POWER pfnPrePower, + PFN_POST_POWER pfnPostPower, + PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange, + PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange, + IMG_HANDLE hDevCookie, + PVRSRV_DEV_POWER_STATE eCurrentPowerState, + PVRSRV_DEV_POWER_STATE eDefaultPowerState); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex); + +IMG_IMPORT +IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex, + IMG_BOOL bIdleDevice, + IMG_VOID *pvInfo); + +IMG_IMPORT +IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex, + IMG_BOOL bIdleDevice, + IMG_VOID *pvInfo); + +#if defined (__cplusplus) +} +#endif +#endif /* POWER_H */ + +/****************************************************************************** + End of file (power.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/include/queue.h b/pvr-source/services4/srvkm/include/queue.h new file mode 100644 index 0000000..58f8093 --- /dev/null +++ b/pvr-source/services4/srvkm/include/queue.h @@ -0,0 +1,145 @@ +/*************************************************************************/ /*! +@Title Command Queue API +@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved +@Description Internal structures and definitions for command queues +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef QUEUE_H +#define QUEUE_H + + +#if defined(__cplusplus) +extern "C" { +#endif + +/*! + * Macro to Read Offset in given command queue + */ +#define UPDATE_QUEUE_ROFF(psQueue, ui32Size) \ + (psQueue)->ui32ReadOffset = ((psQueue)->ui32ReadOffset + (ui32Size)) \ + & ((psQueue)->ui32QueueSize - 1); + +/*! + generic cmd complete structure. + This structure represents the storage required between starting and finishing + a given cmd and is required to hold the generic sync object update data. 
+ note: for any given system we know what command types we support and + therefore how much storage is required for any number of commands in progress + */ + typedef struct _COMMAND_COMPLETE_DATA_ + { + IMG_BOOL bInUse; + /* <arg(s) to PVRSRVProcessQueues>; */ /*!< TBD */ + IMG_UINT32 ui32DstSyncCount; /*!< number of dst sync objects */ + IMG_UINT32 ui32SrcSyncCount; /*!< number of src sync objects */ + PVRSRV_SYNC_OBJECT *psDstSync; /*!< dst sync ptr list, + allocated on back of this structure */ + PVRSRV_SYNC_OBJECT *psSrcSync; /*!< src sync ptr list, + allocated on back of this structure */ + IMG_UINT32 ui32AllocSize; /*!< allocated size*/ + PFN_QUEUE_COMMAND_COMPLETE pfnCommandComplete; /*!< Command complete callback */ + IMG_HANDLE hCallbackData; /*!< Command complete callback data */ + }COMMAND_COMPLETE_DATA, *PCOMMAND_COMPLETE_DATA; + +#if !defined(USE_CODE) +IMG_VOID QueueDumpDebugInfo(IMG_VOID); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVProcessQueues (IMG_BOOL bFlush); + +#if defined(__linux__) && defined(__KERNEL__) +#include <linux/types.h> +#include <linux/seq_file.h> +void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off); +void ProcSeqShowQueue(struct seq_file *sfile,void* el); +#endif + + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize, + PVRSRV_QUEUE_INFO **ppsQueueInfo); +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue, + PVRSRV_COMMAND **ppsCommand, + IMG_UINT32 ui32DevIndex, + IMG_UINT16 CommandType, + IMG_UINT32 ui32DstSyncCount, + PVRSRV_KERNEL_SYNC_INFO *apsDstSync[], + IMG_UINT32 ui32SrcSyncCount, + PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[], + IMG_SIZE_T ui32DataByteSize, + PFN_QUEUE_COMMAND_COMPLETE pfnCommandComplete, + IMG_HANDLE hCallbackData); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue, + IMG_SIZE_T 
ui32ParamSize, + IMG_VOID **ppvSpace); + +IMG_IMPORT +PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue, + PVRSRV_COMMAND *psCommand); + +IMG_IMPORT +IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, IMG_BOOL bScheduleMISR); + +IMG_IMPORT +PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex, + PFN_CMD_PROC *ppfnCmdProcList, + IMG_UINT32 ui32MaxSyncsPerCmd[][2], + IMG_UINT32 ui32CmdCount); +IMG_IMPORT +PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex, + IMG_UINT32 ui32CmdCount); + +#endif /* !defined(USE_CODE) */ + + +#if defined (__cplusplus) +} +#endif + +#endif /* QUEUE_H */ + +/****************************************************************************** + End of file (queue.h) +******************************************************************************/ diff --git a/pvr-source/services4/srvkm/include/ra.h b/pvr-source/services4/srvkm/include/ra.h new file mode 100644 index 0000000..b84a8e5 --- /dev/null +++ b/pvr-source/services4/srvkm/include/ra.h @@ -0,0 +1,293 @@ +/*************************************************************************/ /*! +@Title Resource Allocator API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef _RA_H_ +#define _RA_H_ + +#include "img_types.h" +#include "hash.h" +#include "osfunc.h" + +/** Resource arena. + * struct _RA_ARENA_ deliberately opaque + */ +typedef struct _RA_ARENA_ RA_ARENA; //PRQA S 3313 +typedef struct _BM_MAPPING_ BM_MAPPING; + + + +/** Enable support for arena statistics. */ +#define RA_STATS + + +/** Resource arena statistics. 
*/ +struct _RA_STATISTICS_ +{ + /** total number of segments add to the arena */ + IMG_SIZE_T uSpanCount; + + /** number of current live segments within the arena */ + IMG_SIZE_T uLiveSegmentCount; + + /** number of current free segments within the arena */ + IMG_SIZE_T uFreeSegmentCount; + + /** total number of resource within the arena */ + IMG_SIZE_T uTotalResourceCount; + + /** number of free resource within the arena */ + IMG_SIZE_T uFreeResourceCount; + + /** total number of resources allocated from the arena */ + IMG_SIZE_T uCumulativeAllocs; + + /** total number of resources returned to the arena */ + IMG_SIZE_T uCumulativeFrees; + + /** total number of spans allocated by the callback mechanism */ + IMG_SIZE_T uImportCount; + + /** total number of spans deallocated by the callback mechanism */ + IMG_SIZE_T uExportCount; + + IMG_SIZE_T uFailedAllocCount; + +}; +typedef struct _RA_STATISTICS_ RA_STATISTICS; + +struct _RA_SEGMENT_DETAILS_ +{ + IMG_SIZE_T uiSize; + IMG_CPU_PHYADDR sCpuPhyAddr; + IMG_HANDLE hSegment; +}; +typedef struct _RA_SEGMENT_DETAILS_ RA_SEGMENT_DETAILS; + +/** + * @Function RA_Create + * + * @Description + * + * To create a resource arena. + * + * @Input name - the name of the arena for diagnostic purposes. + * @Input base - the base of an initial resource span or 0. + * @Input uSize - the size of an initial resource span or 0. + * @Input pRef - the reference to return for the initial resource or 0. + * @Input uQuantum - the arena allocation quantum. + * @Input alloc - a resource allocation callback or 0. + * @Input free - a resource de-allocation callback or 0. + * @Input import_handle - handle passed to alloc and free or 0. + * @Return arena handle, or IMG_NULL. 
+ */ +RA_ARENA * +RA_Create (IMG_CHAR *name, + IMG_UINTPTR_T base, + IMG_SIZE_T uSize, + BM_MAPPING *psMapping, + IMG_SIZE_T uQuantum, + IMG_BOOL (*imp_alloc)(IMG_VOID *_h, + IMG_SIZE_T uSize, + IMG_SIZE_T *pActualSize, + BM_MAPPING **ppsMapping, + IMG_UINT32 uFlags, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINTPTR_T *pBase), + IMG_VOID (*imp_free) (IMG_VOID *, + IMG_UINTPTR_T, + BM_MAPPING *), + IMG_VOID (*backingstore_free) (IMG_VOID *, + IMG_SIZE_T, + IMG_SIZE_T, + IMG_HANDLE), + IMG_VOID *import_handle); + +/** + * @Function RA_Delete + * + * @Description + * + * To delete a resource arena. All resources allocated from the arena + * must be freed before deleting the arena. + * + * @Input pArena - the arena to delete. + * @Return None + */ +IMG_VOID +RA_Delete (RA_ARENA *pArena); + +/** + * @Function RA_TestDelete + * + * @Description + * + * To test whether it is safe to delete a resource arena. If any allocations + * have not been freed, the RA must not be deleted. + * + * @Input pArena - the arena to test. + * @Return IMG_BOOL - IMG_TRUE if is safe to go on and call RA_Delete. + */ +IMG_BOOL +RA_TestDelete (RA_ARENA *pArena); + +/** + * @Function RA_Add + * + * @Description + * + * To add a resource span to an arena. The span must not overlap with + * any span previously added to the arena. + * + * @Input pArena - the arena to add a span into. + * @Input base - the base of the span. + * @Input uSize - the extent of the span. + * @Return IMG_TRUE - success, IMG_FALSE - failure + */ +IMG_BOOL +RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize); + +/** + * @Function RA_Alloc + * + * @Description + * + * To allocate resource from an arena. + * + * @Input pArena - the arena + * @Input uRequestSize - the size of resource segment requested. + * @Output pActualSize - the actual_size of resource segment allocated, + * typcially rounded up by quantum. 
+ * @Output ppsMapping - the user reference associated with allocated + * resource span. + * @Input uFlags - flags influencing allocation policy. + * @Input uAlignment - the alignment constraint required for the + * allocated segment, use 0 if alignment not required. + * @Input uAlignmentOffset - the required alignment offset + * @Input pvPrivData - private data passed to OS allocator + * @Input ui32PrivData - length of private data + * + * @Output pBase - allocated base resource + * @Return IMG_TRUE - success, IMG_FALSE - failure + */ +IMG_BOOL +RA_Alloc (RA_ARENA *pArena, + IMG_SIZE_T uSize, + IMG_SIZE_T *pActualSize, + BM_MAPPING **ppsMapping, + IMG_UINT32 uFlags, + IMG_UINT32 uAlignment, + IMG_UINT32 uAlignmentOffset, + IMG_PVOID pvPrivData, + IMG_UINT32 ui32PrivDataLength, + IMG_UINTPTR_T *pBase); + +/** + * @Function RA_Free + * + * @Description To free a resource segment. + * + * @Input pArena - the arena the segment was originally allocated from. + * @Input base - the base of the resource span to free. + * @Input bFreeBackingStore - Should backing store memory be freed? + * + * @Return None + */ +IMG_VOID +RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore); + + +#ifdef RA_STATS + +#define CHECK_SPACE(total) \ +{ \ + if((total)<100) \ + return PVRSRV_ERROR_INVALID_PARAMS; \ +} + +#define UPDATE_SPACE(str, count, total) \ +{ \ + if((count) == -1) \ + return PVRSRV_ERROR_INVALID_PARAMS; \ + else \ + { \ + (str) += (count); \ + (total) -= (count); \ + } \ +} + + +/** + * @Function RA_GetNextLiveSegment + * + * @Description Returns details of the next live resource segments + * + * @Input pArena - the arena the segment was originally allocated from. 
+ * @Output psSegDetails - rtn details of segments + * + * @Return IMG_TRUE if operation succeeded + */ +IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails); + + +/** + * @Function RA_GetStats + * + * @Description gets stats on a given arena + * + * @Input pArena - the arena the segment was originally allocated from. + * @Input ppszStr - string to write stats to + * @Input pui32StrLen - length of string + * + * @Return PVRSRV_ERROR + */ +PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena, + IMG_CHAR **ppszStr, + IMG_UINT32 *pui32StrLen); + +PVRSRV_ERROR RA_GetStatsFreeMem(RA_ARENA *pArena, + IMG_CHAR **ppszStr, + IMG_UINT32 *pui32StrLen); + +#endif /* #ifdef RA_STATS */ + +#endif + diff --git a/pvr-source/services4/srvkm/include/refcount.h b/pvr-source/services4/srvkm/include/refcount.h new file mode 100644 index 0000000..0e3479d --- /dev/null +++ b/pvr-source/services4/srvkm/include/refcount.h @@ -0,0 +1,203 @@ +/*************************************************************************/ /*! +@Title Services reference count debugging +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __REFCOUNT_H__ +#define __REFCOUNT_H__ + +#include "pvr_bridge_km.h" + +#if defined(PVRSRV_REFCOUNT_DEBUG) + +void PVRSRVDumpRefCountCCB(void); + +#define PVRSRVKernelSyncInfoIncRef(x...) \ + PVRSRVKernelSyncInfoIncRef2(__FILE__, __LINE__, x) +#define PVRSRVKernelSyncInfoDecRef(x...) \ + PVRSRVKernelSyncInfoDecRef2(__FILE__, __LINE__, x) +#define PVRSRVKernelMemInfoIncRef(x...) \ + PVRSRVKernelMemInfoIncRef2(__FILE__, __LINE__, x) +#define PVRSRVKernelMemInfoDecRef(x...) 
\ + PVRSRVKernelMemInfoDecRef2(__FILE__, __LINE__, x) +#define PVRSRVBMBufIncRef(x...) \ + PVRSRVBMBufIncRef2(__FILE__, __LINE__, x) +#define PVRSRVBMBufDecRef(x...) \ + PVRSRVBMBufDecRef2(__FILE__, __LINE__, x) +#define PVRSRVBMBufIncExport(x...) \ + PVRSRVBMBufIncExport2(__FILE__, __LINE__, x) +#define PVRSRVBMBufDecExport(x...) \ + PVRSRVBMBufDecExport2(__FILE__, __LINE__, x) + +void PVRSRVKernelSyncInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); +void PVRSRVKernelSyncInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); +void PVRSRVKernelMemInfoIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); +void PVRSRVKernelMemInfoDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo); +void PVRSRVBMBufIncRef2(const IMG_CHAR *pszFile, + IMG_INT iLine, BM_BUF *pBuf); +void PVRSRVBMBufDecRef2(const IMG_CHAR *pszFile, + IMG_INT iLine, BM_BUF *pBuf); +void PVRSRVBMBufIncExport2(const IMG_CHAR *pszFile, + IMG_INT iLine, BM_BUF *pBuf); +void PVRSRVBMBufDecExport2(const IMG_CHAR *pszFile, + IMG_INT iLine, BM_BUF *pBuf); +void PVRSRVBMXProcIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + IMG_UINT32 ui32Index); +void PVRSRVBMXProcDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + IMG_UINT32 ui32Index); + +#if defined(__linux__) + +/* mmap refcounting is Linux specific */ +#include "mmap.h" + +#define PVRSRVOffsetStructIncRef(x...) \ + PVRSRVOffsetStructIncRef2(__FILE__, __LINE__, x) +#define PVRSRVOffsetStructDecRef(x...) \ + PVRSRVOffsetStructDecRef2(__FILE__, __LINE__, x) +#define PVRSRVOffsetStructIncMapped(x...) \ + PVRSRVOffsetStructIncMapped2(__FILE__, __LINE__, x) +#define PVRSRVOffsetStructDecMapped(x...) 
\ + PVRSRVOffsetStructDecMapped2(__FILE__, __LINE__, x) + +void PVRSRVOffsetStructIncRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct); +void PVRSRVOffsetStructDecRef2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct); +void PVRSRVOffsetStructIncMapped2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct); +void PVRSRVOffsetStructDecMapped2(const IMG_CHAR *pszFile, IMG_INT iLine, + PKV_OFFSET_STRUCT psOffsetStruct); + +#endif /* defined(__linux__) */ + +#else /* defined(PVRSRV_REFCOUNT_DEBUG) */ + +static INLINE void PVRSRVDumpRefCountCCB(void) { } + +static INLINE void PVRSRVKernelSyncInfoIncRef(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + PVR_UNREFERENCED_PARAMETER(psKernelMemInfo); + PVRSRVAcquireSyncInfoKM(psKernelSyncInfo); +} + +static INLINE void PVRSRVKernelSyncInfoDecRef(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo, + PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + PVR_UNREFERENCED_PARAMETER(psKernelMemInfo); + PVRSRVReleaseSyncInfoKM(psKernelSyncInfo); +} + +static INLINE void PVRSRVKernelMemInfoIncRef(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + psKernelMemInfo->ui32RefCount++; +} + +static INLINE void PVRSRVKernelMemInfoDecRef(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo) +{ + psKernelMemInfo->ui32RefCount--; +} + +static INLINE void PVRSRVBMBufIncRef(BM_BUF *pBuf) +{ + pBuf->ui32RefCount++; +} + +static INLINE void PVRSRVBMBufDecRef(BM_BUF *pBuf) +{ + pBuf->ui32RefCount--; +} + +static INLINE void PVRSRVBMBufIncExport(BM_BUF *pBuf) +{ + pBuf->ui32ExportCount++; +} + +static INLINE void PVRSRVBMBufDecExport(BM_BUF *pBuf) +{ + pBuf->ui32ExportCount--; +} + +static INLINE void PVRSRVBMXProcIncRef(IMG_UINT32 ui32Index) +{ + gXProcWorkaroundShareData[ui32Index].ui32RefCount++; +} + +static INLINE void PVRSRVBMXProcDecRef(IMG_UINT32 ui32Index) +{ + gXProcWorkaroundShareData[ui32Index].ui32RefCount--; +} + +#if defined(__linux__) + +/* mmap 
refcounting is Linux specific */ +#include "mmap.h" + +static INLINE void PVRSRVOffsetStructIncRef(PKV_OFFSET_STRUCT psOffsetStruct) +{ + psOffsetStruct->ui32RefCount++; +} + +static INLINE void PVRSRVOffsetStructDecRef(PKV_OFFSET_STRUCT psOffsetStruct) +{ + psOffsetStruct->ui32RefCount--; +} + +static INLINE void PVRSRVOffsetStructIncMapped(PKV_OFFSET_STRUCT psOffsetStruct) +{ + psOffsetStruct->ui32Mapped++; +} + +static INLINE void PVRSRVOffsetStructDecMapped(PKV_OFFSET_STRUCT psOffsetStruct) +{ + psOffsetStruct->ui32Mapped--; +} + +#endif /* defined(__linux__) */ + +#endif /* defined(PVRSRV_REFCOUNT_DEBUG) */ + +#endif /* __REFCOUNT_H__ */ diff --git a/pvr-source/services4/srvkm/include/resman.h b/pvr-source/services4/srvkm/include/resman.h new file mode 100644 index 0000000..92659d9 --- /dev/null +++ b/pvr-source/services4/srvkm/include/resman.h @@ -0,0 +1,152 @@ +/*************************************************************************/ /*! +@Title Resource Manager API +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Provide resource management +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + +#ifndef __RESMAN_H__ +#define __RESMAN_H__ + +#if defined (__cplusplus) +extern "C" { +#endif + +/****************************************************************************** + * resman definitions + *****************************************************************************/ + +enum { + /* SGX: */ + RESMAN_TYPE_SHARED_PB_DESC = 1, /*!< Parameter buffer kernel stubs */ + RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, /*!< Shared parameter buffer creation lock */ + RESMAN_TYPE_HW_RENDER_CONTEXT, /*!< Hardware Render Context Resource */ + RESMAN_TYPE_HW_TRANSFER_CONTEXT, /*!< Hardware transfer Context Resource */ + RESMAN_TYPE_HW_2D_CONTEXT, /*!< Hardware 2D Context Resource */ + RESMAN_TYPE_TRANSFER_CONTEXT, /*!< Transfer Queue context */ + + /* VGX: */ + RESMAN_TYPE_DMA_CLIENT_FIFO_DATA, /*!< VGX DMA Client FIFO data */ + + /* DISPLAY CLASS: */ + RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF, /*!< Display Class Swapchain Reference Resource */ + RESMAN_TYPE_DISPLAYCLASS_DEVICE, /*!< Display Class Device Resource */ + + /* BUFFER CLASS: */ + RESMAN_TYPE_BUFFERCLASS_DEVICE, /*!< Buffer Class Device Resource */ + + /* OS specific User mode Mappings: */ + RESMAN_TYPE_OS_USERMODE_MAPPING, /*!< OS specific User mode mappings */ + + /* COMMON: */ + RESMAN_TYPE_DEVICEMEM_CONTEXT, /*!< Device Memory Context Resource */ + RESMAN_TYPE_DEVICECLASSMEM_MAPPING, /*!< Device Memory Mapping Resource */ + RESMAN_TYPE_DEVICEMEM_MAPPING, /*!< Device Memory Mapping Resource */ + RESMAN_TYPE_DEVICEMEM_WRAP, /*!< Device Memory Wrap Resource */ + RESMAN_TYPE_DEVICEMEM_ALLOCATION, /*!< Device Memory Allocation Resource */ + RESMAN_TYPE_DEVICEMEM_ION, /*!< Device Memory Ion Resource */ + RESMAN_TYPE_EVENT_OBJECT, /*!< Event Object */ + RESMAN_TYPE_SHARED_MEM_INFO, /*!< Shared system memory meminfo */ + RESMAN_TYPE_MODIFY_SYNC_OPS, /*!< Syncobject synchronisation Resource*/ + RESMAN_TYPE_SYNC_INFO, /*!< Syncobject 
Resource*/ + + /* KERNEL: */ + RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION /*!< Device Memory Allocation Resource */ +}; + +#define RESMAN_CRITERIA_ALL 0x00000000 /*!< match by criteria all */ +#define RESMAN_CRITERIA_RESTYPE 0x00000001 /*!< match by criteria type */ +#define RESMAN_CRITERIA_PVOID_PARAM 0x00000002 /*!< match by criteria param1 */ +#define RESMAN_CRITERIA_UI32_PARAM 0x00000004 /*!< match by criteria param2 */ + +typedef PVRSRV_ERROR (*RESMAN_FREE_FN)(IMG_PVOID pvParam, IMG_UINT32 ui32Param, IMG_BOOL bForceCleanup); + +typedef struct _RESMAN_ITEM_ *PRESMAN_ITEM; +typedef struct _RESMAN_CONTEXT_ *PRESMAN_CONTEXT; + +/****************************************************************************** + * resman functions + *****************************************************************************/ + +/* + Note: + Resource cleanup can fail with retry in which case we don't remove + it from resman's list and either UM or KM will try to release the + resource at a later date (and will keep trying until a non-retry + error is returned) +*/ + +PVRSRV_ERROR ResManInit(IMG_VOID); +IMG_VOID ResManDeInit(IMG_VOID); + +PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT hResManContext, + IMG_UINT32 ui32ResType, + IMG_PVOID pvParam, + IMG_UINT32 ui32Param, + RESMAN_FREE_FN pfnFreeResource); + +PVRSRV_ERROR ResManFreeResByPtr(PRESMAN_ITEM psResItem, + IMG_BOOL bForceCleanup); + +PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT hResManContext, + IMG_UINT32 ui32SearchCriteria, + IMG_UINT32 ui32ResType, + IMG_PVOID pvParam, + IMG_UINT32 ui32Param); + +PVRSRV_ERROR ResManDissociateRes(PRESMAN_ITEM psResItem, + PRESMAN_CONTEXT psNewResManContext); + +PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT hResManContext, + PRESMAN_ITEM psItem); + +PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc, + PRESMAN_CONTEXT *phResManContext); +IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT hResManContext, + IMG_BOOL bKernelContext); + +#if defined (__cplusplus) +} +#endif + +#endif 
/* __RESMAN_H__ */ + +/****************************************************************************** + End of file (resman.h) +******************************************************************************/ + diff --git a/pvr-source/services4/srvkm/include/services_headers.h b/pvr-source/services4/srvkm/include/services_headers.h new file mode 100644 index 0000000..d09b8a8 --- /dev/null +++ b/pvr-source/services4/srvkm/include/services_headers.h @@ -0,0 +1,68 @@ +/*************************************************************************/ /*! +@Title Command queues and synchronisation +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Internal structures and definitions for command queues and + synchronisation +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ + + +#ifndef SERVICES_HEADERS_H +#define SERVICES_HEADERS_H + +#ifdef DEBUG_RELEASE_BUILD +#pragma optimize( "", off ) +#define DEBUG 1 +#endif + +#include "img_defs.h" +#include "services.h" +#include "servicesint.h" +#include "power.h" +#include "resman.h" +#include "queue.h" +#include "srvkm.h" +#include "kerneldisplay.h" +#include "syscommon.h" +#include "pvr_debug.h" +#include "metrics.h" +#include "osfunc.h" +#include "refcount.h" + +#endif /* SERVICES_HEADERS_H */ + diff --git a/pvr-source/services4/srvkm/include/srvkm.h b/pvr-source/services4/srvkm/include/srvkm.h new file mode 100644 index 0000000..5d396f8 --- /dev/null +++ b/pvr-source/services4/srvkm/include/srvkm.h @@ -0,0 +1,129 @@ +/*************************************************************************/ /*! +@Title Services kernel module internal header file +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef SRVKM_H +#define SRVKM_H + + +#if defined(__cplusplus) +extern "C" { +#endif + + /** Use PVR_DPF() unless message is necessary in release build + */ + #ifdef PVR_DISABLE_LOGGING + #define PVR_LOG(X) + #else + /* PRQA S 3410 1 */ /* this macro requires no brackets in order to work */ + #define PVR_LOG(X) PVRSRVReleasePrintf X; + #endif + + IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) 
IMG_FORMAT_PRINTF(1, 2); + + IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID, IMG_UINT32 ui32Flags); + IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID); + + IMG_IMPORT IMG_VOID PVRSRVScheduleDevicesKM(IMG_VOID); + + IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State); + + PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_SIZE_T *puiBufSize, IMG_BOOL bSave); + + IMG_VOID PVRSRVScheduleDeviceCallbacks(IMG_VOID); + + +#if defined (__cplusplus) +} +#endif + +/****************** +HIGHER LEVEL MACROS +*******************/ + +/*---------------------------------------------------------------------------- +Repeats the body of the loop for a certain minimum time, or until the body +exits by its own means (break, return, goto, etc.) + +Example of usage: + +LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) +{ + if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset) + { + bTimeout = IMG_FALSE; + break; + } + + OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); +} END_LOOP_UNTIL_TIMEOUT(); + +-----------------------------------------------------------------------------*/ + +/* uiNotLastLoop will remain at 1 until the timeout has expired, at which time + * it will be decremented and the loop executed one final time. This is necessary + * when preemption is enabled. + */ +/* PRQA S 3411,3431 12 */ /* critical format, leave alone */ +#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \ +{\ + IMG_UINT32 uiOffset, uiStart, uiCurrent; \ + IMG_INT32 iNotLastLoop; \ + for(uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\ + ((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--; \ + uiCurrent = OSClockus(), \ + uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \ + uiStart = uiCurrent < uiStart ? 0 : uiStart) + +#define END_LOOP_UNTIL_TIMEOUT() \ +} + +/*! 
+ ****************************************************************************** + + @Function PVRSRVGetErrorStringKM + + @Description Returns a text string relating to the PVRSRV_ERROR enum. + + ******************************************************************************/ +IMG_IMPORT +const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError); + +#endif /* SRVKM_H */ diff --git a/pvr-source/services4/srvkm/include/ttrace.h b/pvr-source/services4/srvkm/include/ttrace.h new file mode 100644 index 0000000..cb70ff8 --- /dev/null +++ b/pvr-source/services4/srvkm/include/ttrace.h @@ -0,0 +1,200 @@ +/*************************************************************************/ /*! +@Title Timed Trace header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Timed Trace header. Contines structures and functions used + in the timed trace subsystem. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. 
+ +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". + +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ /**************************************************************************/ +#include "services_headers.h" +#include "ttrace_common.h" +#include "ttrace_tokens.h" + +#ifndef __TTRACE_H__ +#define __TTRACE_H__ + +#if defined(TTRACE) + + #define PVR_TTRACE(group, class, token) \ + PVRSRVTimeTrace(group, class, token) + #define PVR_TTRACE_UI8(group, class, token, val) \ + PVRSRVTimeTraceUI8(group, class, token, val) + #define PVR_TTRACE_UI16(group, class, token, val) \ + PVRSRVTimeTraceUI16(group, class, token, val) + #define PVR_TTRACE_UI32(group, class, token, val) \ + PVRSRVTimeTraceUI32(group, class, token, val) + #define PVR_TTRACE_UI64(group, class, token, val) \ + PVRSRVTimeTraceUI64(group, class, token, val) + #define PVR_TTRACE_DEV_VIRTADDR(group, class, token, val) \ + PVRSRVTimeTraceDevVirtAddr(group, class, token, val) + #define PVR_TTRACE_CPU_PHYADDR(group, class, token, val) \ + PVRSRVTimeTraceCpuPhyAddr(group, class, token, val) + #define PVR_TTRACE_DEV_PHYADDR(group, class, token, val) \ + PVRSRVTimeTraceDevPhysAddr(group, class, token, val) + #define PVR_TTRACE_SYS_PHYADDR(group, class, token, val) \ + PVRSRVTimeTraceSysPhysAddr(group, class, token, val) + #define PVR_TTRACE_SYNC_OBJECT(group, token, syncobj, op) \ + PVRSRVTimeTraceSyncObject(group, token, syncobj, op) + +IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTimeTraceArray(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_UINT32 ui32TypeSize, + IMG_UINT32 ui32Count, IMG_UINT8 *ui8Data); + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTrace) +#endif +static INLINE IMG_VOID PVRSRVTimeTrace(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, 0, 0, NULL); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceUI8) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceUI8(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_UINT8 ui8Value) +{ + PVRSRVTimeTraceArray(ui32Group, 
ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI8, + 1, &ui8Value); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceUI16) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceUI16(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_UINT16 ui16Value) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI16, + 1, (IMG_UINT8 *) &ui16Value); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceUI32) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceUI32(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_UINT32 ui32Value) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI32, + 1, (IMG_UINT8 *) &ui32Value); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceUI64) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceUI64(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_UINT64 ui64Value) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI64, + 1, (IMG_UINT8 *) &ui64Value); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceDevVirtAddr) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceDevVirtAddr(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_DEV_VIRTADDR psVAddr) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI32, + 1, (IMG_UINT8 *) &psVAddr.uiAddr); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceCpuPhyAddr) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceCpuPhyAddr(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_CPU_PHYADDR psPAddr) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, PVRSRV_TRACE_TYPE_UI32, + 1, (IMG_UINT8 *) &psPAddr.uiAddr); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceDevPhysAddr) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceDevPhysAddr(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_DEV_PHYADDR psPAddr) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, 
ui32Token, PVRSRV_TRACE_TYPE_UI32, + 1, (IMG_UINT8 *) &psPAddr.uiAddr); +} + +#ifdef INLINE_IS_PRAGMA +#pragma inline(PVRSRVTimeTraceSysPhysAddr) +#endif +static INLINE IMG_VOID PVRSRVTimeTraceSysPhysAddr(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, + IMG_UINT32 ui32Token, IMG_SYS_PHYADDR psPAddr) +{ + PVRSRVTimeTraceArray(ui32Group, ui32Class, ui32Token, sizeof(psPAddr.uiAddr), + 1, (IMG_UINT8 *) &psPAddr.uiAddr); +} + +#else /* defined(PVRSRV_NEED_PVR_TIME_TRACE) */ + + #define PVR_TTRACE(group, class, token) \ + ((void) 0) + #define PVR_TTRACE_UI8(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_UI16(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_UI32(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_UI64(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_DEV_VIRTADDR(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_CPU_PHYADDR(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_DEV_PHYADDR(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_SYS_PHYADDR(group, class, token, val) \ + ((void) 0) + #define PVR_TTRACE_SYNC_OBJECT(group, token, syncobj, op) \ + ((void) 0) + +#endif /* defined(PVRSRV_NEED_PVR_TIME_TRACE) */ + +IMG_IMPORT PVRSRV_ERROR PVRSRVTimeTraceInit(IMG_VOID); +IMG_IMPORT IMG_VOID PVRSRVTimeTraceDeinit(IMG_VOID); + +IMG_IMPORT IMG_VOID PVRSRVTimeTraceSyncObject(IMG_UINT32 ui32Group, IMG_UINT32 ui32Token, + PVRSRV_KERNEL_SYNC_INFO *psSync, IMG_UINT8 ui8SyncOp); +IMG_IMPORT PVRSRV_ERROR PVRSRVTimeTraceBufferCreate(IMG_UINT32 ui32PID); +IMG_IMPORT PVRSRV_ERROR PVRSRVTimeTraceBufferDestroy(IMG_UINT32 ui32PID); + +IMG_IMPORT IMG_VOID PVRSRVDumpTimeTraceBuffers(IMG_VOID); +#endif /* __TTRACE_H__ */ diff --git a/pvr-source/services4/srvkm/include/ttrace_common.h b/pvr-source/services4/srvkm/include/ttrace_common.h new file mode 100644 index 0000000..b14f256 --- /dev/null +++ b/pvr-source/services4/srvkm/include/ttrace_common.h @@ -0,0 +1,146 @@ 
+/*************************************************************************/ /*! +@Title Timed Trace header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Timed Trace common header. Contains shared defines and + structures which are shared with the post processing tool. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ +#include "img_types.h" + +#ifndef __TTRACE_COMMON_H__ +#define __TTRACE_COMMON_H__ + +/* + * Trace item + * ========== + * + * A trace item contains a trace header, a timestamp, a UID and a + * data header all of which are 32-bit and mandatory. If there + * is no data then the data header size is set to 0. + * + * Trace header + * ------------ + * 31 27 23 19 15 11 7 3 + * GGGG GGGG CCCC CCCC TTTT TTTT TTTT TTTT + * + * G = group + * Note: + * Group 0xff means the message is padding + * + * C = class + * T = Token + * + * Data header + *----------- + * 31 27 23 19 15 11 7 3 + * SSSS SSSS SSSS SSSS TTTT CCCC CCCC CCCC + * + * S = data packet size + * T = Type + * 0000 - 8 bit + * 0001 - 16 bit + * 0010 - 32 bit + * 0011 - 64 bit + * + * C = data item count + * + * Note: It might look strange having both the packet + * size and the data item count, but the idea + * is the you might have a "special" data type + * who's size might not be known by the post + * processing program and rather then fail + * processing the buffer after that point if we + * know the size we can just skip it and move to + * the next item. 
+ */ + + +#define PVRSRV_TRACE_HEADER 0 +#define PVRSRV_TRACE_TIMESTAMP 1 +#define PVRSRV_TRACE_HOSTUID 2 +#define PVRSRV_TRACE_DATA_HEADER 3 +#define PVRSRV_TRACE_DATA_PAYLOAD 4 + +#define PVRSRV_TRACE_ITEM_SIZE 16 + +#define PVRSRV_TRACE_GROUP_MASK 0xff +#define PVRSRV_TRACE_CLASS_MASK 0xff +#define PVRSRV_TRACE_TOKEN_MASK 0xffff + +#define PVRSRV_TRACE_GROUP_SHIFT 24 +#define PVRSRV_TRACE_CLASS_SHIFT 16 +#define PVRSRV_TRACE_TOKEN_SHIFT 0 + +#define PVRSRV_TRACE_SIZE_MASK 0xffff +#define PVRSRV_TRACE_TYPE_MASK 0xf +#define PVRSRV_TRACE_COUNT_MASK 0xfff + +#define PVRSRV_TRACE_SIZE_SHIFT 16 +#define PVRSRV_TRACE_TYPE_SHIFT 12 +#define PVRSRV_TRACE_COUNT_SHIFT 0 + + +#define WRITE_HEADER(n,m) \ + ((m & PVRSRV_TRACE_##n##_MASK) << PVRSRV_TRACE_##n##_SHIFT) + +#define READ_HEADER(n,m) \ + ((m & (PVRSRV_TRACE_##n##_MASK << PVRSRV_TRACE_##n##_SHIFT)) >> PVRSRV_TRACE_##n##_SHIFT) + +#define TIME_TRACE_BUFFER_SIZE 4096 + +/* Type defines for trace items */ +#define PVRSRV_TRACE_TYPE_UI8 0 +#define PVRSRV_TRACE_TYPE_UI16 1 +#define PVRSRV_TRACE_TYPE_UI32 2 +#define PVRSRV_TRACE_TYPE_UI64 3 + +#define PVRSRV_TRACE_TYPE_SYNC 15 + #define PVRSRV_TRACE_SYNC_UID 0 + #define PVRSRV_TRACE_SYNC_WOP 1 + #define PVRSRV_TRACE_SYNC_WOC 2 + #define PVRSRV_TRACE_SYNC_ROP 3 + #define PVRSRV_TRACE_SYNC_ROC 4 + #define PVRSRV_TRACE_SYNC_WO_DEV_VADDR 5 + #define PVRSRV_TRACE_SYNC_RO_DEV_VADDR 6 + #define PVRSRV_TRACE_SYNC_OP 7 + #define PVRSRV_TRACE_SYNC_RO2P 8 + #define PVRSRV_TRACE_SYNC_RO2C 9 + #define PVRSRV_TRACE_SYNC_RO2_DEV_VADDR 10 +#define PVRSRV_TRACE_TYPE_SYNC_SIZE ((PVRSRV_TRACE_SYNC_RO2_DEV_VADDR + 1) * sizeof(IMG_UINT32)) + +#endif /* __TTRACE_COMMON_H__*/ diff --git a/pvr-source/services4/srvkm/include/ttrace_tokens.h b/pvr-source/services4/srvkm/include/ttrace_tokens.h new file mode 100644 index 0000000..24bc484 --- /dev/null +++ b/pvr-source/services4/srvkm/include/ttrace_tokens.h @@ -0,0 +1,119 @@ 
+/*************************************************************************/ /*! +@Title Timed Trace header +@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved +@Description Timed Trace token header. Contains defines for all the tokens + used. +@License Dual MIT/GPLv2 + +The contents of this file are subject to the MIT license as set out below. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +Alternatively, the contents of this file may be used under the terms of +the GNU General Public License Version 2 ("GPL") in which case the provisions +of GPL are applicable instead of those above. + +If you wish to allow use of your version of this file only under the terms of +GPL, and not to allow others to use your version of this file under the terms +of the MIT license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by GPL as set +out in the file called "GPL-COPYING" included in this distribution. If you do +not delete the provisions above, a recipient may use your version of this file +under the terms of either the MIT license or GPL. + +This License is also included in this distribution in the file called +"MIT-COPYING". 
+ +EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS +PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ /**************************************************************************/ + +#ifndef __TTRACE_TOKENS_H__ +#define __TTRACE_TOKENS_H__ + +/* All defines should use decimal so to not confuse the post processing tool */ + +/* Trace groups */ +#define PVRSRV_TRACE_GROUP_KICK 0 +#define PVRSRV_TRACE_GROUP_TRANSFER 1 +#define PVRSRV_TRACE_GROUP_QUEUE 2 +#define PVRSRV_TRACE_GROUP_POWER 3 +#define PVRSRV_TRACE_GROUP_MKSYNC 4 + +#define PVRSRV_TRACE_GROUP_PADDING 255 + +/* Trace classes */ +#define PVRSRV_TRACE_CLASS_FUNCTION_ENTER 0 +#define PVRSRV_TRACE_CLASS_FUNCTION_EXIT 1 +#define PVRSRV_TRACE_CLASS_SYNC 2 +#define PVRSRV_TRACE_CLASS_CCB 3 +#define PVRSRV_TRACE_CLASS_CMD_START 4 +#define PVRSRV_TRACE_CLASS_CMD_END 5 +#define PVRSRV_TRACE_CLASS_CMD_COMP_START 6 +#define PVRSRV_TRACE_CLASS_CMD_COMP_END 7 +#define PVRSRV_TRACE_CLASS_FLAGS 8 + +#define PVRSRV_TRACE_CLASS_NONE 255 + +/* Operation about to happen on the sync object */ +#define PVRSRV_SYNCOP_SAMPLE 0 +#define PVRSRV_SYNCOP_COMPLETE 1 +#define PVRSRV_SYNCOP_DUMP 2 + +/* + * Trace tokens + * ------------ + * These only need to unique within a group. 
+ */ + +/* Kick group tokens */ +#define KICK_TOKEN_DOKICK 0 +#define KICK_TOKEN_CCB_OFFSET 1 +#define KICK_TOKEN_TA3D_SYNC 2 +#define KICK_TOKEN_TA_SYNC 3 +#define KICK_TOKEN_3D_SYNC 4 +#define KICK_TOKEN_SRC_SYNC 5 +#define KICK_TOKEN_DST_SYNC 6 +#define KICK_TOKEN_FIRST_KICK 7 +#define KICK_TOKEN_LAST_KICK 8 + +/* Transfer Queue group tokens */ +#define TRANSFER_TOKEN_SUBMIT 0 +#define TRANSFER_TOKEN_TA_SYNC 1 +#define TRANSFER_TOKEN_3D_SYNC 2 +#define TRANSFER_TOKEN_SRC_SYNC 3 +#define TRANSFER_TOKEN_DST_SYNC 4 +#define TRANSFER_TOKEN_CCB_OFFSET 5 + +/* Queue group tokens */ +#define QUEUE_TOKEN_GET_SPACE 0 +#define QUEUE_TOKEN_INSERTKM 1 +#define QUEUE_TOKEN_SUBMITKM 2 +#define QUEUE_TOKEN_PROCESS_COMMAND 3 +#define QUEUE_TOKEN_PROCESS_QUEUES 4 +#define QUEUE_TOKEN_COMMAND_COMPLETE 5 +#define QUEUE_TOKEN_UPDATE_DST 6 +#define QUEUE_TOKEN_UPDATE_SRC 7 +#define QUEUE_TOKEN_SRC_SYNC 8 +#define QUEUE_TOKEN_DST_SYNC 9 +#define QUEUE_TOKEN_COMMAND_TYPE 10 + +/* uKernel Sync tokens */ +#define MKSYNC_TOKEN_KERNEL_CCB_OFFSET 0 +#define MKSYNC_TOKEN_CORE_CLK 1 +#define MKSYNC_TOKEN_UKERNEL_CLK 2 + +#endif /* __TTRACE_TOKENS_H__ */ |