Update Files

2025-01-22 16:18:30 +01:00
parent ed4603cf95
commit a36294b518
16718 changed files with 2960346 additions and 0 deletions

File diff suppressed because it is too large

OVR_CAPI_Prototypes.h
@@ -0,0 +1,169 @@
/********************************************************************************/ /**
\file OVR_CAPI_Prototypes.h
\brief Internal CAPI prototype listing macros
\copyright Copyright (c) Facebook Technologies, LLC and its affiliates. All rights reserved.
************************************************************************************/
#ifndef OVR_CAPI_Prototypes_h
#define OVR_CAPI_Prototypes_h
#include "OVR_CAPI.h"
//
// OVR_LIST_*_APIS - apply passed in macros to a list of API entrypoints
//
// The _ macro argument is applied for all current API versions
// The X macro argument is applied for back-compat API versions
//
// The tuple passed to either macro is (ReturnType, FunctionName, OptionalVersion, ParameterList)
//
struct ovrViewportStencilDesc_;
typedef struct ovrViewportStencilDesc_ ovrViewportStencilDesc;
// clang-format off
#define OVR_LIST_PUBLIC_APIS(_,X) \
X(ovrBool, ovr_InitializeRenderingShimVersion, , (int requestedMinorVersion)) \
_(ovrResult, ovr_Initialize, , (const ovrInitParams* params)) \
_(void, ovr_Shutdown, , (void)) \
_(const char*, ovr_GetVersionString, , (void)) \
_(void, ovr_GetLastErrorInfo, , (ovrErrorInfo* errorInfo)) \
_(ovrHmdDesc, ovr_GetHmdDesc, , (ovrSession session)) \
_(unsigned int, ovr_GetTrackerCount, , (ovrSession session)) \
_(ovrTrackerDesc, ovr_GetTrackerDesc, , (ovrSession session, unsigned int trackerDescIndex)) \
_(ovrResult, ovr_Create, , (ovrSession* pSession, ovrGraphicsLuid* pLuid)) \
_(void, ovr_Destroy, , (ovrSession session)) \
_(ovrResult, ovr_GetSessionStatus, , (ovrSession session, ovrSessionStatus* sessionStatus)) \
_(ovrResult, ovr_IsExtensionSupported, , (ovrSession session, ovrExtensions extension, ovrBool* outExtensionSupported)) \
_(ovrResult, ovr_EnableExtension, , (ovrSession session, ovrExtensions extension)) \
_(ovrResult, ovr_SetTrackingOriginType, , (ovrSession session, ovrTrackingOrigin origin)) \
_(ovrTrackingOrigin, ovr_GetTrackingOriginType, , (ovrSession session)) \
_(ovrResult, ovr_RecenterTrackingOrigin, , (ovrSession session)) \
_(ovrResult, ovr_SpecifyTrackingOrigin, , (ovrSession session, ovrPosef originPose)) \
_(void, ovr_ClearShouldRecenterFlag, , (ovrSession session)) \
_(ovrTrackingState, ovr_GetTrackingState, , (ovrSession session, double absTime, ovrBool latencyMarker)) \
_(ovrResult, ovr_GetDevicePoses, , (ovrSession session, ovrTrackedDeviceType* deviceTypes, int deviceCount, double absTime, ovrPoseStatef* outDevicePoses)) \
_(ovrTrackerPose, ovr_GetTrackerPose, , (ovrSession session, unsigned int index)) \
_(ovrResult, ovr_GetInputState, , (ovrSession session, ovrControllerType controllerType, ovrInputState*)) \
_(unsigned int, ovr_GetConnectedControllerTypes, , (ovrSession session)) \
_(ovrSizei, ovr_GetFovTextureSize, , (ovrSession session, ovrEyeType eye, ovrFovPort fov, float pixelsPerDisplayPixel)) \
X(ovrResult, ovr_GetViewportStencil, , (ovrSession session, const ovrViewportStencilDesc* viewportStencilDesc, ovrFovStencilMeshBuffer* meshBuffer)) \
_(ovrResult, ovr_GetFovStencil, , (ovrSession session, const ovrFovStencilDesc* fovStencilDesc, ovrFovStencilMeshBuffer* meshBuffer)) \
_(ovrResult, ovr_WaitToBeginFrame, , (ovrSession session, long long frameIndex)) \
_(ovrResult, ovr_BeginFrame, , (ovrSession session, long long frameIndex)) \
_(ovrResult, ovr_EndFrame, , (ovrSession session, long long frameIndex, const ovrViewScaleDesc* viewScaleDesc, ovrLayerHeader const * const * layerPtrList, unsigned int layerCount)) \
X(ovrResult, ovr_SubmitFrame, , (ovrSession session, long long frameIndex, const ovrViewScaleDescPre117* viewScaleDesc, ovrLayerHeader const * const * layerPtrList, unsigned int layerCount)) \
_(ovrResult, ovr_SubmitFrame, 2, (ovrSession session, long long frameIndex, const ovrViewScaleDesc* viewScaleDesc, ovrLayerHeader const * const * layerPtrList, unsigned int layerCount)) \
X(ovrEyeRenderDescPre117, ovr_GetRenderDesc, , (ovrSession session, ovrEyeType eyeType, ovrFovPort fov)) \
_(ovrEyeRenderDesc, ovr_GetRenderDesc, 2, (ovrSession session, ovrEyeType eyeType, ovrFovPort fov)) \
_(double, ovr_GetPredictedDisplayTime, , (ovrSession session, long long frameIndex)) \
_(double, ovr_GetTimeInSeconds, , (void)) \
_(ovrBool, ovr_GetBool, , (ovrSession session, const char* propertyName, ovrBool defaultVal)) \
_(ovrBool, ovr_SetBool, , (ovrSession session, const char* propertyName, ovrBool value)) \
_(int, ovr_GetInt, , (ovrSession session, const char* propertyName, int defaultVal)) \
_(ovrBool, ovr_SetInt, , (ovrSession session, const char* propertyName, int value)) \
_(float, ovr_GetFloat, , (ovrSession session, const char* propertyName, float defaultVal)) \
_(ovrBool, ovr_SetFloat, , (ovrSession session, const char* propertyName, float value)) \
_(unsigned int, ovr_GetFloatArray, , (ovrSession session, const char* propertyName, float values[], unsigned int arraySize)) \
_(ovrBool, ovr_SetFloatArray, , (ovrSession session, const char* propertyName, const float values[], unsigned int arraySize)) \
_(const char*, ovr_GetString, , (ovrSession session, const char* propertyName, const char* defaultVal)) \
_(ovrBool, ovr_SetString, , (ovrSession session, const char* propertyName, const char* value)) \
_(int, ovr_TraceMessage, , (int level, const char* message)) \
_(ovrResult, ovr_IdentifyClient, , (const char* identity)) \
_(ovrResult, ovr_CreateTextureSwapChainGL, , (ovrSession session, const ovrTextureSwapChainDesc* desc, ovrTextureSwapChain* outTextureChain)) \
_(ovrResult, ovr_CreateMirrorTextureGL, , (ovrSession session, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* outMirrorTexture)) \
_(ovrResult, ovr_CreateMirrorTextureWithOptionsGL, , (ovrSession session, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* outMirrorTexture)) \
_(ovrResult, ovr_GetTextureSwapChainBufferGL, , (ovrSession session, ovrTextureSwapChain chain, int index, unsigned int* texId)) \
_(ovrResult, ovr_GetMirrorTextureBufferGL, , (ovrSession session, ovrMirrorTexture mirror, unsigned int* texId)) \
_(ovrResult, ovr_GetTextureSwapChainLength, , (ovrSession session, ovrTextureSwapChain chain, int* length)) \
_(ovrResult, ovr_GetTextureSwapChainCurrentIndex, , (ovrSession session, ovrTextureSwapChain chain, int* currentIndex)) \
_(ovrResult, ovr_GetTextureSwapChainDesc, , (ovrSession session, ovrTextureSwapChain chain, ovrTextureSwapChainDesc* desc)) \
_(ovrResult, ovr_CommitTextureSwapChain, , (ovrSession session, ovrTextureSwapChain chain)) \
_(void, ovr_DestroyTextureSwapChain, , (ovrSession session, ovrTextureSwapChain chain)) \
_(void, ovr_DestroyMirrorTexture, , (ovrSession session, ovrMirrorTexture texture)) \
X(ovrResult, ovr_SetQueueAheadFraction, , (ovrSession session, float queueAheadFraction)) \
_(ovrResult, ovr_Lookup, , (const char* name, void** data)) \
_(ovrTouchHapticsDesc, ovr_GetTouchHapticsDesc, , (ovrSession session, ovrControllerType controllerType)) \
_(ovrResult, ovr_SetControllerVibration, , (ovrSession session, ovrControllerType controllerType, float frequency, float amplitude)) \
_(ovrResult, ovr_SubmitControllerVibration, , (ovrSession session, ovrControllerType controllerType, const ovrHapticsBuffer* buffer)) \
_(ovrResult, ovr_GetControllerVibrationState, , (ovrSession session, ovrControllerType controllerType, ovrHapticsPlaybackState* outState)) \
_(ovrResult, ovr_TestBoundary, , (ovrSession session, ovrTrackedDeviceType deviceBitmask, ovrBoundaryType singleBoundaryType, ovrBoundaryTestResult* outTestResult)) \
_(ovrResult, ovr_TestBoundaryPoint, , (ovrSession session, const ovrVector3f* point, ovrBoundaryType singleBoundaryType, ovrBoundaryTestResult* outTestResult)) \
_(ovrResult, ovr_SetBoundaryLookAndFeel, , (ovrSession session, const ovrBoundaryLookAndFeel* lookAndFeel)) \
_(ovrResult, ovr_ResetBoundaryLookAndFeel, , (ovrSession session)) \
_(ovrResult, ovr_GetBoundaryGeometry, , (ovrSession session, ovrBoundaryType singleBoundaryType, ovrVector3f* outFloorPoints, int* outFloorPointsCount)) \
_(ovrResult, ovr_GetBoundaryDimensions, , (ovrSession session, ovrBoundaryType singleBoundaryType, ovrVector3f* outDimension)) \
_(ovrResult, ovr_GetBoundaryVisible, , (ovrSession session, ovrBool* outIsVisible)) \
_(ovrResult, ovr_RequestBoundaryVisible, , (ovrSession session, ovrBool visible)) \
_(ovrResult, ovr_GetPerfStats, , (ovrSession session, ovrPerfStats* outPerfStats)) \
_(ovrResult, ovr_ResetPerfStats, , (ovrSession session)) \
_(ovrResult, ovr_GetExternalCameras, , (ovrSession session, ovrExternalCamera* outCameras, unsigned int* outCameraCount)) \
_(ovrResult, ovr_SetExternalCameraProperties, , (ovrSession session, const char* name, const ovrCameraIntrinsics* const intrinsics, const ovrCameraExtrinsics* const extrinsics))
#if defined (_WIN32)
#define OVR_LIST_WIN32_APIS(_,X) \
_(ovrResult, ovr_CreateTextureSwapChainDX, , (ovrSession session, IUnknown* d3dPtr, const ovrTextureSwapChainDesc* desc, ovrTextureSwapChain* outTextureChain)) \
_(ovrResult, ovr_CreateMirrorTextureDX, , (ovrSession session, IUnknown* d3dPtr, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* outMirrorTexture)) \
_(ovrResult, ovr_CreateMirrorTextureWithOptionsDX, , (ovrSession session, IUnknown* d3dPtr, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* outMirrorTexture)) \
_(ovrResult, ovr_GetTextureSwapChainBufferDX, , (ovrSession session, ovrTextureSwapChain chain, int index, IID iid, void** ppObject)) \
_(ovrResult, ovr_GetMirrorTextureBufferDX, , (ovrSession session, ovrMirrorTexture mirror, IID iid, void** ppObject)) \
_(ovrResult, ovr_GetAudioDeviceOutWaveId, , (UINT* deviceOutId)) \
_(ovrResult, ovr_GetAudioDeviceInWaveId, , (UINT* deviceInId)) \
_(ovrResult, ovr_GetAudioDeviceOutGuidStr, , (WCHAR* deviceOutStrBuffer)) \
_(ovrResult, ovr_GetAudioDeviceOutGuid, , (GUID* deviceOutGuid)) \
_(ovrResult, ovr_GetAudioDeviceInGuidStr, , (WCHAR* deviceInStrBuffer)) \
_(ovrResult, ovr_GetAudioDeviceInGuid, , (GUID* deviceInGuid)) \
_(ovrResult, ovr_GetInstanceExtensionsVk, , (ovrGraphicsLuid luid, char* extensionNames, uint32_t* inoutExtensionNamesSize)) \
_(ovrResult, ovr_GetDeviceExtensionsVk, , (ovrGraphicsLuid luid, char* extensionNames, uint32_t* inoutExtensionNamesSize)) \
_(ovrResult, ovr_GetSessionPhysicalDeviceVk, , (ovrSession session, ovrGraphicsLuid luid, VkInstance instance, VkPhysicalDevice* out_physicalDevice)) \
X(ovrResult, ovr_SetSynchonizationQueueVk, , (ovrSession session, VkQueue queue)) \
_(ovrResult, ovr_SetSynchronizationQueueVk, , (ovrSession session, VkQueue queue)) \
_(ovrResult, ovr_CreateTextureSwapChainVk, , (ovrSession session, VkDevice device, const ovrTextureSwapChainDesc* desc, ovrTextureSwapChain* out_TextureSwapChain)) \
_(ovrResult, ovr_GetTextureSwapChainBufferVk, , (ovrSession session, ovrTextureSwapChain chain, int index, VkImage* out_Image)) \
_(ovrResult, ovr_CreateMirrorTextureWithOptionsVk, , (ovrSession session, VkDevice device, const ovrMirrorTextureDesc* desc, ovrMirrorTexture* out_MirrorTexture)) \
_(ovrResult, ovr_GetMirrorTextureBufferVk, , (ovrSession session, ovrMirrorTexture mirrorTexture, VkImage* out_Image))
#else
#define OVR_LIST_WIN32_APIS(_,X)
#endif
#define OVR_LIST_INTERNAL_APIS(_,X)
// We need to forward declare the ovrSensorData type here, as it won't be in a public OVR_CAPI.h header.
struct ovrSensorData_;
typedef struct ovrSensorData_ ovrSensorData;
// Hybrid Apps API forward declarations, which won't be in a public OVR_CAPI.h header for now.
// --------------------------------------------------------------------------
struct ovrDesktopWindowDesc_;
typedef struct ovrDesktopWindowDesc_ ovrDesktopWindowDesc;
struct ovrKeyboardDesc_;
typedef struct ovrKeyboardDesc_ ovrKeyboardDesc;
enum ovrHybridInputFocusType_;
typedef enum ovrHybridInputFocusType_ ovrHybridInputFocusType;
struct ovrHybridInputFocusState_;
typedef struct ovrHybridInputFocusState_ ovrHybridInputFocusState;
typedef uint32_t ovrDesktopWindowHandle;
// --------------------------------------------------------------------------
#define OVR_LIST_PRIVATE_APIS(_,X)
// clang-format on
//
// OVR_LIST_APIS - master list of all API entrypoints
//
#define OVR_LIST_APIS(_, X) \
OVR_LIST_PUBLIC_APIS(_, X) \
OVR_LIST_WIN32_APIS(_, X) \
OVR_LIST_INTERNAL_APIS(_, X) \
OVR_LIST_PRIVATE_APIS(_, X)
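// Illustrative sketch (not part of the original header, hence the #if 0): a
// consumer typically expands OVR_LIST_APIS with a macro of its own to stamp
// out one function-pointer type and slot per entrypoint; the names below are
// made up for the example.
#if 0
#define OVR_DECLARE_API_PTR(ReturnType, FunctionName, OptionalVersion, ParameterList) \
  typedef ReturnType (*FunctionName##_Fn##OptionalVersion) ParameterList;             \
  static FunctionName##_Fn##OptionalVersion p##FunctionName##OptionalVersion;
OVR_LIST_APIS(OVR_DECLARE_API_PTR, OVR_DECLARE_API_PTR)
#undef OVR_DECLARE_API_PTR
#endif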
#endif // OVR_CAPI_Prototypes_h

OVR_CAPI_Util.cpp
@@ -0,0 +1,437 @@
/************************************************************************************
PublicHeader: OVR_CAPI_Util.c
Copyright : Copyright (c) Facebook Technologies, LLC and its affiliates. All rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.3
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*************************************************************************************/
#include <Extras/OVR_CAPI_Util.h>
#include <Extras/OVR_StereoProjection.h>
#include <limits.h>
#include <math.h> // ceil, fabs (and round on non-MSVC builds)
#if !defined(_WIN32)
#include <assert.h>
#include <stdio.h> // fprintf in the ovr_Detect fallback below
#endif
#if defined(_MSC_VER) && _MSC_VER < 1800 // MSVC < 2013
#define round(dbl) \
(dbl) >= 0.0 ? (int)((dbl) + 0.5) \
: (((dbl) - (double)(int)(dbl)) <= -0.5 ? (int)(dbl) : (int)((dbl)-0.5))
#endif
#if defined(_MSC_VER)
#include <emmintrin.h>
#pragma intrinsic(_mm_pause)
#endif
#if defined(_WIN32)
#include <windows.h>
#endif
#if defined(OVR_DLL_BUILD) && defined(OVR_OPENXR_SUPPORT_ENABLED)
// This forces transitive export of the symbols marked for export in OVR_OpenXR_Impl.cpp:
__pragma(comment(linker, "/INCLUDE:" OVR_ON32("_") "exported_openxr_version"))
#endif // defined(OVR_DLL_BUILD) && defined(OVR_OPENXR_SUPPORT_ENABLED)
template <typename T>
T ovrMax(T a, T b) {
return a > b ? a : b;
}
template <typename T>
T ovrMin(T a, T b) {
return a < b ? a : b;
}
// Used to generate projection from ovrEyeDesc::Fov
OVR_PUBLIC_FUNCTION(ovrMatrix4f)
ovrMatrix4f_Projection(ovrFovPort fov, float znear, float zfar, unsigned int projectionModFlags) {
bool leftHanded = (projectionModFlags & ovrProjection_LeftHanded) > 0;
bool flipZ = (projectionModFlags & ovrProjection_FarLessThanNear) > 0;
bool farAtInfinity = (projectionModFlags & ovrProjection_FarClipAtInfinity) > 0;
bool isOpenGL = (projectionModFlags & ovrProjection_ClipRangeOpenGL) > 0;
// TODO: Pass in correct eye to CreateProjection if we want to support canted displays from CAPI
return OVR::CreateProjection(
leftHanded, isOpenGL, fov, OVR::StereoEye_Center, znear, zfar, flipZ, farAtInfinity);
}
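// Example (illustrative, not in the original source): an OpenGL app using a
// reversed-Z depth buffer would pass
//   ovrMatrix4f_Projection(fov, zNear, zFar,
//                          ovrProjection_ClipRangeOpenGL | ovrProjection_FarLessThanNear);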
OVR_PUBLIC_FUNCTION(ovrTimewarpProjectionDesc)
ovrTimewarpProjectionDesc_FromProjection(ovrMatrix4f Projection, unsigned int projectionModFlags) {
ovrTimewarpProjectionDesc res;
res.Projection22 = Projection.M[2][2];
res.Projection23 = Projection.M[2][3];
res.Projection32 = Projection.M[3][2];
if ((res.Projection32 != 1.0f) && (res.Projection32 != -1.0f)) {
// This is a very strange projection matrix, and probably won't work.
// If you need it to work, please contact Oculus and let us know your usage scenario.
}
if ((projectionModFlags & ovrProjection_ClipRangeOpenGL) != 0) {
// Internally we use the D3D range of [0,+w], not the OGL one of [-w,+w], so we
// need to convert one to the other. Note that the values in the depth buffer,
// and the actual linear depth we want, are the same for both APIs; the
// difference is purely in the values inside the projection matrix.
//
// D3D does this:
//   depthBuffer = (ProjD3D.M[2][2] * linearDepth + ProjD3D.M[2][3]) / (linearDepth * ProjD3D.M[3][2]);
// OGL does this:
//   depthBuffer = 0.5 + 0.5 * (ProjOGL.M[2][2] * linearDepth + ProjOGL.M[2][3]) / (linearDepth * ProjOGL.M[3][2]);
// Therefore:
//   ProjD3D.M[2][2] = 0.5 * (ProjOGL.M[2][2] + ProjOGL.M[3][2]);
//   ProjD3D.M[2][3] = 0.5 * ProjOGL.M[2][3];
//   ProjD3D.M[3][2] = ProjOGL.M[3][2];
res.Projection22 = 0.5f * (Projection.M[2][2] + Projection.M[3][2]);
res.Projection23 = 0.5f * Projection.M[2][3];
res.Projection32 = Projection.M[3][2];
}
return res;
}
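// Quick check (illustrative, not in the original source): a standard OGL z-row
// has M[2][2] = -(zFar+zNear)/(zFar-zNear), M[2][3] = -2*zFar*zNear/(zFar-zNear),
// M[3][2] = -1. The conversion above yields Projection22 = zFar/(zNear-zFar) and
// Projection23 = zFar*zNear/(zNear-zFar), the usual D3D [0,+w] z-row.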
OVR_PUBLIC_FUNCTION(ovrMatrix4f)
ovrMatrix4f_OrthoSubProjection(
ovrMatrix4f projection,
ovrVector2f orthoScale,
float orthoDistance,
float hmdToEyeOffsetX) {
ovrMatrix4f ortho;
// Negative sign is correct!
// If the eye is offset to the left, then the ortho view needs to be offset to the right relative
// to the camera.
float orthoHorizontalOffset = -hmdToEyeOffsetX / orthoDistance;
// Current projection maps real-world vector (x,y,1) to the RT.
// We want to find the projection that maps the range [-FovPixels/2,FovPixels/2] to
// the physical [-orthoHalfFov,orthoHalfFov].
// Note moving the offset from M[0][2]+M[1][2] to M[0][3]+M[1][3] - this means
// we don't have to feed in Z=1 all the time.
// The horizontal offset math is a little hinky because the destination is
// actually [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset].
// So we need to first map [-FovPixels/2,FovPixels/2] to
// [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]:
//   x1 = x0 * orthoHalfFov/(FovPixels/2) + orthoHorizontalOffset
//      = x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset;
// But then we need the same mapping as the existing projection matrix, i.e.
//   x2 = x1 * Projection.M[0][0] + Projection.M[0][2]
//      = (x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset) * Projection.M[0][0] + Projection.M[0][2]
//      = x0 * Projection.M[0][0]*2*orthoHalfFov/FovPixels
//        + orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2];
// So in the new projection matrix we need to scale by Projection.M[0][0]*2*orthoHalfFov/FovPixels
// and offset by orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2].
ortho.M[0][0] = projection.M[0][0] * orthoScale.x;
ortho.M[0][1] = 0.0f;
ortho.M[0][2] = 0.0f;
ortho.M[0][3] = -projection.M[0][2] + (orthoHorizontalOffset * projection.M[0][0]);
ortho.M[1][0] = 0.0f;
ortho.M[1][1] =
-projection.M[1][1] * orthoScale.y; /* Note sign flip (text rendering uses Y=down). */
ortho.M[1][2] = 0.0f;
ortho.M[1][3] = -projection.M[1][2];
ortho.M[2][0] = 0.0f;
ortho.M[2][1] = 0.0f;
ortho.M[2][2] = 0.0f;
ortho.M[2][3] = 0.0f;
/* No perspective correction for ortho. */
ortho.M[3][0] = 0.0f;
ortho.M[3][1] = 0.0f;
ortho.M[3][2] = 0.0f;
ortho.M[3][3] = 1.0f;
return ortho;
}
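// Example use (illustrative, not in the original source): building a HUD-style
// ortho projection from an eye projection; the 500 pixels-per-tan-angle scale
// and 0.8 m quad distance are arbitrary values for the sketch.
//   ovrMatrix4f proj = ovrMatrix4f_Projection(fov, 0.2f, 1000.0f, ovrProjection_None);
//   ovrVector2f orthoScale = {1.0f / 500.0f, 1.0f / 500.0f};
//   ovrMatrix4f hud = ovrMatrix4f_OrthoSubProjection(proj, orthoScale, 0.8f, hmdToEyeOffsetX);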
#undef ovr_CalcEyePoses
OVR_PUBLIC_FUNCTION(void)
ovr_CalcEyePoses(ovrPosef headPose, const ovrVector3f hmdToEyeOffset[2], ovrPosef outEyePoses[2]) {
if (!hmdToEyeOffset || !outEyePoses) {
return;
}
using OVR::Posef;
using OVR::Vector3f;
// Currently hmdToEyeOffset is only a 3D vector
outEyePoses[0] =
Posef(headPose.Orientation, ((Posef)headPose).Apply((Vector3f)hmdToEyeOffset[0]));
outEyePoses[1] =
Posef(headPose.Orientation, ((Posef)headPose).Apply((Vector3f)hmdToEyeOffset[1]));
}
OVR_PRIVATE_FUNCTION(void)
ovr_CalcEyePoses2(ovrPosef headPose, const ovrPosef hmdToEyePose[2], ovrPosef outEyePoses[2]) {
if (!hmdToEyePose || !outEyePoses) {
return;
}
using OVR::Posef;
using OVR::Vector3f;
outEyePoses[0] = (Posef)headPose * (Posef)hmdToEyePose[0];
outEyePoses[1] = (Posef)headPose * (Posef)hmdToEyePose[1];
}
#undef ovr_GetEyePoses
OVR_PUBLIC_FUNCTION(void)
ovr_GetEyePoses(
ovrSession session,
long long frameIndex,
ovrBool latencyMarker,
const ovrVector3f hmdToEyeOffset[2],
ovrPosef outEyePoses[2],
double* outSensorSampleTime) {
double frameTime = ovr_GetPredictedDisplayTime(session, frameIndex);
ovrTrackingState trackingState = ovr_GetTrackingState(session, frameTime, latencyMarker);
ovr_CalcEyePoses(trackingState.HeadPose.ThePose, hmdToEyeOffset, outEyePoses);
if (outSensorSampleTime != nullptr) {
*outSensorSampleTime = ovr_GetTimeInSeconds();
}
}
OVR_PRIVATE_FUNCTION(void)
ovr_GetEyePoses2(
ovrSession session,
long long frameIndex,
ovrBool latencyMarker,
const ovrPosef hmdToEyePose[2],
ovrPosef outEyePoses[2],
double* outSensorSampleTime) {
double frameTime = ovr_GetPredictedDisplayTime(session, frameIndex);
ovrTrackingState trackingState = ovr_GetTrackingState(session, frameTime, latencyMarker);
ovr_CalcEyePoses2(trackingState.HeadPose.ThePose, hmdToEyePose, outEyePoses);
if (outSensorSampleTime != nullptr) {
*outSensorSampleTime = ovr_GetTimeInSeconds();
}
}
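// Typical per-frame use (illustrative, not in the original source), feeding an
// ovrLayerEyeFov before ovr_EndFrame:
//   ovrPosef eyePoses[2];
//   double sampleTime = 0.0;
//   ovr_GetEyePoses(session, frameIndex, ovrTrue, hmdToEyeOffset, eyePoses, &sampleTime);
//   layer.RenderPose[0] = eyePoses[0];
//   layer.RenderPose[1] = eyePoses[1];
//   layer.SensorSampleTime = sampleTime;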
OVR_PUBLIC_FUNCTION(ovrDetectResult) ovr_Detect(int timeoutMilliseconds) {
// Initially we assume everything is not running.
ovrDetectResult result;
result.IsOculusHMDConnected = ovrFalse;
result.IsOculusServiceRunning = ovrFalse;
#if defined(_WIN32)
// Attempt to open the named event.
HANDLE hServiceEvent = ::OpenEventW(SYNCHRONIZE, FALSE, OVR_HMD_CONNECTED_EVENT_NAME);
// If event exists,
if (hServiceEvent != nullptr) {
// This indicates that the Oculus Runtime is installed and running.
result.IsOculusServiceRunning = ovrTrue;
// Poll for event state.
DWORD objectResult = ::WaitForSingleObject(hServiceEvent, timeoutMilliseconds);
// If the event is signaled,
if (objectResult == WAIT_OBJECT_0) {
// This indicates that the Oculus HMD is connected.
result.IsOculusHMDConnected = ovrTrue;
}
::CloseHandle(hServiceEvent);
}
#else
(void)timeoutMilliseconds;
fprintf(stderr, __FILE__ "::[%s] Not implemented. Assuming single-process.\n", __func__);
result.IsOculusServiceRunning = ovrTrue;
result.IsOculusHMDConnected = ovrTrue;
#endif // OSX_UNIMPLEMENTED
return result;
}
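// Typical use (illustrative, not in the original source): poll with a zero
// timeout so the call returns immediately, and skip VR if nothing is running:
//   ovrDetectResult r = ovr_Detect(0);
//   if (!r.IsOculusServiceRunning || !r.IsOculusHMDConnected) { /* run non-VR path */ }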
OVR_PUBLIC_FUNCTION(void) ovrPosef_FlipHandedness(const ovrPosef* inPose, ovrPosef* outPose) {
outPose->Orientation.x = -inPose->Orientation.x;
outPose->Orientation.y = inPose->Orientation.y;
outPose->Orientation.z = inPose->Orientation.z;
outPose->Orientation.w = -inPose->Orientation.w;
outPose->Position.x = -inPose->Position.x;
outPose->Position.y = inPose->Position.y;
outPose->Position.z = inPose->Position.z;
}
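// Why this works (illustrative note, not in the original source): negating
// (x, w) of the quaternion is the same rotation as negating (y, z), which is
// the standard mirror of an orientation across the x = 0 plane, matching the
// mirrored Position.x above.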
static float wavPcmBytesToFloat(const void* data, int32_t sizeInBits, bool swapBytes) {
// TODO Support big endian
(void)swapBytes;
// There's no strong standard for converting 8/16/32-bit PCM to float.
// For 16-bit: MSDN says the range is [-32760, 32760], Python SciPy uses
// [-32767, 32767], and Audacity outputs the full range [-32768, 32767].
// We use the same range on both sides and clamp to [-1, 1].
float result = 0.0f;
if (sizeInBits == 8)
// uint8_t is a special case, unsigned where 128 is zero
result = (*((uint8_t*)data) / (float)UCHAR_MAX) * 2.0f - 1.0f;
else if (sizeInBits == 16)
result = *((int16_t*)data) / (float)SHRT_MAX;
// else if (sizeInBits == 24) {
//   int value = data[0] | data[1] << 8 | data[2] << 16; // Need to consider 2's complement
//   return value / 8388607.0f;
// }
else if (sizeInBits == 32)
result = *((int32_t*)data) / (float)INT_MAX;
return ovrMax(-1.0f, result);
}
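// Worked examples (illustrative, not in the original source): a 16-bit sample
// of 16384 maps to 16384/32767 ~= 0.5; an 8-bit sample of 128 maps to
// (128/255)*2 - 1 ~= 0.004, i.e. just above the unsigned midpoint.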
OVR_PUBLIC_FUNCTION(ovrResult)
ovr_GenHapticsFromAudioData(
ovrHapticsClip* outHapticsClip,
const ovrAudioChannelData* audioChannel,
ovrHapticsGenMode genMode) {
if (!outHapticsClip || !audioChannel || genMode != ovrHapticsGenMode_PointSample)
return ovrError_InvalidParameter;
// Validate audio channel
if (audioChannel->Frequency <= 0 || audioChannel->SamplesCount <= 0 ||
audioChannel->Samples == nullptr)
return ovrError_InvalidParameter;
const int32_t kHapticsFrequency = 320;
const int32_t kHapticsMaxAmplitude = 255;
float samplesPerStep = audioChannel->Frequency / (float)kHapticsFrequency;
int32_t hapticsSampleCount = (int32_t)ceil(audioChannel->SamplesCount / samplesPerStep);
uint8_t* hapticsSamples = new uint8_t[hapticsSampleCount];
for (int32_t i = 0; i < hapticsSampleCount; ++i) {
float sample = audioChannel->Samples[(int32_t)(i * samplesPerStep)];
uint8_t hapticSample =
(uint8_t)ovrMin(UCHAR_MAX, (int)round(fabs(sample) * kHapticsMaxAmplitude));
hapticsSamples[i] = hapticSample;
}
outHapticsClip->Samples = hapticsSamples;
outHapticsClip->SamplesCount = hapticsSampleCount;
return ovrSuccess;
}
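// Worked example (illustrative, not in the original source): 48 kHz audio
// gives samplesPerStep = 48000/320 = 150, so every 150th audio sample becomes
// one 0-255 amplitude at the 320 Hz haptics rate, and a 1-second clip yields
// 320 haptics samples.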
OVR_PUBLIC_FUNCTION(ovrResult)
ovr_ReadWavFromBuffer(
ovrAudioChannelData* outAudioChannel,
const void* inputData,
int dataSizeInBytes,
int stereoChannelToUse) {
// We don't support any format other than PCM and IEEE Float
enum WavFormats {
kWavFormatUnknown = 0x0000,
kWavFormatLPCM = 0x0001,
kWavFormatFloatIEEE = 0x0003,
kWavFormatExtensible = 0xFFFE
};
struct WavHeader {
char RiffId[4]; // "RIFF" = little-endian, "RIFX" = big-endian
int32_t Size; // 4 + (8 + FmtChunkSize) + (8 + DataChunkSize)
char WavId[4]; // Must be "WAVE"
char FmtChunckId[4]; // Must be "fmt "
uint32_t FmtChunkSize; // Remaining size of this chunk (16B)
uint16_t Format; // WavFormats: PCM or Float supported
uint16_t Channels; // 1 = Mono, 2 = Stereo
uint32_t SampleRate; // e.g. 44100
uint32_t BytesPerSec; // SampleRate * BytesPerBlock
uint16_t BytesPerBlock; // (NumChannels * BitsPerSample/8)
uint16_t BitsPerSample; // 8, 16, 32
char DataChunckId[4]; // Must be "data"
uint32_t DataChunkSize; // Remaining size of this chunk
};
const int32_t kMinWavFileSize = sizeof(WavHeader) + 1;
if (!outAudioChannel || !inputData || dataSizeInBytes < kMinWavFileSize)
return ovrError_InvalidParameter;
WavHeader* header = (WavHeader*)inputData;
uint8_t* data = (uint8_t*)inputData + sizeof(WavHeader);
// Validate
const char* wavId = header->RiffId;
// TODO We need to support RIFX when supporting big endian formats
// bool isValidWav = (wavId[0] == 'R' && wavId[1] == 'I' && wavId[2] == 'F' && (wavId[3] == 'F' ||
// wavId[3] == 'X')) &&
bool isValidWav = (wavId[0] == 'R' && wavId[1] == 'I' && wavId[2] == 'F' && wavId[3] == 'F') &&
memcmp(header->WavId, "WAVE", 4) == 0;
bool hasValidChunks =
memcmp(header->FmtChunckId, "fmt ", 4) == 0 && memcmp(header->DataChunckId, "data", 4) == 0;
if (!isValidWav || !hasValidChunks) {
return ovrError_InvalidOperation;
}
// We only support PCM
bool isSupported = (header->Format == kWavFormatLPCM || header->Format == kWavFormatFloatIEEE) &&
(header->Channels == 1 || header->Channels == 2) &&
(header->BitsPerSample == 8 || header->BitsPerSample == 16 || header->BitsPerSample == 32);
if (!isSupported) {
return ovrError_Unsupported;
}
// Channel selection
bool useSecondChannel = (header->Channels == 2 && stereoChannelToUse == 1);
int32_t channelOffset = (useSecondChannel) ? header->BytesPerBlock / 2 : 0;
// TODO Support big-endian
int32_t blockCount = header->DataChunkSize / header->BytesPerBlock;
float* samples = new float[blockCount];
for (int32_t i = 0; i < blockCount; i++) {
int32_t dataIndex = i * header->BytesPerBlock;
uint8_t* dataPtr = &data[dataIndex + channelOffset];
float sample = (header->Format == kWavFormatLPCM)
? wavPcmBytesToFloat(dataPtr, header->BitsPerSample, false)
: *(float*)dataPtr;
samples[i] = sample;
}
// Output
outAudioChannel->Samples = samples;
outAudioChannel->SamplesCount = blockCount;
outAudioChannel->Frequency = header->SampleRate;
return ovrSuccess;
}
OVR_PUBLIC_FUNCTION(void) ovr_ReleaseAudioChannelData(ovrAudioChannelData* audioChannel) {
if (audioChannel != nullptr && audioChannel->Samples != nullptr) {
delete[] audioChannel->Samples;
memset(audioChannel, 0, sizeof(ovrAudioChannelData));
}
}
OVR_PUBLIC_FUNCTION(void) ovr_ReleaseHapticsClip(ovrHapticsClip* hapticsClip) {
if (hapticsClip != nullptr && hapticsClip->Samples != nullptr) {
delete[](uint8_t*) hapticsClip->Samples;
memset(hapticsClip, 0, sizeof(ovrHapticsClip));
}
}
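// End-to-end sketch of the helpers above (illustrative, not in the original
// source): WAV buffer -> haptics clip, releasing both when done.
//   ovrAudioChannelData channel;
//   if (OVR_SUCCESS(ovr_ReadWavFromBuffer(&channel, wavData, wavSize, 0))) {
//     ovrHapticsClip clip;
//     if (OVR_SUCCESS(ovr_GenHapticsFromAudioData(&clip, &channel, ovrHapticsGenMode_PointSample))) {
//       // ...wrap clip.Samples in an ovrHapticsBuffer and submit via
//       // ovr_SubmitControllerVibration, then:
//       ovr_ReleaseHapticsClip(&clip);
//     }
//     ovr_ReleaseAudioChannelData(&channel);
//   }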

OVR_StereoProjection.cpp
@@ -0,0 +1,218 @@
/************************************************************************************
Filename : OVR_StereoProjection.cpp
Content : Stereo rendering functions
Created : November 30, 2013
Authors : Tom Forsyth
Copyright : Copyright (c) Facebook Technologies, LLC and its affiliates. All rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.3
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*************************************************************************************/
#include <Extras/OVR_StereoProjection.h>
namespace OVR {
ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov(FovPort tanHalfFov) {
float projXScale = 2.0f / (tanHalfFov.LeftTan + tanHalfFov.RightTan);
float projXOffset = (tanHalfFov.LeftTan - tanHalfFov.RightTan) * projXScale * 0.5f;
float projYScale = 2.0f / (tanHalfFov.UpTan + tanHalfFov.DownTan);
float projYOffset = (tanHalfFov.UpTan - tanHalfFov.DownTan) * projYScale * 0.5f;
ScaleAndOffset2D result;
result.Scale = Vector2f(projXScale, projYScale);
result.Offset = Vector2f(projXOffset, projYOffset);
// Hey - why is that Y.Offset negated?
// It's because a projection matrix transforms from world coords with Y=up,
// whereas this is from NDC which is Y=down.
return result;
}
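// Worked example (illustrative, not in the original source): a symmetric FOV
// with all four tangents equal to 1 gives Scale = (1, 1) and Offset = (0, 0);
// any left/right asymmetry shows up purely in Offset.x.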
Matrix4f CreateProjection(
bool leftHanded,
bool isOpenGL,
FovPort tanHalfFov,
StereoEye /*eye*/,
float zNear /*= 0.01f*/,
float zFar /*= 10000.0f*/,
bool flipZ /*= false*/,
bool farAtInfinity /*= false*/) {
if (!flipZ && farAtInfinity) {
// OVR_ASSERT_M(false, "Cannot push Far Clip to Infinity when Z-order is not flipped");
// Assertion disabled because this code no longer has access to LibOVRKernel assertion
// functionality.
farAtInfinity = false;
}
// A projection matrix is very like a scaling from NDC, so we can start with that.
ScaleAndOffset2D scaleAndOffset = CreateNDCScaleAndOffsetFromFov(tanHalfFov);
float handednessScale = leftHanded ? 1.0f : -1.0f;
Matrix4f projection;
// Produces X result, mapping clip edges to [-w,+w]
projection.M[0][0] = scaleAndOffset.Scale.x;
projection.M[0][1] = 0.0f;
projection.M[0][2] = handednessScale * scaleAndOffset.Offset.x;
projection.M[0][3] = 0.0f;
// Produces Y result, mapping clip edges to [-w,+w]
// Hey - why is that YOffset negated?
// It's because a projection matrix transforms from world coords with Y=up,
// whereas this is derived from an NDC scaling, which is Y=down.
projection.M[1][0] = 0.0f;
projection.M[1][1] = scaleAndOffset.Scale.y;
projection.M[1][2] = handednessScale * -scaleAndOffset.Offset.y;
projection.M[1][3] = 0.0f;
// Produces Z-buffer result - app needs to fill this in with whatever Z range it wants.
// We'll just use some defaults for now.
projection.M[2][0] = 0.0f;
projection.M[2][1] = 0.0f;
if (farAtInfinity) {
if (isOpenGL) {
// It's not clear this makes sense for OpenGL - you don't get the same precision benefits you
// do in D3D.
projection.M[2][2] = -handednessScale;
projection.M[2][3] = 2.0f * zNear;
} else {
projection.M[2][2] = 0.0f;
projection.M[2][3] = zNear;
}
} else {
if (isOpenGL) {
// Clip range is [-w,+w], so 0 is at the middle of the range.
projection.M[2][2] =
-handednessScale * (flipZ ? -1.0f : 1.0f) * (zNear + zFar) / (zNear - zFar);
projection.M[2][3] = 2.0f * ((flipZ ? -zFar : zFar) * zNear) / (zNear - zFar);
} else {
// Clip range is [0,+w], so 0 is at the start of the range.
projection.M[2][2] = -handednessScale * (flipZ ? -zNear : zFar) / (zNear - zFar);
projection.M[2][3] = ((flipZ ? -zFar : zFar) * zNear) / (zNear - zFar);
}
}
// Produces W result (= Z in)
projection.M[3][0] = 0.0f;
projection.M[3][1] = 0.0f;
projection.M[3][2] = handednessScale;
projection.M[3][3] = 0.0f;
return projection;
}
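// Sanity check (illustrative, not in the original source): right-handed D3D
// with flipZ = false gives handednessScale = -1, so M[2][2] = zFar/(zNear-zFar)
// and M[2][3] = zFar*zNear/(zNear-zFar), the conventional D3D perspective z-row.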
Matrix4f CreateOrthoSubProjection(
bool /*rightHanded*/,
StereoEye eyeType,
float tanHalfFovX,
float tanHalfFovY,
float unitsX,
float unitsY,
float distanceFromCamera,
float interpupillaryDistance,
Matrix4f const& projection,
float zNear /*= 0.0f*/,
float zFar /*= 0.0f*/,
bool flipZ /*= false*/,
bool farAtInfinity /*= false*/) {
if (!flipZ && farAtInfinity) {
// OVR_ASSERT_M(false, "Cannot push Far Clip to Infinity when Z-order is not flipped");
// Assertion disabled because this code no longer has access to LibOVRKernel assertion
// functionality.
farAtInfinity = false;
}
float orthoHorizontalOffset = interpupillaryDistance * 0.5f / distanceFromCamera;
switch (eyeType) {
case StereoEye_Left:
break;
case StereoEye_Right:
orthoHorizontalOffset = -orthoHorizontalOffset;
break;
case StereoEye_Center:
orthoHorizontalOffset = 0.0f;
break;
default:
break;
}
// Current projection maps real-world vector (x,y,1) to the RT.
// We want to find the projection that maps the range [-FovPixels/2,FovPixels/2] to
// the physical [-orthoHalfFov,orthoHalfFov].
// Note moving the offset from M[0][2]+M[1][2] to M[0][3]+M[1][3] - this means
// we don't have to feed in Z=1 all the time.
// The horizontal offset math is a little hinky because the destination is
// actually [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset].
// So we need to first map [-FovPixels/2,FovPixels/2] to
// [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]:
//   x1 = x0 * orthoHalfFov/(FovPixels/2) + orthoHorizontalOffset
//      = x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset;
// But then we need the same mapping as the existing projection matrix, i.e.
//   x2 = x1 * Projection.M[0][0] + Projection.M[0][2]
//      = (x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset) * Projection.M[0][0] + Projection.M[0][2]
//      = x0 * Projection.M[0][0]*2*orthoHalfFov/FovPixels
//        + orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2];
// So in the new projection matrix we need to scale by Projection.M[0][0]*2*orthoHalfFov/FovPixels
// and offset by orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2].
float orthoScaleX = 2.0f * tanHalfFovX / unitsX;
float orthoScaleY = 2.0f * tanHalfFovY / unitsY;
Matrix4f ortho;
ortho.M[0][0] = projection.M[0][0] * orthoScaleX;
ortho.M[0][1] = 0.0f;
ortho.M[0][2] = 0.0f;
ortho.M[0][3] = -projection.M[0][2] + (orthoHorizontalOffset * projection.M[0][0]);
ortho.M[1][0] = 0.0f;
ortho.M[1][1] = -projection.M[1][1] * orthoScaleY; // Note sign flip (text rendering uses Y=down).
ortho.M[1][2] = 0.0f;
ortho.M[1][3] = -projection.M[1][2];
const float zDiff = zNear - zFar;
if (fabsf(zDiff) < 0.001f) {
ortho.M[2][0] = 0.0f;
ortho.M[2][1] = 0.0f;
ortho.M[2][2] = 0.0f;
ortho.M[2][3] = flipZ ? zNear : zFar;
} else {
ortho.M[2][0] = 0.0f;
ortho.M[2][1] = 0.0f;
if (farAtInfinity) {
ortho.M[2][2] = 0.0f;
ortho.M[2][3] = zNear;
} else if (zDiff != 0.0f) {
ortho.M[2][2] = (flipZ ? zNear : zFar) / zDiff;
ortho.M[2][3] = ((flipZ ? -zFar : zFar) * zNear) / zDiff;
}
}
// No perspective correction for ortho.
ortho.M[3][0] = 0.0f;
ortho.M[3][1] = 0.0f;
ortho.M[3][2] = 0.0f;
ortho.M[3][3] = 1.0f;
return ortho;
}
} // namespace OVR