diff --git a/README.md b/README.md
index f8013e6f4..17ba3d0fd 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# NVIDIA PhysX
-Copyright (c) 2008-2022 NVIDIA Corporation. All rights reserved.
+Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
diff --git a/blast/PACKAGE-LICENSES/blast-sdk-LICENSE.md b/blast/PACKAGE-LICENSES/blast-sdk-LICENSE.md
index 9ae5404d1..4056e6a29 100644
--- a/blast/PACKAGE-LICENSES/blast-sdk-LICENSE.md
+++ b/blast/PACKAGE-LICENSES/blast-sdk-LICENSE.md
@@ -22,4 +22,4 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
diff --git a/blast/VERSION.md b/blast/VERSION.md
index 4d54daddb..0062ac971 100644
--- a/blast/VERSION.md
+++ b/blast/VERSION.md
@@ -1 +1 @@
-4.0.2
+5.0.0
diff --git a/blast/deps/target-deps.packman.xml b/blast/deps/target-deps.packman.xml
index 498a8d5bf..d7c8dd122 100644
--- a/blast/deps/target-deps.packman.xml
+++ b/blast/deps/target-deps.packman.xml
@@ -16,12 +16,6 @@
-
-
-
-
-
-
diff --git a/blast/docs/CHANGELOG.md b/blast/docs/CHANGELOG.md
index 804152234..b46a72a81 100644
--- a/blast/docs/CHANGELOG.md
+++ b/blast/docs/CHANGELOG.md
@@ -1,5 +1,18 @@
# Changelog
+## [5.0.0] - 23-Jan-2023
+
+### Changes
+- Removed all PhysX dependencies from code outside of the ExtPx extension
+- Replaced Px types with NvShared types
+- NvFoundation headers in include/shared/NvFoundation
+ - Includes NvPreprocessor.h and NvcTypes.h (formerly in include/lowlevel)
+ - Include basic Nv types, such as NvVec3 (used by the Tk library)
+- Consolidated header structure
+ - include/lowlevel/NvBlastPreprocessor.h is gone
+ - Previously-defined NVBLAST_API has been renamed NV_C_API and is now defined in NvPreprocessor.h
+
+
## [4.0.2] - 31-Aug-2022
### Bugfixes
diff --git a/blast/include/extensions/assetutils/NvBlastExtAssetUtils.h b/blast/include/extensions/assetutils/NvBlastExtAssetUtils.h
index dce2e7b92..24b1f4324 100644
--- a/blast/include/extensions/assetutils/NvBlastExtAssetUtils.h
+++ b/blast/include/extensions/assetutils/NvBlastExtAssetUtils.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -48,7 +48,7 @@ Reauthor the provided asset to create external bonds in the specified support ch
\return a new asset with added bonds if successful, NULL otherwise.
*/
-NVBLAST_API NvBlastAsset* NvBlastExtAssetUtilsAddExternalBonds
+NV_C_API NvBlastAsset* NvBlastExtAssetUtilsAddExternalBonds
(
const NvBlastAsset* asset,
const uint32_t* externalBoundChunks,
@@ -85,7 +85,7 @@ NVBLAST_FREE appied to the pointers in the returned NvBlastAssetDesc.
\return an asset descriptor that will build an exact duplicate of the input asset.
*/
-NVBLAST_API NvBlastAssetDesc NvBlastExtAssetUtilsCreateDesc(const NvBlastAsset* asset);
+NV_C_API NvBlastAssetDesc NvBlastExtAssetUtilsCreateDesc(const NvBlastAsset* asset);
/**
@@ -126,7 +126,7 @@ NVBLAST_FREE appied to the pointers in the returned NvBlastAssetDesc.
\return an asset descriptor that will build an asset which merges the components, using NvBlastCreateAsset.
*/
-NVBLAST_API NvBlastAssetDesc NvBlastExtAssetUtilsMergeAssets
+NV_C_API NvBlastAssetDesc NvBlastExtAssetUtilsMergeAssets
(
const NvBlastAsset** components,
const NvcVec3* scales,
@@ -151,7 +151,7 @@ Chunk volume and bond area are changed accordingly.
\param[in] rotation Pointer to rotation to be applied. Can be nullptr.
\param[in] translation Pointer to translation to be applied. Can be nullptr.
*/
-NVBLAST_API void NvBlastExtAssetTransformInPlace
+NV_C_API void NvBlastExtAssetTransformInPlace
(
NvBlastAsset* asset,
const NvcVec3* scale,
diff --git a/blast/include/extensions/authoring/NvBlastExtAuthoring.h b/blast/include/extensions/authoring/NvBlastExtAuthoring.h
index d5a003ec8..06244b1d8 100644
--- a/blast/include/extensions/authoring/NvBlastExtAuthoring.h
+++ b/blast/include/extensions/authoring/NvBlastExtAuthoring.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -67,7 +67,7 @@ User should call release() after usage.
\return pointer to Nv::Blast::Mesh if it was created succefully otherwise return nullptr
*/
-NVBLAST_API Nv::Blast::Mesh*
+NV_C_API Nv::Blast::Mesh*
NvBlastExtAuthoringCreateMesh(const NvcVec3* positions, const NvcVec3* normals, const NvcVec2* uv,
uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount);
@@ -84,7 +84,7 @@ User should call Mesh::release() after usage.
\return pointer to Nv::Blast::Mesh if it was created succefully otherwise return nullptr
*/
-NVBLAST_API Nv::Blast::Mesh*
+NV_C_API Nv::Blast::Mesh*
NvBlastExtAuthoringCreateMeshOnlyTriangles(const void* vertices, uint32_t verticesCount, uint32_t* indices,
uint32_t indexCount, void* materials = nullptr, uint32_t materialStride = 4);
@@ -101,7 +101,7 @@ User should call release() after usage.
\return pointer to Nv::Blast::Mesh if it was created succefully otherwise return nullptr
*/
-NVBLAST_API Nv::Blast::Mesh*
+NV_C_API Nv::Blast::Mesh*
NvBlastExtAuthoringCreateMeshFromFacets(const void* vertices, const void* edges, const void* facets,
uint32_t verticesCount, uint32_t edgesCount, uint32_t facetsCount);
@@ -112,11 +112,11 @@ should be supplied with fracture mesh.
\param[in] rnd User supplied random value generator.
\return Pointer to VoronoiSitesGenerator. User's code should release it after usage.
*/
-NVBLAST_API Nv::Blast::VoronoiSitesGenerator*
+NV_C_API Nv::Blast::VoronoiSitesGenerator*
NvBlastExtAuthoringCreateVoronoiSitesGenerator(Nv::Blast::Mesh* mesh, Nv::Blast::RandomGeneratorBase* rng);
/** Instantiates a blank CutoutSet */
-NVBLAST_API Nv::Blast::CutoutSet* NvBlastExtAuthoringCreateCutoutSet();
+NV_C_API Nv::Blast::CutoutSet* NvBlastExtAuthoringCreateCutoutSet();
/**
Builds a cutout set (which must have been initially created by createCutoutSet()).
@@ -134,7 +134,7 @@ segments may be fudged into alignment. By default set it to 1.
\param expandGaps expand cutout regions to gaps or keep it as is
*/
-NVBLAST_API void
+NV_C_API void
NvBlastExtAuthoringBuildCutoutSet(Nv::Blast::CutoutSet& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth,
uint32_t bufferHeight, float segmentationErrorThreshold, float snapThreshold,
bool periodic, bool expandGaps);
@@ -143,13 +143,13 @@ NvBlastExtAuthoringBuildCutoutSet(Nv::Blast::CutoutSet& cutoutSet, const uint8_t
Create FractureTool object.
\return Pointer to create FractureTool. User's code should release it after usage.
*/
-NVBLAST_API Nv::Blast::FractureTool* NvBlastExtAuthoringCreateFractureTool();
+NV_C_API Nv::Blast::FractureTool* NvBlastExtAuthoringCreateFractureTool();
/**
Create BlastBondGenerator
\return Pointer to created BlastBondGenerator. User's code should release it after usage.
*/
-NVBLAST_API Nv::Blast::BlastBondGenerator* NvBlastExtAuthoringCreateBondGenerator(Nv::Blast::ConvexMeshBuilder* builder);
+NV_C_API Nv::Blast::BlastBondGenerator* NvBlastExtAuthoringCreateBondGenerator(Nv::Blast::ConvexMeshBuilder* builder);
/**
Build convex mesh decomposition.
@@ -160,7 +160,7 @@ Build convex mesh decomposition.
\return Number of created convex hulls.
*/
-NVBLAST_API int32_t NvBlastExtAuthoringBuildMeshConvexDecomposition(Nv::Blast::ConvexMeshBuilder* cmb,
+NV_C_API int32_t NvBlastExtAuthoringBuildMeshConvexDecomposition(Nv::Blast::ConvexMeshBuilder* cmb,
const Nv::Blast::Triangle* mesh,
uint32_t triangleCount,
const Nv::Blast::ConvexDecompositionParams& params,
@@ -179,7 +179,7 @@ NVBLAST_API int32_t NvBlastExtAuthoringBuildMeshConvexDecomposition(Nv::Blast::C
\param[in] chunkDepth Array of depth levels of convex hulls corresponding chunks.
*/
-NVBLAST_API void NvBlastExtAuthoringTrimCollisionGeometry(Nv::Blast::ConvexMeshBuilder* cmb, uint32_t chunksCount,
+NV_C_API void NvBlastExtAuthoringTrimCollisionGeometry(Nv::Blast::ConvexMeshBuilder* cmb, uint32_t chunksCount,
Nv::Blast::CollisionHull** in, const uint32_t* chunkDepth);
@@ -190,7 +190,7 @@ Transforms collision hull in place using scale, rotation, transform.
\param[in] rotation Pointer to rotation to be applied. Can be nullptr.
\param[in] translation Pointer to translation to be applied. Can be nullptr.
*/
-NVBLAST_API void NvBlastExtAuthoringTransformCollisionHullInPlace(Nv::Blast::CollisionHull* hull, const NvcVec3* scaling,
+NV_C_API void NvBlastExtAuthoringTransformCollisionHullInPlace(Nv::Blast::CollisionHull* hull, const NvcVec3* scaling,
const NvcQuat* rotation, const NvcVec3* translation);
/**
@@ -200,7 +200,7 @@ Transforms collision hull in place using scale, rotation, transform.
\param[in] rotation Pointer to rotation to be applied. Can be nullptr.
\param[in] translation Pointer to translation to be applied. Can be nullptr.
*/
-NVBLAST_API Nv::Blast::CollisionHull*
+NV_C_API Nv::Blast::CollisionHull*
NvBlastExtAuthoringTransformCollisionHull(const Nv::Blast::CollisionHull* hull, const NvcVec3* scaling,
const NvcQuat* rotation, const NvcVec3* translation);
@@ -215,7 +215,7 @@ Performs pending fractures and generates fractured asset, render and collision g
\param[in] collisionParam Parameters of collision hulls generation.
\return Authoring result
*/
-NVBLAST_API Nv::Blast::AuthoringResult*
+NV_C_API Nv::Blast::AuthoringResult*
NvBlastExtAuthoringProcessFracture(Nv::Blast::FractureTool& fTool, Nv::Blast::BlastBondGenerator& bondGenerator,
Nv::Blast::ConvexMeshBuilder& collisionBuilder,
const Nv::Blast::ConvexDecompositionParams& collisionParam,
@@ -225,12 +225,12 @@ NvBlastExtAuthoringProcessFracture(Nv::Blast::FractureTool& fTool, Nv::Blast::Bl
/**
Releases collision data for AuthoringResult. AuthoringResult should be created by NvBlast.
*/
-NVBLAST_API void NvBlastExtAuthoringReleaseAuthoringResultCollision(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar);
+NV_C_API void NvBlastExtAuthoringReleaseAuthoringResultCollision(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar);
/**
Releases AuthoringResult data. AuthoringResult should be created by NvBlast.
*/
-NVBLAST_API void NvBlastExtAuthoringReleaseAuthoringResult(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar);
+NV_C_API void NvBlastExtAuthoringReleaseAuthoringResult(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar);
/**
@@ -240,7 +240,7 @@ Updates graphics mesh only
\param[out] ares AuthoringResult object which contains chunks, for which rendermeshes will be updated
(e.g. to tweak UVs). Initially should be created by NvBlastExtAuthoringProcessFracture.
*/
-NVBLAST_API void NvBlastExtAuthoringUpdateGraphicsMesh(Nv::Blast::FractureTool& fTool, Nv::Blast::AuthoringResult& ares);
+NV_C_API void NvBlastExtAuthoringUpdateGraphicsMesh(Nv::Blast::FractureTool& fTool, Nv::Blast::AuthoringResult& ares);
/**
Build collision meshes
@@ -251,7 +251,7 @@ Parameters of collision hulls generation.
\param[in] chunksToProcessCount Number of chunk indices in chunksToProcess memory buffer.
\param[in] chunksToProcess Chunk indices for which collision mesh should be built.
*/
-NVBLAST_API void NvBlastExtAuthoringBuildCollisionMeshes(Nv::Blast::AuthoringResult& ares,
+NV_C_API void NvBlastExtAuthoringBuildCollisionMeshes(Nv::Blast::AuthoringResult& ares,
Nv::Blast::ConvexMeshBuilder& collisionBuilder,
const Nv::Blast::ConvexDecompositionParams& collisionParam,
uint32_t chunksToProcessCount, uint32_t* chunksToProcess);
@@ -260,7 +260,7 @@ NVBLAST_API void NvBlastExtAuthoringBuildCollisionMeshes(Nv::Blast::AuthoringRes
Creates MeshCleaner object
\return pointer to Nv::Blast::Mesh if it was created succefully otherwise return nullptr
*/
-NVBLAST_API Nv::Blast::MeshCleaner* NvBlastExtAuthoringCreateMeshCleaner();
+NV_C_API Nv::Blast::MeshCleaner* NvBlastExtAuthoringCreateMeshCleaner();
/**
Finds bonds connecting chunks in a list of assets
@@ -287,7 +287,7 @@ relativeTransforms arrays.
\param[in] maxSeparation Maximal distance between chunks which can be connected by bond.
\return the number of bonds in newBondDescs
*/
-NVBLAST_API uint32_t NvBlastExtAuthoringFindAssetConnectingBonds(
+NV_C_API uint32_t NvBlastExtAuthoringFindAssetConnectingBonds(
const NvBlastAsset** components, const NvcVec3* scales, const NvcQuat* rotations, const NvcVec3* translations,
const uint32_t** convexHullOffsets, const Nv::Blast::CollisionHull*** chunkHulls, uint32_t componentCount,
NvBlastExtAssetUtilsBondDesc*& newBondDescs, float maxSeparation = 0.0f);
@@ -295,31 +295,31 @@ NVBLAST_API uint32_t NvBlastExtAuthoringFindAssetConnectingBonds(
/**
Returns pattern generator used for generating fracture patterns.
*/
-NVBLAST_API Nv::Blast::PatternGenerator* NvBlastExtAuthoringCreatePatternGenerator();
+NV_C_API Nv::Blast::PatternGenerator* NvBlastExtAuthoringCreatePatternGenerator();
/**
Create spatial grid for mesh.
Release using Nv::Blast::SpatialGrid::release()
*/
-NVBLAST_API Nv::Blast::SpatialGrid* NvBlastExtAuthoringCreateSpatialGrid(uint32_t resolution, const Nv::Blast::Mesh* m);
+NV_C_API Nv::Blast::SpatialGrid* NvBlastExtAuthoringCreateSpatialGrid(uint32_t resolution, const Nv::Blast::Mesh* m);
/**
Create GridAccelerator - SpatialAccelerator which use Grid for faster mesh sampling.
Release using Nv::Blast::SpatialAccelerator::release()
*/
-NVBLAST_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateGridAccelerator(Nv::Blast::SpatialGrid* parent);
+NV_C_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateGridAccelerator(Nv::Blast::SpatialGrid* parent);
/**
Create SweepingAccelerator - SpatialAccelerator which uses a sweep algorithm.
Release using Nv::Blast::SpatialAccelerator::release()
*/
-NVBLAST_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateSweepingAccelerator(const Nv::Blast::Mesh* m);
+NV_C_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateSweepingAccelerator(const Nv::Blast::Mesh* m);
/**
Create BBoxBasedAccelerator - SpatialAccelerator which uses a bbox/grid algorithm.
Release using Nv::Blast::SpatialAccelerator::release()
*/
-NVBLAST_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateBBoxBasedAccelerator(uint32_t resolution, const Nv::Blast::Mesh* m);
+NV_C_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateBBoxBasedAccelerator(uint32_t resolution, const Nv::Blast::Mesh* m);
#define kBBoxBasedAcceleratorDefaultResolution 10
@@ -327,7 +327,7 @@ NVBLAST_API Nv::Blast::SpatialAccelerator* NvBlastExtAuthoringCreateBBoxBasedAcc
Create BooleanTool object.
\return Pointer to created BooleanTool. User's code should release it after usage.
*/
-NVBLAST_API Nv::Blast::BooleanTool* NvBlastExtAuthoringCreateBooleanTool();
+NV_C_API Nv::Blast::BooleanTool* NvBlastExtAuthoringCreateBooleanTool();
#endif // ifndef NVBLASTAUTHORING_H
diff --git a/blast/include/extensions/authoring/NvBlastExtAuthoringBondGenerator.h b/blast/include/extensions/authoring/NvBlastExtAuthoringBondGenerator.h
index a9fde0211..a908b103e 100644
--- a/blast/include/extensions/authoring/NvBlastExtAuthoringBondGenerator.h
+++ b/blast/include/extensions/authoring/NvBlastExtAuthoringBondGenerator.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/extensions/authoring/NvBlastExtAuthoringBooleanTool.h b/blast/include/extensions/authoring/NvBlastExtAuthoringBooleanTool.h
index 335a0d62d..6c87ae334 100644
--- a/blast/include/extensions/authoring/NvBlastExtAuthoringBooleanTool.h
+++ b/blast/include/extensions/authoring/NvBlastExtAuthoringBooleanTool.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2021 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/extensions/authoring/NvBlastExtAuthoringCutout.h b/blast/include/extensions/authoring/NvBlastExtAuthoringCutout.h
index cf30ab2ab..80bde15ab 100644
--- a/blast/include/extensions/authoring/NvBlastExtAuthoringCutout.h
+++ b/blast/include/extensions/authoring/NvBlastExtAuthoringCutout.h
@@ -11,7 +11,7 @@
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/extensions/authoring/NvBlastExtAuthoringFractureTool.h b/blast/include/extensions/authoring/NvBlastExtAuthoringFractureTool.h
index 351b4c1b0..d79006d9c 100644
--- a/blast/include/extensions/authoring/NvBlastExtAuthoringFractureTool.h
+++ b/blast/include/extensions/authoring/NvBlastExtAuthoringFractureTool.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/extensions/authoring/NvBlastExtAuthoringMeshCleaner.h b/blast/include/extensions/authoring/NvBlastExtAuthoringMeshCleaner.h
index ffcf6db88..4f3252a6d 100644
--- a/blast/include/extensions/authoring/NvBlastExtAuthoringMeshCleaner.h
+++ b/blast/include/extensions/authoring/NvBlastExtAuthoringMeshCleaner.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/extensions/authoringCommon/NvBlastExtAuthoringAccelerator.h b/blast/include/extensions/authoringCommon/NvBlastExtAuthoringAccelerator.h
index d6c3b9ad7..73fdd5a1d 100644
--- a/blast/include/extensions/authoringCommon/NvBlastExtAuthoringAccelerator.h
+++ b/blast/include/extensions/authoringCommon/NvBlastExtAuthoringAccelerator.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/extensions/authoringCommon/NvBlastExtAuthoringConvexMeshBuilder.h b/blast/include/extensions/authoringCommon/NvBlastExtAuthoringConvexMeshBuilder.h
index 30620544c..64080d4f7 100644
--- a/blast/include/extensions/authoringCommon/NvBlastExtAuthoringConvexMeshBuilder.h
+++ b/blast/include/extensions/authoringCommon/NvBlastExtAuthoringConvexMeshBuilder.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -44,7 +44,6 @@ struct CollisionHull;
ConvexMeshBuilder provides routine to build collision hulls from array of vertices.
Collision hull is built as convex hull of provided point set.
If due to some reason building of convex hull is failed, collision hull is built as bounding box of vertex set.
- PhysX implementation can be found in NvBlastExtPx.
*/
class ConvexMeshBuilder
{
diff --git a/blast/include/extensions/authoringCommon/NvBlastExtAuthoringMesh.h b/blast/include/extensions/authoringCommon/NvBlastExtAuthoringMesh.h
index a476302cc..2754f2113 100644
--- a/blast/include/extensions/authoringCommon/NvBlastExtAuthoringMesh.h
+++ b/blast/include/extensions/authoringCommon/NvBlastExtAuthoringMesh.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/extensions/authoringCommon/NvBlastExtAuthoringPatternGenerator.h b/blast/include/extensions/authoringCommon/NvBlastExtAuthoringPatternGenerator.h
index e8a68369e..d557e76e6 100644
--- a/blast/include/extensions/authoringCommon/NvBlastExtAuthoringPatternGenerator.h
+++ b/blast/include/extensions/authoringCommon/NvBlastExtAuthoringPatternGenerator.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -119,7 +119,7 @@ namespace Nv
virtual void release() = 0;
};
- NVBLAST_API void savePatternToObj(DamagePattern* pattern);
+ NV_C_API void savePatternToObj(DamagePattern* pattern);
} // namespace Blast
} // namespace Nv
diff --git a/blast/include/extensions/authoringCommon/NvBlastExtAuthoringTypes.h b/blast/include/extensions/authoringCommon/NvBlastExtAuthoringTypes.h
index f8a637e58..a5c08e1f0 100644
--- a/blast/include/extensions/authoringCommon/NvBlastExtAuthoringTypes.h
+++ b/blast/include/extensions/authoringCommon/NvBlastExtAuthoringTypes.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/extensions/serialization/NvBlastExtLlSerialization.h b/blast/include/extensions/serialization/NvBlastExtLlSerialization.h
index 081907b77..9752ba31a 100644
--- a/blast/include/extensions/serialization/NvBlastExtLlSerialization.h
+++ b/blast/include/extensions/serialization/NvBlastExtLlSerialization.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -69,8 +69,8 @@ struct LlObjectTypeID
/**
Load all low-level serializers into the ExtSerialization manager. *N.B.* This is done automatically when
the ExtSerialization manager is created via NvBlastExtSerializationCreate(), so currently this public function
-is unnecessary. Note also that other modules' serializers (ExtTkSerialization and ExtPxSerialization) are
-_not_ loaded automatically, and need to be explicitly loaded by the user using their respective load functions.
+is unnecessary. Note also that other modules' serializers (e.g. ExtTkSerialization) are _not_ loaded
+automatically, and need to be explicitly loaded by the user using their respective load functions.
It does no harm to call this function more than once; serializers already loaded will not be loaded again.
@@ -78,7 +78,7 @@ It does no harm to call this function more than once; serializers already loaded
\return the number of serializers loaded.
*/
-NVBLAST_API size_t NvBlastExtLlSerializerLoadSet(Nv::Blast::ExtSerialization& serialization);
+NV_C_API size_t NvBlastExtLlSerializerLoadSet(Nv::Blast::ExtSerialization& serialization);
/**
@@ -95,7 +95,7 @@ Equivalent to:
\return the number of bytes serialized into the buffer (zero if unsuccessful).
*/
-NVBLAST_API uint64_t NvBlastExtSerializationSerializeAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastAsset* asset);
+NV_C_API uint64_t NvBlastExtSerializationSerializeAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastAsset* asset);
/**
@@ -112,4 +112,4 @@ Equivalent to:
\return the number of bytes serialized into the buffer (zero if unsuccessful).
*/
-NVBLAST_API uint64_t NvBlastExtSerializationSerializeFamilyIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastFamily* family);
+NV_C_API uint64_t NvBlastExtSerializationSerializeFamilyIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastFamily* family);
diff --git a/blast/include/extensions/serialization/NvBlastExtSerialization.h b/blast/include/extensions/serialization/NvBlastExtSerialization.h
index 10f574f92..6f0e9e19d 100644
--- a/blast/include/extensions/serialization/NvBlastExtSerialization.h
+++ b/blast/include/extensions/serialization/NvBlastExtSerialization.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -155,4 +155,4 @@ This uses the global allocator set in NvBlastGlobals.h.
\return a new serialization manager.
*/
-NVBLAST_API Nv::Blast::ExtSerialization* NvBlastExtSerializationCreate();
+NV_C_API Nv::Blast::ExtSerialization* NvBlastExtSerializationCreate();
diff --git a/blast/include/extensions/serialization/NvBlastExtTkSerialization.h b/blast/include/extensions/serialization/NvBlastExtTkSerialization.h
index 52aba3fc9..c7d51c720 100644
--- a/blast/include/extensions/serialization/NvBlastExtTkSerialization.h
+++ b/blast/include/extensions/serialization/NvBlastExtTkSerialization.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -71,7 +71,7 @@ It does no harm to call this function more than once; serializers already loaded
\return the number of serializers loaded.
*/
-NVBLAST_API size_t NvBlastExtTkSerializerLoadSet(Nv::Blast::TkFramework& framework, Nv::Blast::ExtSerialization& serialization);
+NV_C_API size_t NvBlastExtTkSerializerLoadSet(Nv::Blast::TkFramework& framework, Nv::Blast::ExtSerialization& serialization);
/**
@@ -88,4 +88,4 @@ Equivalent to:
\return the number of bytes serialized into the buffer (zero if unsuccessful).
*/
-NVBLAST_API uint64_t NvBlastExtSerializationSerializeTkAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const Nv::Blast::TkAsset* asset);
+NV_C_API uint64_t NvBlastExtSerializationSerializeTkAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const Nv::Blast::TkAsset* asset);
diff --git a/blast/include/extensions/shaders/NvBlastExtDamageShaders.h b/blast/include/extensions/shaders/NvBlastExtDamageShaders.h
index 6ce815a48..f0aa779cd 100644
--- a/blast/include/extensions/shaders/NvBlastExtDamageShaders.h
+++ b/blast/include/extensions/shaders/NvBlastExtDamageShaders.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -52,7 +52,7 @@ class NvBlastExtDamageAccelerator
virtual Nv::Blast::DebugBuffer fillDebugRender(int depth = -1, bool segments = false) = 0;
};
-NVBLAST_API NvBlastExtDamageAccelerator* NvBlastExtDamageAcceleratorCreate(const NvBlastAsset* asset, int type);
+NV_C_API NvBlastExtDamageAccelerator* NvBlastExtDamageAcceleratorCreate(const NvBlastAsset* asset, int type);
///////////////////////////////////////////////////////////////////////////////
@@ -141,10 +141,10 @@ NOTE: The signature of shader functions are equal to NvBlastGraphShaderFunction
They are not expected to be called directly.
@see NvBlastGraphShaderFunction, NvBlastSubgraphShaderFunction
*/
-NVBLAST_API void NvBlastExtFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params);
-NVBLAST_API void NvBlastExtFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params);
-NVBLAST_API void NvBlastExtCutterGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params);
-NVBLAST_API void NvBlastExtCutterSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params);
+NV_C_API void NvBlastExtFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params);
+NV_C_API void NvBlastExtFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params);
+NV_C_API void NvBlastExtCutterGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params);
+NV_C_API void NvBlastExtCutterSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params);
///////////////////////////////////////////////////////////////////////////////
@@ -173,8 +173,8 @@ NOTE: The signature of shader functions are equal to NvBlastGraphShaderFunction
They are not expected to be called directly.
@see NvBlastGraphShaderFunction, NvBlastSubgraphShaderFunction
*/
-NVBLAST_API void NvBlastExtCapsuleFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params);
-NVBLAST_API void NvBlastExtCapsuleFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params);
+NV_C_API void NvBlastExtCapsuleFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params);
+NV_C_API void NvBlastExtCapsuleFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params);
///////////////////////////////////////////////////////////////////////////////
@@ -202,8 +202,8 @@ NOTE: The signature of shader functions are equal to NvBlastGraphShaderFunction
They are not expected to be called directly.
@see NvBlastGraphShaderFunction, NvBlastSubgraphShaderFunction
*/
-NVBLAST_API void NvBlastExtShearGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params);
-NVBLAST_API void NvBlastExtShearSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params);
+NV_C_API void NvBlastExtShearGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params);
+NV_C_API void NvBlastExtShearSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params);
///////////////////////////////////////////////////////////////////////////////
@@ -235,8 +235,8 @@ NOTE: The signature of shader functions are equal to NvBlastGraphShaderFunction
They are not expected to be called directly.
@see NvBlastGraphShaderFunction, NvBlastSubgraphShaderFunction
*/
-NVBLAST_API void NvBlastExtTriangleIntersectionGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params);
-NVBLAST_API void NvBlastExtTriangleIntersectionSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params);
+NV_C_API void NvBlastExtTriangleIntersectionGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params);
+NV_C_API void NvBlastExtTriangleIntersectionSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params);
///////////////////////////////////////////////////////////////////////////////
@@ -272,8 +272,8 @@ NOTE: The signature of shader functions are equal to NvBlastGraphShaderFunction
They are not expected to be called directly.
@see NvBlastGraphShaderFunction, NvBlastSubgraphShaderFunction
*/
-NVBLAST_API void NvBlastExtImpactSpreadGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params);
-NVBLAST_API void NvBlastExtImpactSpreadSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params);
+NV_C_API void NvBlastExtImpactSpreadGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params);
+NV_C_API void NvBlastExtImpactSpreadSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params);
#endif // NVBLASTEXTDAMAGESHADERS_H
diff --git a/blast/include/extensions/stress/NvBlastExtStressSolver.h b/blast/include/extensions/stress/NvBlastExtStressSolver.h
index 72ab52494..003ad1172 100644
--- a/blast/include/extensions/stress/NvBlastExtStressSolver.h
+++ b/blast/include/extensions/stress/NvBlastExtStressSolver.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -121,7 +121,7 @@ class NV_DLL_EXPORT ExtStressSolver
/**
Create a new ExtStressSolver.
- \param[in] family The ExtPxFamily instance to calculate stress on.
+ \param[in] family The NvBlastFamily instance to calculate stress on.
\param[in] settings The settings to be set on ExtStressSolver.
\return the new ExtStressSolver if successful, NULL otherwise.
diff --git a/blast/include/globals/NvBlastAllocator.h b/blast/include/globals/NvBlastAllocator.h
index 0e83a2513..c2d1715f4 100644
--- a/blast/include/globals/NvBlastAllocator.h
+++ b/blast/include/globals/NvBlastAllocator.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -31,10 +31,11 @@
#ifndef NVBLASTALLOCATOR_H
#define NVBLASTALLOCATOR_H
+#include "NvAllocatorCallback.h"
#include "NvBlastGlobals.h"
/**
-This file contains AllocatorCallback wrappers compatible with PxShared containers.
+This file contains nvidia::NvAllocatorCallback wrappers compatible with NvShared containers.
*/
namespace Nv
@@ -43,7 +44,7 @@ namespace Blast
{
/**
-Allocator uses global AllocatorCallback.
+Allocator uses global nvidia::NvAllocatorCallback.
*/
class Allocator
{
diff --git a/blast/include/globals/NvBlastDebugRender.h b/blast/include/globals/NvBlastDebugRender.h
index 16a7f9c57..aa790dc20 100644
--- a/blast/include/globals/NvBlastDebugRender.h
+++ b/blast/include/globals/NvBlastDebugRender.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/globals/NvBlastGlobals.h b/blast/include/globals/NvBlastGlobals.h
index f5833c4c4..5792164d7 100644
--- a/blast/include/globals/NvBlastGlobals.h
+++ b/blast/include/globals/NvBlastGlobals.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTGLOBALS_H
@@ -30,155 +30,54 @@
#include
#include "NvBlastTypes.h"
+#include "NvAllocatorCallback.h"
+#include "NvErrorCallback.h"
+#include "NvProfiler.h"
+
//! @file
//!
//! @brief API for the NvBlastGlobals library
-namespace Nv
-{
-namespace Blast
-{
-
-
-/**
-\brief Abstract base class for an application defined memory allocator that can be used by toolkit (Tk) or any extension (Ext).
-*/
-class AllocatorCallback
-{
-public:
- /**
- \brief destructor
- */
- virtual ~AllocatorCallback()
- {
- }
-
- /**
- \brief Allocates size bytes of memory, which must be 16-byte aligned.
-
- This method should never return NULL. If you run out of memory, then
- you should terminate the app or take some other appropriate action.
-
- \param size Number of bytes to allocate.
- \param typeName Name of the datatype that is being allocated
- \param filename The source file which allocated the memory
- \param line The source line which allocated the memory
- \return The allocated block of memory.
- */
- virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) = 0;
-
- /**
- \brief Frees memory previously allocated by allocate().
-
- \param ptr Memory to free.
- */
- virtual void deallocate(void* ptr) = 0;
-};
-
+//////// Global API to Access Global nvidia::NvAllocatorCallback, nvidia::NvErrorCallback, and nvidia::NvProfilerCallback ////////
/**
-\brief Error codes
-
-These error codes are passed to #ErrorCallback
-
-\note: It's actually the copy of PxErrorCallback's PxErrorCode so it can be easily casted to it. Keep that
-in mind if you are going to change this enum.
+Retrieve a pointer to the global nvidia::NvAllocatorCallback. Default implementation with std allocator is used if user didn't provide
+their own. It always exists, 'nullptr' will never be returned.
-@see ErrorCallback
+\return the pointer to the global nvidia::NvAllocatorCallback.
*/
-struct ErrorCode
-{
- enum Enum
- {
- eNO_ERROR = 0,
-
- //! \brief An informational message.
- eDEBUG_INFO = 1,
-
- //! \brief a warning message for the user to help with debugging
- eDEBUG_WARNING = 2,
-
- //! \brief method called with invalid parameter(s)
- eINVALID_PARAMETER = 4,
-
- //! \brief method was called at a time when an operation is not possible
- eINVALID_OPERATION = 8,
-
- //! \brief method failed to allocate some memory
- eOUT_OF_MEMORY = 16,
-
- /** \brief The library failed for some reason.
- Possibly you have passed invalid values like NaNs, which are not checked for.
- */
- eINTERNAL_ERROR = 32,
-
- //! \brief An unrecoverable error, execution should be halted and log output flushed
- eABORT = 64,
-
- //! \brief The SDK has determined that an operation may result in poor performance.
- ePERF_WARNING = 128,
-
- //! \brief A bit mask for including all errors
- eMASK_ALL = -1
- };
-};
-
+NV_C_API nvidia::NvAllocatorCallback* NvBlastGlobalGetAllocatorCallback();
/**
-\brief User defined interface class. Used by the library to emit debug information.
-
-\note The SDK state should not be modified from within any error reporting functions.
+Set global nvidia::NvAllocatorCallback. If 'nullptr' is passed the default nvidia::NvAllocatorCallback with std allocator is set.
*/
-class ErrorCallback
-{
-public:
- virtual ~ErrorCallback()
- {
- }
-
- /**
- \brief Reports an error code.
- \param code Error code, see #ErrorCode
- \param message Message to display.
- \param file File error occured in.
- \param line Line number error occured on.
- */
- virtual void reportError(ErrorCode::Enum code, const char* message, const char* file, int line) = 0;
-};
-
-
-} // namespace Blast
-} // namespace Nv
-
-
-//////// Global API to Access Global AllocatorCallback and ErrorCallback ////////
+NV_C_API void NvBlastGlobalSetAllocatorCallback(nvidia::NvAllocatorCallback* allocatorCallback);
/**
-Retrieve a pointer to the global AllocatorCallback. Default implementation with std allocator is used if user didn't provide
-it's own. It always exist, 'nullptr' will never be returned.
+Retrieve a pointer to the global nvidia::NvErrorCallback. Default implementation which writes messages to stdout is used if user didn't provide
+their own. It always exists, 'nullptr' will never be returned.
-\return the pointer to the global AllocatorCallback.
+\return the pointer to the global nvidia::NvErrorCallback.
*/
-NVBLAST_API Nv::Blast::AllocatorCallback* NvBlastGlobalGetAllocatorCallback();
+NV_C_API nvidia::NvErrorCallback* NvBlastGlobalGetErrorCallback();
/**
-Set global AllocatorCallback. If 'nullptr' is passed the default AllocatorCallback with std allocator is set.
+Set global nvidia::NvErrorCallback. If 'nullptr' is passed the default nvidia::NvErrorCallback that writes messages to stdout is set.
*/
-NVBLAST_API void NvBlastGlobalSetAllocatorCallback(Nv::Blast::AllocatorCallback* allocatorCallback);
+NV_C_API void NvBlastGlobalSetErrorCallback(nvidia::NvErrorCallback* errorCallback);
/**
-Retrieve a pointer to the global ErrorCallback. Default implementation which writes messages to stdout is used if user didn't provide
-it's own. It always exist, 'nullptr' will never be returned.
+Retrieve a pointer to the global nvidia::NvProfilerCallback. Returns nullptr if none is set.
-\return the pointer to the global ErrorCallback.
+\return the pointer to the global nvidia::NvProfilerCallback.
*/
-NVBLAST_API Nv::Blast::ErrorCallback* NvBlastGlobalGetErrorCallback();
+NV_C_API nvidia::NvProfilerCallback* NvBlastGlobalGetProfilerCallback();
/**
-Set global ErrorCallback. If 'nullptr' is passed the default ErrorCallback that writes messages to stdout is set.
+Set a custom profiler callback. May be nullptr (the default).
*/
-NVBLAST_API void NvBlastGlobalSetErrorCallback(Nv::Blast::ErrorCallback* errorCallback);
+NV_C_API void NvBlastGlobalSetProfilerCallback(nvidia::NvProfilerCallback* profilerCallback);
//////// Helper Global Functions ////////
@@ -188,21 +87,20 @@ namespace Nv
namespace Blast
{
-
/**
Logging wrapper compatible with NvBlastLog. @see NvBlastLog.
-Pass this function to LowLevel function calls in order to get logging into global ErrorCallback.
+Pass this function to LowLevel function calls in order to get logging into global nvidia::NvErrorCallback.
*/
NV_INLINE void logLL(int type, const char* msg, const char* file, int line)
{
- ErrorCode::Enum errorCode = ErrorCode::eNO_ERROR;
+ nvidia::NvErrorCode::Enum errorCode = nvidia::NvErrorCode::eNO_ERROR;
switch (type)
{
- case NvBlastMessage::Error: errorCode = ErrorCode::eINVALID_OPERATION; break;
- case NvBlastMessage::Warning: errorCode = ErrorCode::eDEBUG_WARNING; break;
- case NvBlastMessage::Info: errorCode = ErrorCode::eDEBUG_INFO; break;
- case NvBlastMessage::Debug: errorCode = ErrorCode::eNO_ERROR; break;
+ case NvBlastMessage::Error: errorCode = nvidia::NvErrorCode::eINVALID_OPERATION; break;
+ case NvBlastMessage::Warning: errorCode = nvidia::NvErrorCode::eDEBUG_WARNING; break;
+ case NvBlastMessage::Info: errorCode = nvidia::NvErrorCode::eDEBUG_INFO; break;
+ case NvBlastMessage::Debug: errorCode = nvidia::NvErrorCode::eNO_ERROR; break;
}
NvBlastGlobalGetErrorCallback()->reportError(errorCode, msg, file, line);
@@ -217,7 +115,7 @@ NV_INLINE void logLL(int type, const char* msg, const char* file, int line)
//////// Allocator macros ////////
/**
-Alloc/Free macros that use global AllocatorCallback. Thus allocated memory is 16-byte aligned.
+Alloc/Free macros that use global nvidia::NvAllocatorCallback. Thus allocated memory is 16-byte aligned.
*/
#define NVBLAST_ALLOC(_size) NvBlastGlobalGetAllocatorCallback()->allocate(_size, nullptr, __FILE__, __LINE__)
#define NVBLAST_ALLOC_NAMED(_size, _name) NvBlastGlobalGetAllocatorCallback()->allocate(_size, _name, __FILE__, __LINE__)
@@ -249,16 +147,16 @@ Example: NVBLAST_DELETE(foo, Foo);
//////// Log macros ////////
/**
-Logging macros that use global AllocatorCallback.
+Logging macros that use global nvidia::NvErrorCallback.
*/
#define NVBLAST_LOG(_code, _msg) NvBlastGlobalGetErrorCallback()->reportError(_code, _msg, __FILE__, __LINE__)
-#define NVBLAST_LOG_ERROR(_msg) NVBLAST_LOG(Nv::Blast::ErrorCode::eINVALID_OPERATION, _msg)
-#define NVBLAST_LOG_WARNING(_msg) NVBLAST_LOG(Nv::Blast::ErrorCode::eDEBUG_WARNING, _msg)
-#define NVBLAST_LOG_INFO(_msg) NVBLAST_LOG(Nv::Blast::ErrorCode::eDEBUG_INFO, _msg)
-#define NVBLAST_LOG_DEBUG(_msg) NVBLAST_LOG(Nv::Blast::ErrorCode::eNO_ERROR, _msg)
+#define NVBLAST_LOG_ERROR(_msg) NVBLAST_LOG(nvidia::NvErrorCode::eINVALID_OPERATION, _msg)
+#define NVBLAST_LOG_WARNING(_msg) NVBLAST_LOG(nvidia::NvErrorCode::eDEBUG_WARNING, _msg)
+#define NVBLAST_LOG_INFO(_msg) NVBLAST_LOG(nvidia::NvErrorCode::eDEBUG_INFO, _msg)
+#define NVBLAST_LOG_DEBUG(_msg) NVBLAST_LOG(nvidia::NvErrorCode::eNO_ERROR, _msg)
/**
-Check macros that use global AllocatorCallback. The idea is that you pass an expression to check, if it fails
+Check macros that use global nvidia::NvErrorCallback. The idea is that you pass an expression to check, if it fails
it logs and calls '_onFail' code you passed.
*/
#define NVBLAST_CHECK(_code, _expr, _msg, _onFail) \
@@ -270,10 +168,10 @@ it logs and calls '_onFail' code you passed.
} \
}
-#define NVBLAST_CHECK_ERROR(_expr, _msg, _onFail) NVBLAST_CHECK(Nv::Blast::ErrorCode::eINVALID_OPERATION, _expr, _msg, _onFail)
-#define NVBLAST_CHECK_WARNING(_expr, _msg, _onFail) NVBLAST_CHECK(Nv::Blast::ErrorCode::eDEBUG_WARNING, _expr, _msg, _onFail)
-#define NVBLAST_CHECK_INFO(_expr, _msg, _onFail) NVBLAST_CHECK(Nv::Blast::ErrorCode::eDEBUG_INFO, _expr, _msg, _onFail)
-#define NVBLAST_CHECK_DEBUG(_expr, _msg, _onFail) NVBLAST_CHECK(Nv::Blast::ErrorCode::eNO_ERROR, _expr, _msg, _onFail)
+#define NVBLAST_CHECK_ERROR(_expr, _msg, _onFail) NVBLAST_CHECK(nvidia::NvErrorCode::eINVALID_OPERATION, _expr, _msg, _onFail)
+#define NVBLAST_CHECK_WARNING(_expr, _msg, _onFail) NVBLAST_CHECK(nvidia::NvErrorCode::eDEBUG_WARNING, _expr, _msg, _onFail)
+#define NVBLAST_CHECK_INFO(_expr, _msg, _onFail) NVBLAST_CHECK(nvidia::NvErrorCode::eDEBUG_INFO, _expr, _msg, _onFail)
+#define NVBLAST_CHECK_DEBUG(_expr, _msg, _onFail) NVBLAST_CHECK(nvidia::NvErrorCode::eNO_ERROR, _expr, _msg, _onFail)
//////// Misc ////////
diff --git a/blast/include/globals/NvCMath.h b/blast/include/globals/NvCMath.h
index 7663c12d4..81efdff95 100644
--- a/blast/include/globals/NvCMath.h
+++ b/blast/include/globals/NvCMath.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/lowlevel/NvBlast.h b/blast/include/lowlevel/NvBlast.h
index 25aab4883..edf6f017d 100644
--- a/blast/include/lowlevel/NvBlast.h
+++ b/blast/include/lowlevel/NvBlast.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -49,7 +49,7 @@ Use this function when building an asset with NvBlastCreateAsset.
\return the memory size (in bytes) required for the asset, or zero if desc is invalid.
*/
-NVBLAST_API size_t NvBlastGetAssetMemorySize(const NvBlastAssetDesc* desc, NvBlastLog logFn);
+NV_C_API size_t NvBlastGetAssetMemorySize(const NvBlastAssetDesc* desc, NvBlastLog logFn);
/**
Calculates the memory requirements for an asset based upon supplied sized data.
@@ -60,7 +60,7 @@ Used primarily with serialization.
\return the memory size (in bytes) required for the asset, or zero if data is invalid.
*/
-NVBLAST_API size_t NvBlastGetAssetMemorySizeFromSizeData(const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn);
+NV_C_API size_t NvBlastGetAssetMemorySizeFromSizeData(const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn);
/**
Returns the number of bytes of scratch memory that the user must supply to NvBlastCreateAsset,
@@ -71,7 +71,7 @@ based upon the descriptor that will be passed into that function.
\return the number of bytes of scratch memory required for a call to NvBlastCreateAsset with that descriptor.
*/
-NVBLAST_API size_t NvBlastGetRequiredScratchForCreateAsset(const NvBlastAssetDesc* desc, NvBlastLog logFn);
+NV_C_API size_t NvBlastGetRequiredScratchForCreateAsset(const NvBlastAssetDesc* desc, NvBlastLog logFn);
/**
@@ -98,7 +98,7 @@ If chunks aren't arranged properly the function fails to create an asset.
\return pointer to new NvBlastAsset (will be the same address as mem), or NULL if unsuccessful.
*/
-NVBLAST_API NvBlastAsset* NvBlastCreateAsset(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn);
+NV_C_API NvBlastAsset* NvBlastCreateAsset(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn);
/**
@@ -110,7 +110,7 @@ Use this function when building a family with NvBlastAssetCreateFamily.
\return the memory size (in bytes) required for the family, or zero if asset is invalid.
*/
-NVBLAST_API size_t NvBlastAssetGetFamilyMemorySize(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API size_t NvBlastAssetGetFamilyMemorySize(const NvBlastAsset* asset, NvBlastLog logFn);
/**
Calculates the memory requirements for a family based upon supplied sized data.
@@ -121,7 +121,7 @@ Used primarily with serialization.
\return the memory size (in bytes) required for the family, or zero if data is invalid.
*/
-NVBLAST_API size_t NvBlastAssetGetFamilyMemorySizeFromSizeData(const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn);
+NV_C_API size_t NvBlastAssetGetFamilyMemorySizeFromSizeData(const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn);
/**
@@ -131,7 +131,7 @@ Fill out the size data from the provided asset
\return Filled out size data struct.
*/
-NVBLAST_API NvBlastAssetMemSizeData NvBlastAssetMemSizeDataFromAsset(const NvBlastAsset* asset);
+NV_C_API NvBlastAssetMemSizeData NvBlastAssetMemSizeDataFromAsset(const NvBlastAsset* asset);
/**
Family-building function.
@@ -145,7 +145,7 @@ of memory of at least the size given by NvBlastAssetGetFamilyMemorySize(asset, l
\return the family.
*/
-NVBLAST_API NvBlastFamily* NvBlastAssetCreateFamily(void* mem, const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API NvBlastFamily* NvBlastAssetCreateFamily(void* mem, const NvBlastAsset* asset, NvBlastLog logFn);
/**
Family-building function.
@@ -159,7 +159,7 @@ of memory of at least the size given by NvBlastAssetGetFamilyMemorySize(sizeData
\return the family.
*/
-NVBLAST_API NvBlastFamily* NvBlastAssetCreateFamilyFromSizeData(void* mem, const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn);
+NV_C_API NvBlastFamily* NvBlastAssetCreateFamilyFromSizeData(void* mem, const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn);
/**
@@ -170,7 +170,7 @@ Retrieve the asset ID.
\return the ID of the asset.
*/
-NVBLAST_API NvBlastID NvBlastAssetGetID(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API NvBlastID NvBlastAssetGetID(const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -182,7 +182,7 @@ Set an asset's ID
\return true iff the id is successfully set.
*/
-NVBLAST_API bool NvBlastAssetSetID(NvBlastAsset* asset, const NvBlastID* id, NvBlastLog logFn);
+NV_C_API bool NvBlastAssetSetID(NvBlastAsset* asset, const NvBlastID* id, NvBlastLog logFn);
/**
@@ -193,7 +193,7 @@ Retrieve the data format version for the given asset
\return the data format version (NvBlastAssetDataFormat).
*/
-NVBLAST_API uint32_t NvBlastAssetGetFormatVersion(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastAssetGetFormatVersion(const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -204,7 +204,7 @@ Retrieve the memory size (in bytes) of the given data asset
\return the memory size of the asset (in bytes).
*/
-NVBLAST_API uint32_t NvBlastAssetGetSize(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastAssetGetSize(const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -215,7 +215,7 @@ Get the number of chunks in the given asset.
\return the number of chunks in the asset.
*/
-NVBLAST_API uint32_t NvBlastAssetGetChunkCount(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastAssetGetChunkCount(const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -229,7 +229,7 @@ and this function will return NvBlastSupportGraph::nodeCount - 1.
\return the number of chunks in the asset.
*/
-NVBLAST_API uint32_t NvBlastAssetGetSupportChunkCount(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastAssetGetSupportChunkCount(const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -240,7 +240,7 @@ Get the number of leaf chunks in the given asset.
\return the number of leaf chunks in the asset.
*/
-NVBLAST_API uint32_t NvBlastAssetGetLeafChunkCount(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastAssetGetLeafChunkCount(const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -253,7 +253,7 @@ chunks.
\return the first subsupport chunk index in the asset.
*/
-NVBLAST_API uint32_t NvBlastAssetGetFirstSubsupportChunkIndex(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastAssetGetFirstSubsupportChunkIndex(const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -264,7 +264,7 @@ Get the number of bonds in the given asset.
\return the number of bonds in the asset.
*/
-NVBLAST_API uint32_t NvBlastAssetGetBondCount(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastAssetGetBondCount(const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -275,7 +275,7 @@ Access the support graph for the given asset.
\return a struct of support graph for the given asset.
*/
-NVBLAST_API const NvBlastSupportGraph NvBlastAssetGetSupportGraph(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API const NvBlastSupportGraph NvBlastAssetGetSupportGraph(const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -288,7 +288,7 @@ Non-support chunks are mapped to the invalid index 0xFFFFFFFF.
\return an array of uint32_t values defining the map, of size NvBlastAssetGetChunkCount(asset, logFn).
*/
-NVBLAST_API const uint32_t* NvBlastAssetGetChunkToGraphNodeMap(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API const uint32_t* NvBlastAssetGetChunkToGraphNodeMap(const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -299,7 +299,7 @@ Access an array of chunks of the given asset.
\return a pointer to an array of chunks of the asset.
*/
-NVBLAST_API const NvBlastChunk* NvBlastAssetGetChunks(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API const NvBlastChunk* NvBlastAssetGetChunks(const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -310,7 +310,7 @@ Access an array of bonds of the given asset.
\return a pointer to an array of bonds of the asset.
*/
-NVBLAST_API const NvBlastBond* NvBlastAssetGetBonds(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API const NvBlastBond* NvBlastAssetGetBonds(const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -323,7 +323,7 @@ for actor serialization.
\return the required buffer size in bytes.
*/
-NVBLAST_API uint32_t NvBlastAssetGetActorSerializationSizeUpperBound(const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastAssetGetActorSerializationSizeUpperBound(const NvBlastAsset* asset, NvBlastLog logFn);
///@} End NvBlastAsset functions
@@ -349,7 +349,7 @@ Chunk order depends on support coverage, so this function should be called befor
\return true iff coverage was already exact.
*/
-NVBLAST_API bool NvBlastEnsureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn);
+NV_C_API bool NvBlastEnsureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn);
/**
@@ -375,7 +375,7 @@ Iff chunks are already ordered correctly, function returns 'true' and identity c
\return true iff the chunks did not require reordering (chunkReorderMap is the identity map).
*/
-NVBLAST_API bool NvBlastBuildAssetDescChunkReorderMap(uint32_t* chunkReorderMap, const NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn);
+NV_C_API bool NvBlastBuildAssetDescChunkReorderMap(uint32_t* chunkReorderMap, const NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn);
/**
@@ -395,7 +395,7 @@ with new indices. Bonds are kept in the same order, but their 'chunkIndices' fie
\param[in] keepBondNormalChunkOrder If true, bond normals will be flipped if their chunk index order was reveresed by the reorder map.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
*/
-NVBLAST_API void NvBlastApplyAssetDescChunkReorderMap
+NV_C_API void NvBlastApplyAssetDescChunkReorderMap
(
NvBlastChunkDesc* reorderedChunkDescs,
const NvBlastChunkDesc* chunkDescs,
@@ -427,7 +427,7 @@ This overload of function reorders chunks in place.
\param[in] scratch User-supplied scratch storage, must point to chunkCount * sizeof(NvBlastChunkDesc) valid bytes of memory.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
*/
-NVBLAST_API void NvBlastApplyAssetDescChunkReorderMapInPlace
+NV_C_API void NvBlastApplyAssetDescChunkReorderMapInPlace
(
NvBlastChunkDesc* chunkDescs,
uint32_t chunkCount,
@@ -456,7 +456,7 @@ Function basically calls NvBlastBuildAssetDescChunkReorderMap and NvBlastApplyAs
\return true iff the chunks did not require reordering (chunkReorderMap is the identity map).
*/
-NVBLAST_API bool NvBlastReorderAssetDescChunks
+NV_C_API bool NvBlastReorderAssetDescChunks
(
NvBlastChunkDesc* chunkDescs,
uint32_t chunkCount,
@@ -484,7 +484,7 @@ Retrieve the data format version for the given family.
\return the family format version.
*/
-NVBLAST_API uint32_t NvBlastFamilyGetFormatVersion(const NvBlastFamily* family, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastFamilyGetFormatVersion(const NvBlastFamily* family, NvBlastLog logFn);
/**
@@ -495,7 +495,7 @@ Retrieve the asset of the given family.
\return pointer to the asset associated with the family.
*/
-NVBLAST_API const NvBlastAsset* NvBlastFamilyGetAsset(const NvBlastFamily* family, NvBlastLog logFn);
+NV_C_API const NvBlastAsset* NvBlastFamilyGetAsset(const NvBlastFamily* family, NvBlastLog logFn);
/**
@@ -505,7 +505,7 @@ Set asset to the family. It should be the same asset as the one family was creat
\param[in] asset Asset to instance.
\param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL.
*/
-NVBLAST_API void NvBlastFamilySetAsset(NvBlastFamily* family, const NvBlastAsset* asset, NvBlastLog logFn);
+NV_C_API void NvBlastFamilySetAsset(NvBlastFamily* family, const NvBlastAsset* asset, NvBlastLog logFn);
/**
@@ -516,7 +516,7 @@ Retrieve the size (in bytes) of the given family.
\return the size of the family (in bytes).
*/
-NVBLAST_API uint32_t NvBlastFamilyGetSize(const NvBlastFamily* family, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastFamilyGetSize(const NvBlastFamily* family, NvBlastLog logFn);
/**
@@ -527,7 +527,7 @@ Retrieve the asset ID of the given family.
\return the ID of the asset associated with the family.
*/
-NVBLAST_API NvBlastID NvBlastFamilyGetAssetID(const NvBlastFamily* family, NvBlastLog logFn);
+NV_C_API NvBlastID NvBlastFamilyGetAssetID(const NvBlastFamily* family, NvBlastLog logFn);
/**
@@ -538,7 +538,7 @@ Returns the number of bytes of scratch memory that the user must supply to NvBla
\return the number of bytes of scratch memory required for a call to NvBlastFamilyCreateFirstActor.
*/
-NVBLAST_API size_t NvBlastFamilyGetRequiredScratchForCreateFirstActor(const NvBlastFamily* family, NvBlastLog logFn);
+NV_C_API size_t NvBlastFamilyGetRequiredScratchForCreateFirstActor(const NvBlastFamily* family, NvBlastLog logFn);
/**
@@ -551,7 +551,7 @@ Instance the family's asset into a new, unfractured actor.
\return pointer to new NvBlastActor if successful (the actor was successfully inserted into the family), or NULL if unsuccessful.
*/
-NVBLAST_API NvBlastActor* NvBlastFamilyCreateFirstActor(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn);
+NV_C_API NvBlastActor* NvBlastFamilyCreateFirstActor(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn);
/**
@@ -562,7 +562,7 @@ Retrieve the number of active actors associated with the given family.
\return the number of active actors in the family.
*/
-NVBLAST_API uint32_t NvBlastFamilyGetActorCount(const NvBlastFamily* family, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastFamilyGetActorCount(const NvBlastFamily* family, NvBlastLog logFn);
/**
@@ -576,7 +576,7 @@ IDs with the actors already present in the family.
\return the deserialized actor if successful, NULL otherwise.
*/
-NVBLAST_API NvBlastActor* NvBlastFamilyDeserializeActor(NvBlastFamily* family, const void* buffer, NvBlastLog logFn);
+NV_C_API NvBlastActor* NvBlastFamilyDeserializeActor(NvBlastFamily* family, const void* buffer, NvBlastLog logFn);
/**
@@ -589,7 +589,7 @@ Retrieve the active actors associated with the given family.
\return the number of actor pointers written to actors. This will not exceed actorsSize.
*/
-NVBLAST_API uint32_t NvBlastFamilyGetActors(NvBlastActor** actors, uint32_t actorsSize, const NvBlastFamily* family, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastFamilyGetActors(NvBlastActor** actors, uint32_t actorsSize, const NvBlastFamily* family, NvBlastLog logFn);
/**
@@ -601,7 +601,7 @@ Retrieve the actor associated with the given actor index.
\return pointer to actor associated with given actor index. NULL if there is no such actor or it is inactive.
*/
-NVBLAST_API NvBlastActor* NvBlastFamilyGetActorByIndex(const NvBlastFamily* family, uint32_t actorIndex, NvBlastLog logFn);
+NV_C_API NvBlastActor* NvBlastFamilyGetActorByIndex(const NvBlastFamily* family, uint32_t actorIndex, NvBlastLog logFn);
/**
Retrieve the actor associated with the given chunk.
@@ -612,7 +612,7 @@ Retrieve the actor associated with the given chunk.
\return pointer to actor associated with given chunk. NULL if there is no such actor.
*/
-NVBLAST_API NvBlastActor* NvBlastFamilyGetChunkActor(const NvBlastFamily* family, uint32_t chunkIndex, NvBlastLog logFn);
+NV_C_API NvBlastActor* NvBlastFamilyGetChunkActor(const NvBlastFamily* family, uint32_t chunkIndex, NvBlastLog logFn);
/**
@@ -624,7 +624,7 @@ NOTE: the returned array size equals the number of support chunks in the asset.
\return pointer to actor associated with given chunk. NULL if there is no such actor.
*/
-NVBLAST_API uint32_t* NvBlastFamilyGetChunkActorIndices(const NvBlastFamily* family, NvBlastLog logFn);
+NV_C_API uint32_t* NvBlastFamilyGetChunkActorIndices(const NvBlastFamily* family, NvBlastLog logFn);
/**
@@ -635,7 +635,7 @@ Retrieve the max active actor count family could have.
\return the max number of active actors family could have.
*/
-NVBLAST_API uint32_t NvBlastFamilyGetMaxActorCount(const NvBlastFamily* family, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastFamilyGetMaxActorCount(const NvBlastFamily* family, NvBlastLog logFn);
///@} End NvBlastFamily functions
@@ -653,7 +653,7 @@ Get the number of visible chunks for this actor. May be used in conjunction wit
\return the number of visible chunk indices for the actor.
*/
-NVBLAST_API uint32_t NvBlastActorGetVisibleChunkCount(const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastActorGetVisibleChunkCount(const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -666,7 +666,7 @@ Retrieve a list of visible chunk indices for the actor into the given array.
\return the number of indices written to visibleChunkIndices. This will not exceed visibleChunkIndicesSize.
*/
-NVBLAST_API uint32_t NvBlastActorGetVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize, const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastActorGetVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize, const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -677,7 +677,7 @@ Get the number of graph nodes for this actor. May be used in conjunction with N
\return the number of graph node indices for the actor.
*/
-NVBLAST_API uint32_t NvBlastActorGetGraphNodeCount(const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastActorGetGraphNodeCount(const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -690,7 +690,7 @@ Retrieve a list of graph node indices for the actor into the given array.
\return the number of indices written to graphNodeIndices. This will not exceed graphNodeIndicesSize.
*/
-NVBLAST_API uint32_t NvBlastActorGetGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize, const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastActorGetGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize, const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -713,7 +713,7 @@ If the input actor is invalid, NULL will be returned.
\return the array of bond healths for the actor's instance family, or NULL if the actor is invalid.
*/
-NVBLAST_API const float* NvBlastActorGetBondHealths(const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API const float* NvBlastActorGetBondHealths(const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -736,7 +736,7 @@ If the input actor is invalid, NULL will be returned.
\return the array of bond healths for the actor's instance family, or NULL if the actor is invalid.
*/
-NVBLAST_API const float* NvBlastActorGetCachedBondHeaths(const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API const float* NvBlastActorGetCachedBondHeaths(const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -748,7 +748,7 @@ Tell the system to cache the bond health for the given bond index.
\return true if value was cached, false otherwise
*/
-NVBLAST_API bool NvBlastActorCacheBondHeath(const NvBlastActor* actor, uint32_t bondIndex, NvBlastLog logFn);
+NV_C_API bool NvBlastActorCacheBondHeath(const NvBlastActor* actor, uint32_t bondIndex, NvBlastLog logFn);
/**
@@ -760,7 +760,7 @@ on the buffer size needed for any actor instanced from an NvBlastAsset, use NvBl
\return the required buffer size in bytes.
*/
-NVBLAST_API uint32_t NvBlastActorGetSerializationSize(const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastActorGetSerializationSize(const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -773,7 +773,7 @@ Serialize a single actor to a buffer.
\return the number of bytes written to the buffer, or 0 if there is an error (such as an under-sized buffer).
*/
-NVBLAST_API uint32_t NvBlastActorSerialize(void* buffer, uint32_t bufferSize, const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastActorSerialize(void* buffer, uint32_t bufferSize, const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -784,7 +784,7 @@ Access to an actor's family.
\return the family with which the actor is associated.
*/
-NVBLAST_API NvBlastFamily* NvBlastActorGetFamily(const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API NvBlastFamily* NvBlastActorGetFamily(const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -795,7 +795,7 @@ Access to an actor's internal index.
\return actor's internal index in family.
*/
-NVBLAST_API uint32_t NvBlastActorGetIndex(const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastActorGetIndex(const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -806,7 +806,7 @@ Deactivate an actor within its family. Conceptually this is "destroying" the ac
\return true iff successful (actor was active).
*/
-NVBLAST_API bool NvBlastActorDeactivate(NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API bool NvBlastActorDeactivate(NvBlastActor* actor, NvBlastLog logFn);
///@} End NvBlastActor accessor, serialization, and deactivation functions
@@ -838,7 +838,7 @@ As output:
Chunks and Bond userdata reflect the respective userdata set during asset initialization, where implemented by the material function.
Health values denote how much damage is to be applied.
*/
-NVBLAST_API void NvBlastActorGenerateFracture
+NV_C_API void NvBlastActorGenerateFracture
(
NvBlastFractureBuffers* commandBuffers,
const NvBlastActor* actor,
@@ -879,7 +879,7 @@ eventBuffers as output:
commands and eventBuffers may point to the same memory.
*/
-NVBLAST_API void NvBlastActorApplyFracture
+NV_C_API void NvBlastActorApplyFracture
(
NvBlastFractureBuffers* eventBuffers,
NvBlastActor* actor,
@@ -902,7 +902,7 @@ Releases the oldActor and creates its children newActors if necessary.
\return 1..n: new actors were created
\return 0: oldActor is unchanged
*/
-NVBLAST_API uint32_t NvBlastActorSplit
+NV_C_API uint32_t NvBlastActorSplit
(
NvBlastActorSplitEvent* result,
NvBlastActor* actor,
@@ -922,7 +922,7 @@ based upon the actor that will be passed into that function.
\return the number of bytes of scratch memory required for a call to NvBlastActorSplit with that actor.
*/
-NVBLAST_API size_t NvBlastActorGetRequiredScratchForSplit(const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API size_t NvBlastActorGetRequiredScratchForSplit(const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -934,7 +934,7 @@ value can't exceed chunk count.
\return the upper-bound number of actors which can be created by calling NvBlastActorSplit with that actor.
*/
-NVBLAST_API uint32_t NvBlastActorGetMaxActorCountForSplit(const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API uint32_t NvBlastActorGetMaxActorCountForSplit(const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -945,7 +945,7 @@ Determines if the actor can fracture further.
\return true if any result can be expected from fracturing the actor. false if no further change to the actor is possible.
*/
-NVBLAST_API bool NvBlastActorCanFracture(const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API bool NvBlastActorCanFracture(const NvBlastActor* actor, NvBlastLog logFn);
/**
@@ -957,13 +957,13 @@ If actor is not damaged calling NvBlastActorSplit will make no effect.
\return true iff split call is required for this actor.
*/
-NVBLAST_API bool NvBlastActorIsSplitRequired(const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API bool NvBlastActorIsSplitRequired(const NvBlastActor* actor, NvBlastLog logFn);
/**
\return true iff this actor contains the "external" support graph node, created when a bond contains the UINT32_MAX value for one of their chunkIndices.
*/
-NVBLAST_API bool NvBlastActorHasExternalBonds(const NvBlastActor* actor, NvBlastLog logFn);
+NV_C_API bool NvBlastActorHasExternalBonds(const NvBlastActor* actor, NvBlastLog logFn);
// DEPRICATED: remove on next major version bump
#define NvBlastActorIsBoundToWorld NvBlastActorHasExternalBonds
@@ -981,7 +981,7 @@ Resets all values in the given NvBlastTimers struct to zero.
\param[in] timers The NvBlastTimers to set to zero.
*/
-NVBLAST_API void NvBlastTimersReset(NvBlastTimers* timers);
+NV_C_API void NvBlastTimersReset(NvBlastTimers* timers);
/**
@@ -991,7 +991,7 @@ Convert a tick value from NvBlastTimers to seconds.
\return the seconds correposnding to the input tick value.
*/
-NVBLAST_API double NvBlastTicksToSeconds(int64_t ticks);
+NV_C_API double NvBlastTicksToSeconds(int64_t ticks);
///@} End NvBlastTimers functions and helpers
diff --git a/blast/include/lowlevel/NvBlastTypes.h b/blast/include/lowlevel/NvBlastTypes.h
index 753954b0e..a47ac21f1 100644
--- a/blast/include/lowlevel/NvBlastTypes.h
+++ b/blast/include/lowlevel/NvBlastTypes.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -32,7 +32,7 @@
#define NVBLASTTYPES_H
-#include "NvBlastPreprocessor.h"
+#include "NvPreprocessor.h"
#include
#include
diff --git a/blast/include/shared/NvFoundation/Nv.h b/blast/include/shared/NvFoundation/Nv.h
new file mode 100644
index 000000000..14ea7ab8b
--- /dev/null
+++ b/blast/include/shared/NvFoundation/Nv.h
@@ -0,0 +1,88 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NV_H
+#define NV_NVFOUNDATION_NV_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "NvSimpleTypes.h"
+
+/** files to always include */
+#include
+#include
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+class NvAllocatorCallback;
+class NvErrorCallback;
+struct NvErrorCode;
+class NvAssertHandler;
+
+class NvInputStream;
+class NvInputData;
+class NvOutputStream;
+
+class NvVec2;
+class NvVec3;
+class NvVec4;
+class NvMat33;
+class NvMat44;
+class NvPlane;
+class NvQuat;
+class NvTransform;
+class NvBounds3;
+
+/** enum for empty constructor tag*/
+enum NvEMPTY
+{
+ NvEmpty
+};
+
+/** enum for zero constructor tag for vectors and matrices */
+enum NvZERO
+{
+ NvZero
+};
+
+/** enum for identity constructor flag for quaternions, transforms, and matrices */
+enum NvIDENTITY
+{
+ NvIdentity
+};
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NV_H
diff --git a/blast/include/shared/NvFoundation/NvAllocatorCallback.h b/blast/include/shared/NvFoundation/NvAllocatorCallback.h
new file mode 100644
index 000000000..fc2aaff90
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvAllocatorCallback.h
@@ -0,0 +1,94 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVALLOCATORCALLBACK_H
+#define NV_NVFOUNDATION_NVALLOCATORCALLBACK_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "Nv.h"
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+/**
+\brief Abstract base class for an application defined memory allocator that can be used by the Nv library.
+
+\note The SDK state should not be modified from within any allocation/free function.
+
+Threading: All methods of this class should be thread safe as it can be called from the user thread
+or the physics processing thread(s).
+*/
+
+class NvAllocatorCallback
+{
+ public:
+ /**
+ \brief destructor
+ */
+ virtual ~NvAllocatorCallback()
+ {
+ }
+
+ /**
+ \brief Allocates size bytes of memory, which must be 16-byte aligned.
+
+ This method should never return NULL. If you run out of memory, then
+ you should terminate the app or take some other appropriate action.
+
+ Threading: This function should be thread safe as it can be called in the context of the user thread
+ and physics processing thread(s).
+
+ \param size Number of bytes to allocate.
+ \param typeName Name of the datatype that is being allocated
+ \param filename The source file which allocated the memory
+ \param line The source line which allocated the memory
+ \return The allocated block of memory.
+ */
+ virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) = 0;
+
+ /**
+ \brief Frees memory previously allocated by allocate().
+
+ Threading: This function should be thread safe as it can be called in the context of the user thread
+ and physics processing thread(s).
+
+ \param ptr Memory to free.
+ */
+ virtual void deallocate(void* ptr) = 0;
+};
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVALLOCATORCALLBACK_H
diff --git a/blast/include/shared/NvFoundation/NvAssert.h b/blast/include/shared/NvFoundation/NvAssert.h
new file mode 100644
index 000000000..d895462a4
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvAssert.h
@@ -0,0 +1,96 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVASSERT_H
+#define NV_NVFOUNDATION_NVASSERT_H
+
+/** \addtogroup foundation
+@{ */
+
+#include "Nv.h"
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+/* Base class to handle assert failures */
+class NvAssertHandler
+{
+ public:
+ virtual ~NvAssertHandler()
+ {
+ }
+ virtual void operator()(const char* exp, const char* file, int line, bool& ignore) = 0;
+};
+
+NV_FOUNDATION_API NvAssertHandler& NvGetAssertHandler();
+NV_FOUNDATION_API void NvSetAssertHandler(NvAssertHandler& handler);
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+#if !NV_ENABLE_ASSERTS
+#define NV_ASSERT(exp) ((void)0)
+#define NV_ALWAYS_ASSERT_MESSAGE(exp) ((void)0)
+#define NV_ASSERT_WITH_MESSAGE(condition, message) ((void)0)
+#elif NV_SPU
+#include "ps3/NvPS3Assert.h"
+#else
+#if NV_VC
+#define NV_CODE_ANALYSIS_ASSUME(exp) \
+ __analysis_assume(!!(exp)) // This macro will be used to get rid of analysis warning messages if a NV_ASSERT is used
+// to "guard" illegal mem access, for example.
+#else
+#define NV_CODE_ANALYSIS_ASSUME(exp)
+#endif
+#define NV_ASSERT(exp) \
+ { \
+ static bool _ignore = false; \
+ ((void)((!!(exp)) || (!_ignore && (nvidia::NvGetAssertHandler()(#exp, __FILE__, __LINE__, _ignore), false)))); \
+ NV_CODE_ANALYSIS_ASSUME(exp); \
+ }
+#define NV_ALWAYS_ASSERT_MESSAGE(exp) \
+ { \
+ static bool _ignore = false; \
+ if(!_ignore) \
+ nvidia::NvGetAssertHandler()(exp, __FILE__, __LINE__, _ignore); \
+ }
+#define NV_ASSERT_WITH_MESSAGE(exp, message) \
+ { \
+ static bool _ignore = false; \
+ ((void)((!!(exp)) || (!_ignore && (nvidia::NvGetAssertHandler()(message, __FILE__, __LINE__, _ignore), false)))); \
+ NV_CODE_ANALYSIS_ASSUME(exp); \
+ }
+#endif
+
+#define NV_ALWAYS_ASSERT() NV_ASSERT(0)
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVASSERT_H
diff --git a/blast/include/shared/NvFoundation/NvBounds3.h b/blast/include/shared/NvFoundation/NvBounds3.h
new file mode 100644
index 000000000..738a69562
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvBounds3.h
@@ -0,0 +1,479 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVBOUNDS3_H
+#define NV_NVFOUNDATION_NVBOUNDS3_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "NvTransform.h"
+#include "NvMat33.h"
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+// maximum extents defined such that floating point exceptions are avoided for standard use cases
+#define NV_MAX_BOUNDS_EXTENTS (NV_MAX_REAL * 0.25f)
+
+/**
+\brief Class representing 3D range or axis aligned bounding box.
+
+Stored as minimum and maximum extent corners. Alternate representation
+would be center and dimensions.
+May be empty or nonempty. For nonempty bounds, minimum <= maximum has to hold for all axes.
+Empty bounds have to be represented as minimum = NV_MAX_BOUNDS_EXTENTS and maximum = -NV_MAX_BOUNDS_EXTENTS for all
+axes.
+All other representations are invalid and the behavior is undefined.
+*/
+class NvBounds3
+{
+ public:
+ /**
+ \brief Default constructor, not performing any initialization for performance reason.
+ \remark Use empty() function below to construct empty bounds.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3()
+ {
+ }
+
+ /**
+ \brief Construct from two bounding points
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3(const NvVec3& minimum, const NvVec3& maximum);
+
+ /**
+ \brief Return empty bounds.
+ */
+ static NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 empty();
+
+ /**
+ \brief returns the AABB containing v0 and v1.
+ \param v0 first point included in the AABB.
+ \param v1 second point included in the AABB.
+ */
+ static NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 boundsOfPoints(const NvVec3& v0, const NvVec3& v1);
+
+ /**
+ \brief returns the AABB from center and extents vectors.
+ \param center Center vector
+ \param extent Extents vector
+ */
+ static NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 centerExtents(const NvVec3& center, const NvVec3& extent);
+
+ /**
+ \brief Construct from center, extent, and (not necessarily orthogonal) basis
+ */
+ static NV_CUDA_CALLABLE NV_INLINE NvBounds3
+ basisExtent(const NvVec3& center, const NvMat33& basis, const NvVec3& extent);
+
+ /**
+ \brief Construct from pose and extent
+ */
+ static NV_CUDA_CALLABLE NV_INLINE NvBounds3 poseExtent(const NvTransform& pose, const NvVec3& extent);
+
+ /**
+ \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
+
+ This version is safe to call for empty bounds.
+
+ \param[in] matrix Transform to apply, can contain scaling as well
+ \param[in] bounds The bounds to transform.
+ */
+ static NV_CUDA_CALLABLE NV_INLINE NvBounds3 transformSafe(const NvMat33& matrix, const NvBounds3& bounds);
+
+ /**
+ \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
+
+ Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead.
+
+ \param[in] matrix Transform to apply, can contain scaling as well
+ \param[in] bounds The bounds to transform.
+ */
+ static NV_CUDA_CALLABLE NV_INLINE NvBounds3 transformFast(const NvMat33& matrix, const NvBounds3& bounds);
+
+ /**
+ \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
+
+ This version is safe to call for empty bounds.
+
+ \param[in] transform Transform to apply, can contain scaling as well
+ \param[in] bounds The bounds to transform.
+ */
+ static NV_CUDA_CALLABLE NV_INLINE NvBounds3 transformSafe(const NvTransform& transform, const NvBounds3& bounds);
+
+ /**
+ \brief gets the transformed bounds of the passed AABB (resulting in a bigger AABB).
+
+ Calling this method for empty bounds leads to undefined behavior. Use #transformSafe() instead.
+
+ \param[in] transform Transform to apply, can contain scaling as well
+ \param[in] bounds The bounds to transform.
+ */
+ static NV_CUDA_CALLABLE NV_INLINE NvBounds3 transformFast(const NvTransform& transform, const NvBounds3& bounds);
+
+ /**
+ \brief Sets empty to true
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE void setEmpty();
+
+ /**
+ \brief Sets the bounds to maximum size [-NV_MAX_BOUNDS_EXTENTS, NV_MAX_BOUNDS_EXTENTS].
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE void setMaximal();
+
+ /**
+ \brief expands the volume to include v
+ \param v Point to expand to.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE void include(const NvVec3& v);
+
+ /**
+ \brief expands the volume to include b.
+ \param b Bounds to perform union with.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE void include(const NvBounds3& b);
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isEmpty() const;
+
+ /**
+ \brief indicates whether the intersection of this and b is empty or not.
+ \param b Bounds to test for intersection.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool intersects(const NvBounds3& b) const;
+
+ /**
+ \brief computes the 1D-intersection between two AABBs, on a given axis.
+ \param a the other AABB
+ \param axis the axis (0, 1, 2)
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool intersects1D(const NvBounds3& a, uint32_t axis) const;
+
+ /**
+ \brief indicates if these bounds contain v.
+ \param v Point to test against bounds.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool contains(const NvVec3& v) const;
+
+ /**
+ \brief checks a box is inside another box.
+ \param box the other AABB
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isInside(const NvBounds3& box) const;
+
+ /**
+ \brief returns the center of this axis aligned box.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getCenter() const;
+
+ /**
+ \brief get component of the box's center along a given axis
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float getCenter(uint32_t axis) const;
+
+ /**
+ \brief get component of the box's extents along a given axis
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float getExtents(uint32_t axis) const;
+
+ /**
+ \brief returns the dimensions (width/height/depth) of this axis aligned box.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getDimensions() const;
+
+ /**
+ \brief returns the extents, which are half of the width/height/depth.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getExtents() const;
+
+ /**
+ \brief scales the AABB.
+
+ This version is safe to call for empty bounds.
+
+ \param scale Factor to scale AABB by.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE void scaleSafe(float scale);
+
+ /**
+ \brief scales the AABB.
+
+ Calling this method for empty bounds leads to undefined behavior. Use #scaleSafe() instead.
+
+ \param scale Factor to scale AABB by.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE void scaleFast(float scale);
+
+ /**
+ fattens the AABB in all 3 dimensions by the given distance.
+
+ This version is safe to call for empty bounds.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE void fattenSafe(float distance);
+
+ /**
+ fattens the AABB in all 3 dimensions by the given distance.
+
+ Calling this method for empty bounds leads to undefined behavior. Use #fattenSafe() instead.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE void fattenFast(float distance);
+
+ /**
+ checks that the AABB values are not NaN
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite() const;
+
+ /**
+ checks that the AABB values describe a valid configuration.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isValid() const;
+
+ NvVec3 minimum, maximum;
+};
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3::NvBounds3(const NvVec3& minimum_, const NvVec3& maximum_)
+: minimum(minimum_), maximum(maximum_)
+{
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 NvBounds3::empty()
+{
+ return NvBounds3(NvVec3(NV_MAX_BOUNDS_EXTENTS), NvVec3(-NV_MAX_BOUNDS_EXTENTS));
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::isFinite() const
+{
+ return minimum.isFinite() && maximum.isFinite();
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 NvBounds3::boundsOfPoints(const NvVec3& v0, const NvVec3& v1)
+{
+ return NvBounds3(v0.minimum(v1), v0.maximum(v1));
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE NvBounds3 NvBounds3::centerExtents(const NvVec3& center, const NvVec3& extent)
+{
+ return NvBounds3(center - extent, center + extent);
+}
+
+NV_CUDA_CALLABLE NV_INLINE NvBounds3
+NvBounds3::basisExtent(const NvVec3& center, const NvMat33& basis, const NvVec3& extent)
+{
+ // extended basis vectors
+ NvVec3 c0 = basis.column0 * extent.x;
+ NvVec3 c1 = basis.column1 * extent.y;
+ NvVec3 c2 = basis.column2 * extent.z;
+
+ NvVec3 w;
+ // find combination of base vectors that produces max. distance for each component = sum of abs()
+ w.x = NvAbs(c0.x) + NvAbs(c1.x) + NvAbs(c2.x);
+ w.y = NvAbs(c0.y) + NvAbs(c1.y) + NvAbs(c2.y);
+ w.z = NvAbs(c0.z) + NvAbs(c1.z) + NvAbs(c2.z);
+
+ return NvBounds3(center - w, center + w);
+}
+
+NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::poseExtent(const NvTransform& pose, const NvVec3& extent)
+{
+ return basisExtent(pose.p, NvMat33(pose.q), extent);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::setEmpty()
+{
+ minimum = NvVec3(NV_MAX_BOUNDS_EXTENTS);
+ maximum = NvVec3(-NV_MAX_BOUNDS_EXTENTS);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::setMaximal()
+{
+ minimum = NvVec3(-NV_MAX_BOUNDS_EXTENTS);
+ maximum = NvVec3(NV_MAX_BOUNDS_EXTENTS);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::include(const NvVec3& v)
+{
+ NV_ASSERT(isValid());
+ minimum = minimum.minimum(v);
+ maximum = maximum.maximum(v);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::include(const NvBounds3& b)
+{
+ NV_ASSERT(isValid());
+ minimum = minimum.minimum(b.minimum);
+ maximum = maximum.maximum(b.maximum);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::isEmpty() const
+{
+ NV_ASSERT(isValid());
+ return minimum.x > maximum.x;
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::intersects(const NvBounds3& b) const
+{
+ NV_ASSERT(isValid() && b.isValid());
+ return !(b.minimum.x > maximum.x || minimum.x > b.maximum.x || b.minimum.y > maximum.y || minimum.y > b.maximum.y ||
+ b.minimum.z > maximum.z || minimum.z > b.maximum.z);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::intersects1D(const NvBounds3& a, uint32_t axis) const
+{
+ NV_ASSERT(isValid() && a.isValid());
+ return maximum[axis] >= a.minimum[axis] && a.maximum[axis] >= minimum[axis];
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::contains(const NvVec3& v) const
+{
+ NV_ASSERT(isValid());
+
+ return !(v.x < minimum.x || v.x > maximum.x || v.y < minimum.y || v.y > maximum.y || v.z < minimum.z ||
+ v.z > maximum.z);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::isInside(const NvBounds3& box) const
+{
+ NV_ASSERT(isValid() && box.isValid());
+ if(box.minimum.x > minimum.x)
+ return false;
+ if(box.minimum.y > minimum.y)
+ return false;
+ if(box.minimum.z > minimum.z)
+ return false;
+ if(box.maximum.x < maximum.x)
+ return false;
+ if(box.maximum.y < maximum.y)
+ return false;
+ if(box.maximum.z < maximum.z)
+ return false;
+ return true;
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 NvBounds3::getCenter() const
+{
+ NV_ASSERT(isValid());
+ return (minimum + maximum) * 0.5f;
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvBounds3::getCenter(uint32_t axis) const
+{
+ NV_ASSERT(isValid());
+ return (minimum[axis] + maximum[axis]) * 0.5f;
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvBounds3::getExtents(uint32_t axis) const
+{
+ NV_ASSERT(isValid());
+ return (maximum[axis] - minimum[axis]) * 0.5f;
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 NvBounds3::getDimensions() const
+{
+ NV_ASSERT(isValid());
+ return maximum - minimum;
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 NvBounds3::getExtents() const
+{
+ NV_ASSERT(isValid());
+ return getDimensions() * 0.5f;
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::scaleSafe(float scale)
+{
+ NV_ASSERT(isValid());
+ if(!isEmpty())
+ scaleFast(scale);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::scaleFast(float scale)
+{
+ NV_ASSERT(isValid());
+ *this = centerExtents(getCenter(), getExtents() * scale);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::fattenSafe(float distance)
+{
+ NV_ASSERT(isValid());
+ if(!isEmpty())
+ fattenFast(distance);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE void NvBounds3::fattenFast(float distance)
+{
+ NV_ASSERT(isValid());
+ minimum.x -= distance;
+ minimum.y -= distance;
+ minimum.z -= distance;
+
+ maximum.x += distance;
+ maximum.y += distance;
+ maximum.z += distance;
+}
+
+NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::transformSafe(const NvMat33& matrix, const NvBounds3& bounds)
+{
+ NV_ASSERT(bounds.isValid());
+ return !bounds.isEmpty() ? transformFast(matrix, bounds) : bounds;
+}
+
+NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::transformFast(const NvMat33& matrix, const NvBounds3& bounds)
+{
+ NV_ASSERT(bounds.isValid());
+ return NvBounds3::basisExtent(matrix * bounds.getCenter(), matrix, bounds.getExtents());
+}
+
+NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::transformSafe(const NvTransform& transform, const NvBounds3& bounds)
+{
+ NV_ASSERT(bounds.isValid());
+ return !bounds.isEmpty() ? transformFast(transform, bounds) : bounds;
+}
+
+NV_CUDA_CALLABLE NV_INLINE NvBounds3 NvBounds3::transformFast(const NvTransform& transform, const NvBounds3& bounds)
+{
+ NV_ASSERT(bounds.isValid());
+ return NvBounds3::basisExtent(transform.transform(bounds.getCenter()), NvMat33(transform.q), bounds.getExtents());
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvBounds3::isValid() const
+{
+ return (isFinite() && (((minimum.x <= maximum.x) && (minimum.y <= maximum.y) && (minimum.z <= maximum.z)) ||
+ ((minimum.x == NV_MAX_BOUNDS_EXTENTS) && (minimum.y == NV_MAX_BOUNDS_EXTENTS) &&
+ (minimum.z == NV_MAX_BOUNDS_EXTENTS) && (maximum.x == -NV_MAX_BOUNDS_EXTENTS) &&
+ (maximum.y == -NV_MAX_BOUNDS_EXTENTS) && (maximum.z == -NV_MAX_BOUNDS_EXTENTS))));
+}
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVBOUNDS3_H
diff --git a/blast/include/lowlevel/NvCTypes.h b/blast/include/shared/NvFoundation/NvCTypes.h
similarity index 92%
rename from blast/include/lowlevel/NvCTypes.h
rename to blast/include/shared/NvFoundation/NvCTypes.h
index 5ba63696e..a691ade17 100644
--- a/blast/include/lowlevel/NvCTypes.h
+++ b/blast/include/shared/NvFoundation/NvCTypes.h
@@ -22,13 +22,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2008-2022 NVIDIA Corporation. All rights reserved.
-// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
-// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
-
-//! @file
-//!
-//! @brief Defines simple C vector types
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_C_TYPES_H
#define NV_C_TYPES_H
@@ -122,4 +118,6 @@ typedef struct
int32_t x, y, z, w;
} NvcVec4i;
+/** @} */
+
#endif // NV_C_TYPES_H
diff --git a/blast/include/globals/NvBlastProfiler.h b/blast/include/shared/NvFoundation/NvErrorCallback.h
similarity index 54%
rename from blast/include/globals/NvBlastProfiler.h
rename to blast/include/shared/NvFoundation/NvErrorCallback.h
index 3cf9f2b18..90bccfd46 100644
--- a/blast/include/globals/NvBlastProfiler.h
+++ b/blast/include/shared/NvFoundation/NvErrorCallback.h
@@ -22,79 +22,51 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
-//! @file
-//!
-//! @brief Profiler utility API in the NvBlastGlobals library
-
-#ifndef NVBLASTPROFILER_H
-#define NVBLASTPROFILER_H
-
-#include "NvBlastPreprocessor.h"
+#ifndef NV_NVFOUNDATION_NVERRORCALLBACK_H
+#define NV_NVFOUNDATION_NVERRORCALLBACK_H
+/** \addtogroup foundation
+@{
+*/
-namespace Nv
+#include "NvErrors.h"
+#if !NV_DOXYGEN
+namespace nvidia
{
-namespace Blast
-{
-
+#endif
/**
-Custom Blast profiler interface.
-*/
-class ProfilerCallback
-{
-protected:
- virtual ~ProfilerCallback() {}
-
-public:
- /**
- Called when a nested profile zone starts.
- */
- virtual void zoneStart(const char* name) = 0;
-
- /**
- Called when the current profile zone ends.
- */
- virtual void zoneEnd() = 0;
-};
+\brief User defined interface class. Used by the library to emit debug information.
+\note The SDK state should not be modified from within any error reporting functions.
-/**
-Profiler detail to be reported. The higher setting is used, the more details are reported.
+Threading: The SDK sequences its calls to the output stream using a mutex, so the class need not
+be implemented in a thread-safe manner if the SDK is the only client.
*/
-struct ProfilerDetail
+class NvErrorCallback
{
- enum Level
+ public:
+ virtual ~NvErrorCallback()
{
- LOW,
- MEDIUM,
- HIGH
- };
-};
-
-
-} // namespace Blast
-} // namespace Nv
-
-
-/**
-Profiler features are only active in checked, debug and profile builds.
-*/
-
-/**
-Set a custom profiler callback. May be nullptr (the default).
-*/
-NVBLAST_API void NvBlastProfilerSetCallback(Nv::Blast::ProfilerCallback* pcb);
-
-
-/**
-Sets the depth of reported profile zones.
-Higher levels (more nesting) of instrumentation can have a significant impact.
-Defaults to Nv::Blast::ProfilerDetail::Level::LOW.
-*/
-NVBLAST_API void NvBlastProfilerSetDetail(Nv::Blast::ProfilerDetail::Level);
+ }
+ /**
+ \brief Reports an error code.
+ \param code Error code, see #NvErrorCode
+ \param message Message to display.
+    \param file File error occurred in.
+    \param line Line number error occurred on.
+ */
+ virtual void reportError(NvErrorCode::Enum code, const char* message, const char* file, int line) = 0;
+};
+#if !NV_DOXYGEN
+} // namespace nvidia
#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVERRORCALLBACK_H
diff --git a/blast/include/shared/NvFoundation/NvErrors.h b/blast/include/shared/NvFoundation/NvErrors.h
new file mode 100644
index 000000000..c10a27dbb
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvErrors.h
@@ -0,0 +1,92 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVERRORS_H
+#define NV_NVFOUNDATION_NVERRORS_H
+/** \addtogroup foundation
+@{
+*/
+
+#include "Nv.h"
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+/**
+\brief Error codes
+
+These error codes are passed to #NvErrorCallback
+
+@see NvErrorCallback
+*/
+
+struct NvErrorCode
+{
+ enum Enum
+ {
+ eNO_ERROR = 0,
+
+ //! \brief An informational message.
+ eDEBUG_INFO = 1,
+
+ //! \brief a warning message for the user to help with debugging
+ eDEBUG_WARNING = 2,
+
+ //! \brief method called with invalid parameter(s)
+ eINVALID_PARAMETER = 4,
+
+ //! \brief method was called at a time when an operation is not possible
+ eINVALID_OPERATION = 8,
+
+ //! \brief method failed to allocate some memory
+ eOUT_OF_MEMORY = 16,
+
+ /** \brief The library failed for some reason.
+ Possibly you have passed invalid values like NaNs, which are not checked for.
+ */
+ eINTERNAL_ERROR = 32,
+
+ //! \brief An unrecoverable error, execution should be halted and log output flushed
+ eABORT = 64,
+
+ //! \brief The SDK has determined that an operation may result in poor performance.
+ ePERF_WARNING = 128,
+
+ //! \brief A bit mask for including all errors
+ eMASK_ALL = -1
+ };
+};
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVERRORS_H
diff --git a/blast/include/shared/NvFoundation/NvFlags.h b/blast/include/shared/NvFoundation/NvFlags.h
new file mode 100644
index 000000000..6dd5d6b94
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvFlags.h
@@ -0,0 +1,374 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVFLAGS_H
+#define NV_NVFOUNDATION_NVFLAGS_H
+
+/** \addtogroup foundation
+ @{
+*/
+
+#include "Nv.h"
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+/**
+\brief Container for bitfield flag variables associated with a specific enum type.
+
+This allows for type safe manipulation for bitfields.
+
+Example
+ // enum that defines each bit...
+ struct MyEnum
+ {
+ enum Enum
+ {
+ eMAN = 1,
+ eBEAR = 2,
+ ePIG = 4,
+ };
+ };
+
+ // implements some convenient global operators.
+ NV_FLAGS_OPERATORS(MyEnum::Enum, uint8_t);
+
+    NvFlags<MyEnum::Enum, uint8_t> myFlags;
+ myFlags |= MyEnum::eMAN;
+ myFlags |= MyEnum::eBEAR | MyEnum::ePIG;
+ if(myFlags & MyEnum::eBEAR)
+ {
+ doSomething();
+ }
+*/
+
+template<typename enumtype, typename storagetype = uint32_t>
+class NvFlags
+{
+ public:
+ typedef storagetype InternalType;
+
+ NV_INLINE explicit NvFlags(const NvEMPTY)
+ {
+ }
+ NV_INLINE NvFlags(void);
+ NV_INLINE NvFlags(enumtype e);
+ NV_INLINE NvFlags(const NvFlags& f);
+ NV_INLINE explicit NvFlags(storagetype b);
+
+ NV_INLINE bool isSet(enumtype e) const;
+ NV_INLINE NvFlags& set(enumtype e);
+ NV_INLINE bool operator==(enumtype e) const;
+ NV_INLINE bool operator==(const NvFlags& f) const;
+ NV_INLINE bool operator==(bool b) const;
+ NV_INLINE bool operator!=(enumtype e) const;
+ NV_INLINE bool operator!=(const NvFlags& f) const;
+
+ NV_INLINE NvFlags& operator=(const NvFlags& f);
+ NV_INLINE NvFlags& operator=(enumtype e);
+
+ NV_INLINE NvFlags& operator|=(enumtype e);
+ NV_INLINE NvFlags& operator|=(const NvFlags& f);
+ NV_INLINE NvFlags operator|(enumtype e) const;
+ NV_INLINE NvFlags operator|(const NvFlags& f) const;
+
+ NV_INLINE NvFlags& operator&=(enumtype e);
+ NV_INLINE NvFlags& operator&=(const NvFlags& f);
+ NV_INLINE NvFlags operator&(enumtype e) const;
+ NV_INLINE NvFlags operator&(const NvFlags& f) const;
+
+ NV_INLINE NvFlags& operator^=(enumtype e);
+ NV_INLINE NvFlags& operator^=(const NvFlags& f);
+ NV_INLINE NvFlags operator^(enumtype e) const;
+ NV_INLINE NvFlags operator^(const NvFlags& f) const;
+
+ NV_INLINE NvFlags operator~(void) const;
+
+ NV_INLINE operator bool(void) const;
+ NV_INLINE operator uint8_t(void) const;
+ NV_INLINE operator uint16_t(void) const;
+ NV_INLINE operator uint32_t(void) const;
+
+ NV_INLINE void clear(enumtype e);
+
+ public:
+ friend NV_INLINE NvFlags operator&(enumtype a, NvFlags& b)
+ {
+ NvFlags out;
+ out.mBits = a & b.mBits;
+ return out;
+ }
+
+ private:
+ storagetype mBits;
+};
+
+#define NV_FLAGS_OPERATORS(enumtype, storagetype)                                                                      \
+    NV_INLINE NvFlags<enumtype, storagetype> operator|(enumtype a, enumtype b)                                         \
+    {                                                                                                                  \
+        NvFlags<enumtype, storagetype> r(a);                                                                           \
+        r |= b;                                                                                                        \
+        return r;                                                                                                      \
+    }                                                                                                                  \
+    NV_INLINE NvFlags<enumtype, storagetype> operator&(enumtype a, enumtype b)                                         \
+    {                                                                                                                  \
+        NvFlags<enumtype, storagetype> r(a);                                                                           \
+        r &= b;                                                                                                        \
+        return r;                                                                                                      \
+    }                                                                                                                  \
+    NV_INLINE NvFlags<enumtype, storagetype> operator~(enumtype a)                                                     \
+    {                                                                                                                  \
+        return ~NvFlags<enumtype, storagetype>(a);                                                                     \
+    }
+
+#define NV_FLAGS_TYPEDEF(x, y)                                                                                         \
+    typedef NvFlags<x::Enum, y> x##s;                                                                                  \
+    NV_FLAGS_OPERATORS(x::Enum, y)
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>::NvFlags(void)
+{
+    mBits = 0;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>::NvFlags(enumtype e)
+{
+    mBits = static_cast<storagetype>(e);
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>::NvFlags(const NvFlags<enumtype, storagetype>& f)
+{
+    mBits = f.mBits;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>::NvFlags(storagetype b)
+{
+    mBits = b;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE bool NvFlags<enumtype, storagetype>::isSet(enumtype e) const
+{
+    return (mBits & static_cast<storagetype>(e)) == static_cast<storagetype>(e);
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::set(enumtype e)
+{
+    mBits = static_cast<storagetype>(e);
+    return *this;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE bool NvFlags<enumtype, storagetype>::operator==(enumtype e) const
+{
+    return mBits == static_cast<storagetype>(e);
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE bool NvFlags<enumtype, storagetype>::operator==(const NvFlags<enumtype, storagetype>& f) const
+{
+    return mBits == f.mBits;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE bool NvFlags<enumtype, storagetype>::operator==(bool b) const
+{
+    return bool(*this) == b;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE bool NvFlags<enumtype, storagetype>::operator!=(enumtype e) const
+{
+    return mBits != static_cast<storagetype>(e);
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE bool NvFlags<enumtype, storagetype>::operator!=(const NvFlags<enumtype, storagetype>& f) const
+{
+    return mBits != f.mBits;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator=(enumtype e)
+{
+    mBits = static_cast<storagetype>(e);
+    return *this;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator=(const NvFlags<enumtype, storagetype>& f)
+{
+    mBits = f.mBits;
+    return *this;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator|=(enumtype e)
+{
+    mBits |= static_cast<storagetype>(e);
+    return *this;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::
+operator|=(const NvFlags<enumtype, storagetype>& f)
+{
+    mBits |= f.mBits;
+    return *this;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::operator|(enumtype e) const
+{
+    NvFlags<enumtype, storagetype> out(*this);
+    out |= e;
+    return out;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::
+operator|(const NvFlags<enumtype, storagetype>& f) const
+{
+    NvFlags<enumtype, storagetype> out(*this);
+    out |= f;
+    return out;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator&=(enumtype e)
+{
+    mBits &= static_cast<storagetype>(e);
+    return *this;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::
+operator&=(const NvFlags<enumtype, storagetype>& f)
+{
+    mBits &= f.mBits;
+    return *this;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::operator&(enumtype e) const
+{
+    NvFlags<enumtype, storagetype> out = *this;
+    out.mBits &= static_cast<storagetype>(e);
+    return out;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::
+operator&(const NvFlags<enumtype, storagetype>& f) const
+{
+    NvFlags<enumtype, storagetype> out = *this;
+    out.mBits &= f.mBits;
+    return out;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::operator^=(enumtype e)
+{
+    mBits ^= static_cast<storagetype>(e);
+    return *this;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>& NvFlags<enumtype, storagetype>::
+operator^=(const NvFlags<enumtype, storagetype>& f)
+{
+    mBits ^= f.mBits;
+    return *this;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::operator^(enumtype e) const
+{
+    NvFlags<enumtype, storagetype> out = *this;
+    out.mBits ^= static_cast<storagetype>(e);
+    return out;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::
+operator^(const NvFlags<enumtype, storagetype>& f) const
+{
+    NvFlags<enumtype, storagetype> out = *this;
+    out.mBits ^= f.mBits;
+    return out;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype> NvFlags<enumtype, storagetype>::operator~(void) const
+{
+    NvFlags<enumtype, storagetype> out;
+    out.mBits = storagetype(~mBits);
+    return out;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>::operator bool(void) const
+{
+    return mBits ? true : false;
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>::operator uint8_t(void) const
+{
+    return static_cast<uint8_t>(mBits);
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>::operator uint16_t(void) const
+{
+    return static_cast<uint16_t>(mBits);
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE NvFlags<enumtype, storagetype>::operator uint32_t(void) const
+{
+    return static_cast<uint32_t>(mBits);
+}
+
+template<typename enumtype, typename storagetype>
+NV_INLINE void NvFlags<enumtype, storagetype>::clear(enumtype e)
+{
+    mBits &= ~static_cast<storagetype>(e);
+}
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVFLAGS_H
diff --git a/blast/include/shared/NvFoundation/NvIO.h b/blast/include/shared/NvFoundation/NvIO.h
new file mode 100644
index 000000000..5e892cf98
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvIO.h
@@ -0,0 +1,137 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVIO_H
+#define NV_NVFOUNDATION_NVIO_H
+
+/** \addtogroup common
+ @{
+*/
+
+#include "NvSimpleTypes.h"
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+/**
+\brief Input stream class for I/O.
+
+The user needs to supply a NvInputStream implementation to a number of methods to allow the SDK to read data.
+*/
+
+class NvInputStream
+{
+ public:
+ /**
+ \brief read from the stream. The number of bytes read may be less than the number requested.
+
+ \param[in] dest the destination address to which the data will be read
+ \param[in] count the number of bytes requested
+
+ \return the number of bytes read from the stream.
+ */
+
+ virtual uint32_t read(void* dest, uint32_t count) = 0;
+
+ virtual ~NvInputStream()
+ {
+ }
+};
+
+/**
+\brief Input data class for I/O which provides random read access.
+
+The user needs to supply a NvInputData implementation to a number of methods to allow the SDK to read data.
+*/
+
+class NvInputData : public NvInputStream
+{
+ public:
+ /**
+ \brief return the length of the input data
+
+ \return size in bytes of the input data
+ */
+
+ virtual uint32_t getLength() const = 0;
+
+ /**
+ \brief seek to the given offset from the start of the data.
+
+ \param[in] offset the offset to seek to. If greater than the length of the data, this call is equivalent to
+ seek(length);
+ */
+
+ virtual void seek(uint32_t offset) = 0;
+
+ /**
+ \brief return the current offset from the start of the data
+
+ \return the offset to seek to.
+ */
+
+ virtual uint32_t tell() const = 0;
+
+ virtual ~NvInputData()
+ {
+ }
+};
+
+/**
+\brief Output stream class for I/O.
+
+The user needs to supply a NvOutputStream implementation to a number of methods to allow the SDK to write data.
+*/
+
+class NvOutputStream
+{
+ public:
+ /**
+ \brief write to the stream. The number of bytes written may be less than the number sent.
+
+    \param[in] src the source address from which the data will be written
+ \param[in] count the number of bytes to be written
+
+ \return the number of bytes written to the stream by this call.
+ */
+
+ virtual uint32_t write(const void* src, uint32_t count) = 0;
+
+ virtual ~NvOutputStream()
+ {
+ }
+};
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVIO_H
diff --git a/blast/include/shared/NvFoundation/NvIntrinsics.h b/blast/include/shared/NvFoundation/NvIntrinsics.h
new file mode 100644
index 000000000..82cf8995c
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvIntrinsics.h
@@ -0,0 +1,52 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVINTRINSICS_H
+#define NV_NVFOUNDATION_NVINTRINSICS_H
+
+#include "NvPreprocessor.h"
+
+#if NV_WINDOWS_FAMILY
+#include "platform/windows/NvWindowsIntrinsics.h"
+#elif NV_X360
+#include "xbox360/NvXbox360Intrinsics.h"
+#elif(NV_LINUX || NV_ANDROID || NV_APPLE_FAMILY || NV_PS4)
+#include "platform/unix/NvUnixIntrinsics.h"
+#elif NV_PS3
+#include "ps3/NvPS3Intrinsics.h"
+#elif NV_PSP2
+#include "psp2/NvPSP2Intrinsics.h"
+#elif NV_WIIU
+#include "wiiu/NvWiiUIntrinsics.h"
+#elif NV_XBOXONE
+#include "XboxOne/NvXboxOneIntrinsics.h"
+#else
+#error "Platform not supported!"
+#endif
+
+#endif // #ifndef NV_NVFOUNDATION_NVINTRINSICS_H
diff --git a/blast/include/shared/NvFoundation/NvMat33.h b/blast/include/shared/NvFoundation/NvMat33.h
new file mode 100644
index 000000000..f459502e4
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvMat33.h
@@ -0,0 +1,391 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVMAT33_H
+#define NV_NVFOUNDATION_NVMAT33_H
+/** \addtogroup foundation
+@{
+*/
+
+#include "NvVec3.h"
+#include "NvQuat.h"
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+/*!
+\brief 3x3 matrix class
+
+Some clarifications, as there has been much confusion about matrix formats etc in the past.
+
+Short:
+- Matrix have base vectors in columns (vectors are column matrices, 3x1 matrices).
+- Matrix is physically stored in column major format
+- Matrices are concatenated from the left
+
+Long:
+Given three base vectors a, b and c the matrix is stored as
+
+|a.x b.x c.x|
+|a.y b.y c.y|
+|a.z b.z c.z|
+
+Vectors are treated as columns, so the vector v is
+
+|x|
+|y|
+|z|
+
+And matrices are applied _before_ the vector (pre-multiplication)
+v' = M*v
+
+|x'| |a.x b.x c.x| |x| |a.x*x + b.x*y + c.x*z|
+|y'| = |a.y b.y c.y| * |y| = |a.y*x + b.y*y + c.y*z|
+|z'| |a.z b.z c.z| |z| |a.z*x + b.z*y + c.z*z|
+
+
+Physical storage and indexing:
+To be compatible with popular 3d rendering APIs (read D3d and OpenGL)
+the physical indexing is
+
+|0 3 6|
+|1 4 7|
+|2 5 8|
+
+index = column*3 + row
+
+which in C++ translates to M[column][row]
+
+The mathematical indexing is M_row,column and this is what is used for _-notation
+so _12 is 1st row, second column and operator(row, column)!
+
+*/
+class NvMat33
+{
+ public:
+ //! Default constructor
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33()
+ {
+ }
+
+ //! identity constructor
+ NV_CUDA_CALLABLE NV_INLINE NvMat33(NvIDENTITY r)
+ : column0(1.0f, 0.0f, 0.0f), column1(0.0f, 1.0f, 0.0f), column2(0.0f, 0.0f, 1.0f)
+ {
+ NV_UNUSED(r);
+ }
+
+ //! zero constructor
+ NV_CUDA_CALLABLE NV_INLINE NvMat33(NvZERO r) : column0(0.0f), column1(0.0f), column2(0.0f)
+ {
+ NV_UNUSED(r);
+ }
+
+ //! Construct from three base vectors
+ NV_CUDA_CALLABLE NvMat33(const NvVec3& col0, const NvVec3& col1, const NvVec3& col2)
+ : column0(col0), column1(col1), column2(col2)
+ {
+ }
+
+ //! constructor from a scalar, which generates a multiple of the identity matrix
+ explicit NV_CUDA_CALLABLE NV_INLINE NvMat33(float r)
+ : column0(r, 0.0f, 0.0f), column1(0.0f, r, 0.0f), column2(0.0f, 0.0f, r)
+ {
+ }
+
+ //! Construct from float[9]
+ explicit NV_CUDA_CALLABLE NV_INLINE NvMat33(float values[])
+ : column0(values[0], values[1], values[2])
+ , column1(values[3], values[4], values[5])
+ , column2(values[6], values[7], values[8])
+ {
+ }
+
+ //! Construct from a quaternion
+ explicit NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33(const NvQuat& q)
+ {
+ const float x = q.x;
+ const float y = q.y;
+ const float z = q.z;
+ const float w = q.w;
+
+ const float x2 = x + x;
+ const float y2 = y + y;
+ const float z2 = z + z;
+
+ const float xx = x2 * x;
+ const float yy = y2 * y;
+ const float zz = z2 * z;
+
+ const float xy = x2 * y;
+ const float xz = x2 * z;
+ const float xw = x2 * w;
+
+ const float yz = y2 * z;
+ const float yw = y2 * w;
+ const float zw = z2 * w;
+
+ column0 = NvVec3(1.0f - yy - zz, xy + zw, xz - yw);
+ column1 = NvVec3(xy - zw, 1.0f - xx - zz, yz + xw);
+ column2 = NvVec3(xz + yw, yz - xw, 1.0f - xx - yy);
+ }
+
+ //! Copy constructor
+ NV_CUDA_CALLABLE NV_INLINE NvMat33(const NvMat33& other)
+ : column0(other.column0), column1(other.column1), column2(other.column2)
+ {
+ }
+
+ //! Assignment operator
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33& operator=(const NvMat33& other)
+ {
+ column0 = other.column0;
+ column1 = other.column1;
+ column2 = other.column2;
+ return *this;
+ }
+
+ //! Construct from diagonal, off-diagonals are zero.
+ NV_CUDA_CALLABLE NV_INLINE static NvMat33 createDiagonal(const NvVec3& d)
+ {
+ return NvMat33(NvVec3(d.x, 0.0f, 0.0f), NvVec3(0.0f, d.y, 0.0f), NvVec3(0.0f, 0.0f, d.z));
+ }
+
+ /**
+ \brief returns true if the two matrices are exactly equal
+ */
+ NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvMat33& m) const
+ {
+ return column0 == m.column0 && column1 == m.column1 && column2 == m.column2;
+ }
+
+ //! Get transposed matrix
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33 getTranspose() const
+ {
+ const NvVec3 v0(column0.x, column1.x, column2.x);
+ const NvVec3 v1(column0.y, column1.y, column2.y);
+ const NvVec3 v2(column0.z, column1.z, column2.z);
+
+ return NvMat33(v0, v1, v2);
+ }
+
+ //! Get the real inverse
+ NV_CUDA_CALLABLE NV_INLINE NvMat33 getInverse() const
+ {
+ const float det = getDeterminant();
+ NvMat33 inverse;
+
+ if(det != 0)
+ {
+ const float invDet = 1.0f / det;
+
+ inverse.column0.x = invDet * (column1.y * column2.z - column2.y * column1.z);
+ inverse.column0.y = invDet * -(column0.y * column2.z - column2.y * column0.z);
+ inverse.column0.z = invDet * (column0.y * column1.z - column0.z * column1.y);
+
+ inverse.column1.x = invDet * -(column1.x * column2.z - column1.z * column2.x);
+ inverse.column1.y = invDet * (column0.x * column2.z - column0.z * column2.x);
+ inverse.column1.z = invDet * -(column0.x * column1.z - column0.z * column1.x);
+
+ inverse.column2.x = invDet * (column1.x * column2.y - column1.y * column2.x);
+ inverse.column2.y = invDet * -(column0.x * column2.y - column0.y * column2.x);
+ inverse.column2.z = invDet * (column0.x * column1.y - column1.x * column0.y);
+
+ return inverse;
+ }
+ else
+ {
+ return NvMat33(NvIdentity);
+ }
+ }
+
+ //! Get determinant
+ NV_CUDA_CALLABLE NV_INLINE float getDeterminant() const
+ {
+ return column0.dot(column1.cross(column2));
+ }
+
+ //! Unary minus
+ NV_CUDA_CALLABLE NV_INLINE NvMat33 operator-() const
+ {
+ return NvMat33(-column0, -column1, -column2);
+ }
+
+ //! Add
+ NV_CUDA_CALLABLE NV_INLINE NvMat33 operator+(const NvMat33& other) const
+ {
+ return NvMat33(column0 + other.column0, column1 + other.column1, column2 + other.column2);
+ }
+
+ //! Subtract
+ NV_CUDA_CALLABLE NV_INLINE NvMat33 operator-(const NvMat33& other) const
+ {
+ return NvMat33(column0 - other.column0, column1 - other.column1, column2 - other.column2);
+ }
+
+ //! Scalar multiplication
+ NV_CUDA_CALLABLE NV_INLINE NvMat33 operator*(float scalar) const
+ {
+ return NvMat33(column0 * scalar, column1 * scalar, column2 * scalar);
+ }
+
+ friend NvMat33 operator*(float, const NvMat33&);
+
+ //! Matrix vector multiplication (returns 'this->transform(vec)')
+ NV_CUDA_CALLABLE NV_INLINE NvVec3 operator*(const NvVec3& vec) const
+ {
+ return transform(vec);
+ }
+
+ // a = b operators
+
+ //! Matrix multiplication
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvMat33 operator*(const NvMat33& other) const
+ {
+ // Rows from this columns from other
+ // column0 = transform(other.column0) etc
+ return NvMat33(transform(other.column0), transform(other.column1), transform(other.column2));
+ }
+
+ //! Equals-add
+ NV_CUDA_CALLABLE NV_INLINE NvMat33& operator+=(const NvMat33& other)
+ {
+ column0 += other.column0;
+ column1 += other.column1;
+ column2 += other.column2;
+ return *this;
+ }
+
+ //! Equals-sub
+ NV_CUDA_CALLABLE NV_INLINE NvMat33& operator-=(const NvMat33& other)
+ {
+ column0 -= other.column0;
+ column1 -= other.column1;
+ column2 -= other.column2;
+ return *this;
+ }
+
+ //! Equals scalar multiplication
+ NV_CUDA_CALLABLE NV_INLINE NvMat33& operator*=(float scalar)
+ {
+ column0 *= scalar;
+ column1 *= scalar;
+ column2 *= scalar;
+ return *this;
+ }
+
+ //! Equals matrix multiplication
+ NV_CUDA_CALLABLE NV_INLINE NvMat33& operator*=(const NvMat33& other)
+ {
+ *this = *this * other;
+ return *this;
+ }
+
+ //! Element access, mathematical way!
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float operator()(unsigned int row, unsigned int col) const
+ {
+ return (*this)[col][row];
+ }
+
+ //! Element access, mathematical way!
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float& operator()(unsigned int row, unsigned int col)
+ {
+ return (*this)[col][row];
+ }
+
+ // Transform etc
+
+ //! Transform vector by matrix, equal to v' = M*v
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 transform(const NvVec3& other) const
+ {
+ return column0 * other.x + column1 * other.y + column2 * other.z;
+ }
+
+ //! Transform vector by matrix transpose, v' = M^t*v
+ NV_CUDA_CALLABLE NV_INLINE NvVec3 transformTranspose(const NvVec3& other) const
+ {
+ return NvVec3(column0.dot(other), column1.dot(other), column2.dot(other));
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE const float* front() const
+ {
+ return &column0.x;
+ }
+
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator[](unsigned int num)
+ {
+ return (&column0)[num];
+ }
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE const NvVec3& operator[](unsigned int num) const
+ {
+ return (&column0)[num];
+ }
+
+ // Data, see above for format!
+
+ NvVec3 column0, column1, column2; // the three base vectors
+};
+
+// implementation from NvQuat.h
+NV_CUDA_CALLABLE NV_INLINE NvQuat::NvQuat(const NvMat33& m)
+{
+ if (m.column2.z < 0)
+ {
+ if (m.column0.x > m.column1.y)
+ {
+ float t = 1 + m.column0.x - m.column1.y - m.column2.z;
+ *this = NvQuat(t, m.column0.y + m.column1.x, m.column2.x + m.column0.z, m.column1.z - m.column2.y) * (0.5f / NvSqrt(t));
+ }
+ else
+ {
+ float t = 1 - m.column0.x + m.column1.y - m.column2.z;
+ *this = NvQuat(m.column0.y + m.column1.x, t, m.column1.z + m.column2.y, m.column2.x - m.column0.z) * (0.5f / NvSqrt(t));
+ }
+ }
+ else
+ {
+ if (m.column0.x < -m.column1.y)
+ {
+ float t = 1 - m.column0.x - m.column1.y + m.column2.z;
+ *this = NvQuat(m.column2.x + m.column0.z, m.column1.z + m.column2.y, t, m.column0.y - m.column1.x) * (0.5f / NvSqrt(t));
+ }
+ else
+ {
+ float t = 1 + m.column0.x + m.column1.y + m.column2.z;
+ *this = NvQuat(m.column1.z - m.column2.y, m.column2.x - m.column0.z, m.column0.y - m.column1.x, t) * (0.5f / NvSqrt(t));
+ }
+ }
+}
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVMAT33_H
diff --git a/blast/include/shared/NvFoundation/NvMat44.h b/blast/include/shared/NvFoundation/NvMat44.h
new file mode 100644
index 000000000..e4d12e331
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvMat44.h
@@ -0,0 +1,375 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVMAT44_H
+#define NV_NVFOUNDATION_NVMAT44_H
+/** \addtogroup foundation
+@{
+*/
+
+#include "NvQuat.h"
+#include "NvVec4.h"
+#include "NvMat33.h"
+#include "NvTransform.h"
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+/*!
+\brief 4x4 matrix class
+
+This class is layout-compatible with D3D and OpenGL matrices. More notes on layout are given in the NvMat33
+
+@see NvMat33 NvTransform
+*/
+
+class NvMat44
+{
+ public:
+ //! Default constructor
+ NV_CUDA_CALLABLE NV_INLINE NvMat44()
+ {
+ }
+
+ //! identity constructor
+ NV_CUDA_CALLABLE NV_INLINE NvMat44(NvIDENTITY r)
+ : column0(1.0f, 0.0f, 0.0f, 0.0f)
+ , column1(0.0f, 1.0f, 0.0f, 0.0f)
+ , column2(0.0f, 0.0f, 1.0f, 0.0f)
+ , column3(0.0f, 0.0f, 0.0f, 1.0f)
+ {
+ NV_UNUSED(r);
+ }
+
+ //! zero constructor
+ NV_CUDA_CALLABLE NV_INLINE NvMat44(NvZERO r) : column0(NvZero), column1(NvZero), column2(NvZero), column3(NvZero)
+ {
+ NV_UNUSED(r);
+ }
+
+ //! Construct from four 4-vectors
+ NV_CUDA_CALLABLE NvMat44(const NvVec4& col0, const NvVec4& col1, const NvVec4& col2, const NvVec4& col3)
+ : column0(col0), column1(col1), column2(col2), column3(col3)
+ {
+ }
+
+ //! constructor that generates a multiple of the identity matrix
+ explicit NV_CUDA_CALLABLE NV_INLINE NvMat44(float r)
+ : column0(r, 0.0f, 0.0f, 0.0f)
+ , column1(0.0f, r, 0.0f, 0.0f)
+ , column2(0.0f, 0.0f, r, 0.0f)
+ , column3(0.0f, 0.0f, 0.0f, r)
+ {
+ }
+
+ //! Construct from three base vectors and a translation
+ NV_CUDA_CALLABLE NvMat44(const NvVec3& col0, const NvVec3& col1, const NvVec3& col2, const NvVec3& col3)
+ : column0(col0, 0), column1(col1, 0), column2(col2, 0), column3(col3, 1.0f)
+ {
+ }
+
+ //! Construct from float[16]
+ explicit NV_CUDA_CALLABLE NV_INLINE NvMat44(float values[])
+ : column0(values[0], values[1], values[2], values[3])
+ , column1(values[4], values[5], values[6], values[7])
+ , column2(values[8], values[9], values[10], values[11])
+ , column3(values[12], values[13], values[14], values[15])
+ {
+ }
+
+ //! Construct from a quaternion
+ explicit NV_CUDA_CALLABLE NV_INLINE NvMat44(const NvQuat& q)
+ {
+ const float x = q.x;
+ const float y = q.y;
+ const float z = q.z;
+ const float w = q.w;
+
+ const float x2 = x + x;
+ const float y2 = y + y;
+ const float z2 = z + z;
+
+ const float xx = x2 * x;
+ const float yy = y2 * y;
+ const float zz = z2 * z;
+
+ const float xy = x2 * y;
+ const float xz = x2 * z;
+ const float xw = x2 * w;
+
+ const float yz = y2 * z;
+ const float yw = y2 * w;
+ const float zw = z2 * w;
+
+ column0 = NvVec4(1.0f - yy - zz, xy + zw, xz - yw, 0.0f);
+ column1 = NvVec4(xy - zw, 1.0f - xx - zz, yz + xw, 0.0f);
+ column2 = NvVec4(xz + yw, yz - xw, 1.0f - xx - yy, 0.0f);
+ column3 = NvVec4(0.0f, 0.0f, 0.0f, 1.0f);
+ }
+
+ //! Construct from a diagonal vector
+ explicit NV_CUDA_CALLABLE NV_INLINE NvMat44(const NvVec4& diagonal)
+ : column0(diagonal.x, 0.0f, 0.0f, 0.0f)
+ , column1(0.0f, diagonal.y, 0.0f, 0.0f)
+ , column2(0.0f, 0.0f, diagonal.z, 0.0f)
+ , column3(0.0f, 0.0f, 0.0f, diagonal.w)
+ {
+ }
+
+ //! Construct from Mat33 and a translation
+ NV_CUDA_CALLABLE NvMat44(const NvMat33& axes, const NvVec3& position)
+ : column0(axes.column0, 0.0f), column1(axes.column1, 0.0f), column2(axes.column2, 0.0f), column3(position, 1.0f)
+ {
+ }
+
+ NV_CUDA_CALLABLE NvMat44(const NvTransform& t)
+ {
+ *this = NvMat44(NvMat33(t.q), t.p);
+ }
+
+ /**
+ \brief returns true if the two matrices are exactly equal
+ */
+ NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvMat44& m) const
+ {
+ return column0 == m.column0 && column1 == m.column1 && column2 == m.column2 && column3 == m.column3;
+ }
+
+ //! Copy constructor
+ NV_CUDA_CALLABLE NV_INLINE NvMat44(const NvMat44& other)
+ : column0(other.column0), column1(other.column1), column2(other.column2), column3(other.column3)
+ {
+ }
+
+ //! Assignment operator
+ NV_CUDA_CALLABLE NV_INLINE const NvMat44& operator=(const NvMat44& other)
+ {
+ column0 = other.column0;
+ column1 = other.column1;
+ column2 = other.column2;
+ column3 = other.column3;
+ return *this;
+ }
+
+ //! Get transposed matrix
+ NV_CUDA_CALLABLE NV_INLINE NvMat44 getTranspose() const
+ {
+ return NvMat44(
+ NvVec4(column0.x, column1.x, column2.x, column3.x), NvVec4(column0.y, column1.y, column2.y, column3.y),
+ NvVec4(column0.z, column1.z, column2.z, column3.z), NvVec4(column0.w, column1.w, column2.w, column3.w));
+ }
+
+ //! Unary minus
+ NV_CUDA_CALLABLE NV_INLINE NvMat44 operator-() const
+ {
+ return NvMat44(-column0, -column1, -column2, -column3);
+ }
+
+ //! Add
+ NV_CUDA_CALLABLE NV_INLINE NvMat44 operator+(const NvMat44& other) const
+ {
+ return NvMat44(column0 + other.column0, column1 + other.column1, column2 + other.column2,
+ column3 + other.column3);
+ }
+
+ //! Subtract
+ NV_CUDA_CALLABLE NV_INLINE NvMat44 operator-(const NvMat44& other) const
+ {
+ return NvMat44(column0 - other.column0, column1 - other.column1, column2 - other.column2,
+ column3 - other.column3);
+ }
+
+ //! Scalar multiplication
+ NV_CUDA_CALLABLE NV_INLINE NvMat44 operator*(float scalar) const
+ {
+ return NvMat44(column0 * scalar, column1 * scalar, column2 * scalar, column3 * scalar);
+ }
+
+ friend NvMat44 operator*(float, const NvMat44&);
+
+ //! Matrix multiplication
+ NV_CUDA_CALLABLE NV_INLINE NvMat44 operator*(const NvMat44& other) const
+ {
+ // Rows from this columns from other
+ // column0 = transform(other.column0) etc
+ return NvMat44(transform(other.column0), transform(other.column1), transform(other.column2),
+ transform(other.column3));
+ }
+
+ // a = b operators
+
+ //! Equals-add
+ NV_CUDA_CALLABLE NV_INLINE NvMat44& operator+=(const NvMat44& other)
+ {
+ column0 += other.column0;
+ column1 += other.column1;
+ column2 += other.column2;
+ column3 += other.column3;
+ return *this;
+ }
+
+ //! Equals-sub
+ NV_CUDA_CALLABLE NV_INLINE NvMat44& operator-=(const NvMat44& other)
+ {
+ column0 -= other.column0;
+ column1 -= other.column1;
+ column2 -= other.column2;
+ column3 -= other.column3;
+ return *this;
+ }
+
+ //! Equals scalar multiplication
+ NV_CUDA_CALLABLE NV_INLINE NvMat44& operator*=(float scalar)
+ {
+ column0 *= scalar;
+ column1 *= scalar;
+ column2 *= scalar;
+ column3 *= scalar;
+ return *this;
+ }
+
+ //! Equals matrix multiplication
+ NV_CUDA_CALLABLE NV_INLINE NvMat44& operator*=(const NvMat44& other)
+ {
+ *this = *this * other;
+ return *this;
+ }
+
+ //! Element access, mathematical way!
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float operator()(unsigned int row, unsigned int col) const
+ {
+ return (*this)[col][row];
+ }
+
+ //! Element access, mathematical way!
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float& operator()(unsigned int row, unsigned int col)
+ {
+ return (*this)[col][row];
+ }
+
+ //! Transform vector by matrix, equal to v' = M*v
+ NV_CUDA_CALLABLE NV_INLINE NvVec4 transform(const NvVec4& other) const
+ {
+ return column0 * other.x + column1 * other.y + column2 * other.z + column3 * other.w;
+ }
+
+ //! Transform vector by matrix, equal to v' = M*v
+ NV_CUDA_CALLABLE NV_INLINE NvVec3 transform(const NvVec3& other) const
+ {
+ return transform(NvVec4(other, 1.0f)).getXYZ();
+ }
+
+ //! Rotate vector by matrix, equal to v' = M*v
+ NV_CUDA_CALLABLE NV_INLINE const NvVec4 rotate(const NvVec4& other) const
+ {
+ return column0 * other.x + column1 * other.y + column2 * other.z; // + column3*0;
+ }
+
+ //! Rotate vector by matrix, equal to v' = M*v
+ NV_CUDA_CALLABLE NV_INLINE const NvVec3 rotate(const NvVec3& other) const
+ {
+ return rotate(NvVec4(other, 1.0f)).getXYZ();
+ }
+
+ NV_CUDA_CALLABLE NV_INLINE NvVec3 getBasis(int num) const
+ {
+ NV_ASSERT(num >= 0 && num < 3);
+ return (&column0)[num].getXYZ();
+ }
+
+ NV_CUDA_CALLABLE NV_INLINE NvVec3 getPosition() const
+ {
+ return column3.getXYZ();
+ }
+
+ NV_CUDA_CALLABLE NV_INLINE void setPosition(const NvVec3& position)
+ {
+ column3.x = position.x;
+ column3.y = position.y;
+ column3.z = position.z;
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE const float* front() const
+ {
+ return &column0.x;
+ }
+
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec4& operator[](unsigned int num)
+ {
+ return (&column0)[num];
+ }
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE const NvVec4& operator[](unsigned int num) const
+ {
+ return (&column0)[num];
+ }
+
+ NV_CUDA_CALLABLE NV_INLINE void scale(const NvVec4& p)
+ {
+ column0 *= p.x;
+ column1 *= p.y;
+ column2 *= p.z;
+ column3 *= p.w;
+ }
+
+ NV_CUDA_CALLABLE NV_INLINE NvMat44 inverseRT(void) const
+ {
+ NvVec3 r0(column0.x, column1.x, column2.x), r1(column0.y, column1.y, column2.y),
+ r2(column0.z, column1.z, column2.z);
+
+ return NvMat44(r0, r1, r2, -(r0 * column3.x + r1 * column3.y + r2 * column3.z));
+ }
+
+ NV_CUDA_CALLABLE NV_INLINE bool isFinite() const
+ {
+ return column0.isFinite() && column1.isFinite() && column2.isFinite() && column3.isFinite();
+ }
+
+ // Data, see above for format!
+
+ NvVec4 column0, column1, column2, column3; // the four base vectors
+};
+
+// implementation from NvTransform.h
+NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform::NvTransform(const NvMat44& m)
+{
+ NvVec3 column0 = NvVec3(m.column0.x, m.column0.y, m.column0.z);
+ NvVec3 column1 = NvVec3(m.column1.x, m.column1.y, m.column1.z);
+ NvVec3 column2 = NvVec3(m.column2.x, m.column2.y, m.column2.z);
+
+ q = NvQuat(NvMat33(column0, column1, column2));
+ p = NvVec3(m.column3.x, m.column3.y, m.column3.z);
+}
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVMAT44_H
diff --git a/blast/include/shared/NvFoundation/NvMath.h b/blast/include/shared/NvFoundation/NvMath.h
new file mode 100644
index 000000000..3bba0d388
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvMath.h
@@ -0,0 +1,337 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVMATH_H
+#define NV_NVFOUNDATION_NVMATH_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "NvPreprocessor.h"
+
+#if NV_VC
+#pragma warning(push)
+#pragma warning(disable : 4985) // 'symbol name': attributes not present on previous declaration
+#endif
+#include <math.h>
+#if NV_VC
+#pragma warning(pop)
+#endif
+
+#include <float.h>
+#include "NvIntrinsics.h"
+#include "NvAssert.h"
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+// constants
+static const float NvPi = float(3.141592653589793);
+static const float NvHalfPi = float(1.57079632679489661923);
+static const float NvTwoPi = float(6.28318530717958647692);
+static const float NvInvPi = float(0.31830988618379067154);
+static const float NvInvTwoPi = float(0.15915494309189533577);
+static const float NvPiDivTwo = float(1.57079632679489661923);
+static const float NvPiDivFour = float(0.78539816339744830962);
+
+/**
+\brief The return value is the greater of the two specified values.
+*/
+template <class T>
+NV_CUDA_CALLABLE NV_FORCE_INLINE T NvMax(T a, T b)
+{
+ return a < b ? b : a;
+}
+
+//! overload for float to use fsel on xbox
+template <>
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvMax(float a, float b)
+{
+ return intrinsics::selectMax(a, b);
+}
+
+/**
+\brief The return value is the lesser of the two specified values.
+*/
+template <class T>
+NV_CUDA_CALLABLE NV_FORCE_INLINE T NvMin(T a, T b)
+{
+ return a < b ? a : b;
+}
+
+template <>
+//! overload for float to use fsel on xbox
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvMin(float a, float b)
+{
+ return intrinsics::selectMin(a, b);
+}
+
+/*
+Many of these are just implemented as NV_CUDA_CALLABLE NV_FORCE_INLINE calls to the C lib right now,
+but later we could replace some of them with some approximations or more
+clever stuff.
+*/
+
+/**
+\brief abs returns the absolute value of its argument.
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAbs(float a)
+{
+ return intrinsics::abs(a);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvEquals(float a, float b, float eps)
+{
+ return (NvAbs(a - b) < eps);
+}
+
+/**
+\brief abs returns the absolute value of its argument.
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAbs(double a)
+{
+ return ::fabs(a);
+}
+
+/**
+\brief abs returns the absolute value of its argument.
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE int32_t NvAbs(int32_t a)
+{
+ return ::abs(a);
+}
+
+/**
+\brief Clamps v to the range [hi,lo]
+*/
+template <class T>
+NV_CUDA_CALLABLE NV_FORCE_INLINE T NvClamp(T v, T lo, T hi)
+{
+ NV_ASSERT(lo <= hi);
+ return NvMin(hi, NvMax(lo, v));
+}
+
+//! \brief Square root.
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvSqrt(float a)
+{
+ return intrinsics::sqrt(a);
+}
+
+//! \brief Square root.
+NV_CUDA_CALLABLE NV_FORCE_INLINE double NvSqrt(double a)
+{
+ return ::sqrt(a);
+}
+
+//! \brief reciprocal square root.
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvRecipSqrt(float a)
+{
+ return intrinsics::recipSqrt(a);
+}
+
+//! \brief reciprocal square root.
+NV_CUDA_CALLABLE NV_FORCE_INLINE double NvRecipSqrt(double a)
+{
+ return 1 / ::sqrt(a);
+}
+
+//! trigonometry -- all angles are in radians.
+
+//! \brief Sine of an angle ( Unit: Radians )
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvSin(float a)
+{
+ return intrinsics::sin(a);
+}
+
+//! \brief Sine of an angle ( Unit: Radians )
+NV_CUDA_CALLABLE NV_FORCE_INLINE double NvSin(double a)
+{
+ return ::sin(a);
+}
+
+//! \brief Cosine of an angle (Unit: Radians)
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvCos(float a)
+{
+ return intrinsics::cos(a);
+}
+
+//! \brief Cosine of an angle (Unit: Radians)
+NV_CUDA_CALLABLE NV_FORCE_INLINE double NvCos(double a)
+{
+ return ::cos(a);
+}
+
+/**
+\brief Tangent of an angle.
+Unit: Radians
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvTan(float a)
+{
+ return ::tanf(a);
+}
+
+/**
+\brief Tangent of an angle.
+Unit: Radians
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE double NvTan(double a)
+{
+ return ::tan(a);
+}
+
+/**
+\brief Arcsine.
+Returns angle between -PI/2 and PI/2 in radians
+Unit: Radians
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAsin(float f)
+{
+ return ::asinf(NvClamp(f, -1.0f, 1.0f));
+}
+
+/**
+\brief Arcsine.
+Returns angle between -PI/2 and PI/2 in radians
+Unit: Radians
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAsin(double f)
+{
+ return ::asin(NvClamp(f, -1.0, 1.0));
+}
+
+/**
+\brief Arccosine.
+Returns angle between 0 and PI in radians
+Unit: Radians
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAcos(float f)
+{
+ return ::acosf(NvClamp(f, -1.0f, 1.0f));
+}
+
+/**
+\brief Arccosine.
+Returns angle between 0 and PI in radians
+Unit: Radians
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAcos(double f)
+{
+ return ::acos(NvClamp(f, -1.0, 1.0));
+}
+
+/**
+\brief ArcTangent.
+Returns angle between -PI/2 and PI/2 in radians
+Unit: Radians
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAtan(float a)
+{
+ return ::atanf(a);
+}
+
+/**
+\brief ArcTangent.
+Returns angle between -PI/2 and PI/2 in radians
+Unit: Radians
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAtan(double a)
+{
+ return ::atan(a);
+}
+
+/**
+\brief Arctangent of (x/y) with correct sign.
+Returns angle between -PI and PI in radians
+Unit: Radians
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvAtan2(float x, float y)
+{
+ return ::atan2f(x, y);
+}
+
+/**
+\brief Arctangent of (x/y) with correct sign.
+Returns angle between -PI and PI in radians
+Unit: Radians
+*/
+NV_CUDA_CALLABLE NV_FORCE_INLINE double NvAtan2(double x, double y)
+{
+ return ::atan2(x, y);
+}
+
+//! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc.
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvIsFinite(float f)
+{
+ return intrinsics::isFinite(f);
+}
+
+//! \brief returns true if the passed number is a finite floating point number as opposed to INF, NAN, etc.
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool NvIsFinite(double f)
+{
+ return intrinsics::isFinite(f);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvFloor(float a)
+{
+ return ::floorf(a);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvExp(float a)
+{
+ return ::expf(a);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvCeil(float a)
+{
+ return ::ceilf(a);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvSign(float a)
+{
+ return nvidia::intrinsics::sign(a);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvPow(float x, float y)
+{
+ return ::powf(x, y);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE float NvLog(float x)
+{
+ return ::logf(x);
+}
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVMATH_H
diff --git a/blast/include/shared/NvFoundation/NvPlane.h b/blast/include/shared/NvFoundation/NvPlane.h
new file mode 100644
index 000000000..6aa18d8f8
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvPlane.h
@@ -0,0 +1,144 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVPLANE_H
+#define NV_NVFOUNDATION_NVPLANE_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "NvMath.h"
+#include "NvVec3.h"
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+/**
+\brief Representation of a plane.
+
+ Plane equation used: n.dot(v) + d = 0
+*/
+class NvPlane
+{
+ public:
+ /**
+ \brief Constructor
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane()
+ {
+ }
+
+ /**
+ \brief Constructor from a normal and a distance
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane(float nx, float ny, float nz, float distance) : n(nx, ny, nz), d(distance)
+ {
+ }
+
+ /**
+ \brief Constructor from a normal and a distance
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane(const NvVec3& normal, float distance) : n(normal), d(distance)
+ {
+ }
+
+ /**
+ \brief Constructor from a point on the plane and a normal
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane(const NvVec3& point, const NvVec3& normal)
+ : n(normal), d(-point.dot(n)) // p satisfies normal.dot(p) + d = 0
+ {
+ }
+
+ /**
+ \brief Constructor from three points
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane(const NvVec3& p0, const NvVec3& p1, const NvVec3& p2)
+ {
+ n = (p1 - p0).cross(p2 - p0).getNormalized();
+ d = -p0.dot(n);
+ }
+
+ /**
+ \brief returns true if the two planes are exactly equal
+ */
+ NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvPlane& p) const
+ {
+ return n == p.n && d == p.d;
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float distance(const NvVec3& p) const
+ {
+ return p.dot(n) + d;
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool contains(const NvVec3& p) const
+ {
+ return NvAbs(distance(p)) < (1.0e-7f);
+ }
+
+ /**
+ \brief projects p into the plane
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 project(const NvVec3& p) const
+ {
+ return p - n * distance(p);
+ }
+
+ /**
+ \brief find an arbitrary point in the plane
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 pointInPlane() const
+ {
+ return -n * d;
+ }
+
+ /**
+ \brief equivalent plane with unit normal
+ */
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE void normalize()
+ {
+ float denom = 1.0f / n.magnitude();
+ n *= denom;
+ d *= denom;
+ }
+
+ NvVec3 n; //!< The normal to the plane
+ float d; //!< The distance from the origin
+};
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVPLANE_H
diff --git a/blast/include/lowlevel/NvPreprocessor.h b/blast/include/shared/NvFoundation/NvPreprocessor.h
similarity index 96%
rename from blast/include/lowlevel/NvPreprocessor.h
rename to blast/include/shared/NvFoundation/NvPreprocessor.h
index 9274c6789..1f562101f 100644
--- a/blast/include/lowlevel/NvPreprocessor.h
+++ b/blast/include/shared/NvFoundation/NvPreprocessor.h
@@ -22,13 +22,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2008-2022 NVIDIA Corporation. All rights reserved.
-// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
-// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
-
-//! @file
-//!
-//! @brief Generally userful preprocessor definitions
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
#ifndef NV_NVFOUNDATION_NVPREPROCESSOR_H
#define NV_NVFOUNDATION_NVPREPROCESSOR_H
@@ -78,7 +74,7 @@ Operating system defines, see http://sourceforge.net/p/predef/wiki/OperatingSyst
*/
#if defined(WINAPI_FAMILY) && WINAPI_FAMILY == WINAPI_PARTITION_APP
#define NV_WINRT 1 // Windows Runtime, either on Windows RT or Windows 8
-#elif defined(XBOXONE) || defined(_XBOX_ONE)
+#elif defined(XBOXONE)
#define NV_XBOXONE 1
#elif defined(_WIN64) // note: XBOXONE implies _WIN64
#define NV_WIN64 1
@@ -276,14 +272,14 @@ Assert macro
DLL export macros
*/
#ifndef NV_C_EXPORT
-#if NV_WINDOWS_FAMILY || NV_LINUX || NV_PS4 || NV_XBOXONE
+#if NV_WINDOWS_FAMILY || NV_LINUX
#define NV_C_EXPORT extern "C"
#else
#define NV_C_EXPORT
#endif
#endif
-#if NV_UNIX_FAMILY && __GNUC__ >= 4
+#if NV_UNIX_FAMILY && __GNUC__ >= 4
#define NV_UNIX_EXPORT __attribute__((visibility("default")))
#else
#define NV_UNIX_EXPORT
@@ -463,7 +459,7 @@ General defines
#endif
// make sure NV_CHECKED is defined in all _DEBUG configurations as well
-#if !defined(NV_CHECKED) && defined(NV_DEBUG)
+#if !NV_CHECKED && NV_DEBUG
#error NV_CHECKED must be defined when NV_DEBUG is defined
#endif
@@ -539,5 +535,8 @@ protected:
#define NV_CONCAT_HELPER(X, Y) X##Y
#define NV_CONCAT(X, Y) NV_CONCAT_HELPER(X, Y)
+// C-style API declaration.
+#define NV_C_API NV_C_EXPORT NV_DLL_EXPORT
+
/** @} */
#endif // #ifndef NV_NVFOUNDATION_NVPREPROCESSOR_H
diff --git a/blast/include/shared/NvFoundation/NvProfiler.h b/blast/include/shared/NvFoundation/NvProfiler.h
new file mode 100644
index 000000000..6c070e337
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvProfiler.h
@@ -0,0 +1,141 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_PROFILER_H
+#define NV_PROFILER_H
+
+#include <stdint.h>
+
+namespace nvidia
+{
+ class NvProfilerCallback;
+ namespace shdfnd
+ {
+ NV_FOUNDATION_API NvProfilerCallback *getProfilerCallback();
+ NV_FOUNDATION_API void setProfilerCallback(NvProfilerCallback *profiler);
+ }
+}
+
+
+namespace nvidia
+{
+
+struct NvProfileContext
+{
+ enum Enum
+ {
+ eNONE = 0 //!< value for no specific profile context. \see NvProfilerCallback::zoneAt
+ };
+};
+
+
+/**
+\brief The pure virtual callback interface for general purpose instrumentation and profiling of GameWorks modules as well as applications
+*/
+class NvProfilerCallback
+{
+protected:
+ virtual ~NvProfilerCallback() {}
+
+public:
+ /**************************************************************************************************************************
+ Instrumented profiling events
+ ***************************************************************************************************************************/
+
+ /**
+ \brief Mark the beginning of a nested profile block
+ \param[in] eventName Event name. Must be a persistent const char *
+ \param[in] detached True for cross thread events
+ \param[in] contextId the context id of this zone. Zones with the same id belong to the same group. 0 is used for no specific group.
+ \return Returns implementation-specific profiler data for this event
+ */
+ virtual void* zoneStart(const char* eventName, bool detached, uint64_t contextId) = 0;
+
+ /**
+ \brief Mark the end of a nested profile block
+ \param[in] profilerData The data returned by the corresponding zoneStart call (or NULL if not available)
+ \param[in] eventName The name of the zone ending, must match the corresponding name passed with 'zoneStart'. Must be a persistent const char *.
+ \param[in] detached True for cross thread events. Should match the value passed to zoneStart.
+ \param[in] contextId The context of this zone. Should match the value passed to zoneStart.
+
+ \note eventName plus contextId can be used to uniquely match up start and end of a zone.
+ */
+ virtual void zoneEnd(void* profilerData, const char* eventName, bool detached, uint64_t contextId) = 0;
+};
+
+class NvProfileScoped
+{
+public:
+ NV_FORCE_INLINE NvProfileScoped(const char* eventName, bool detached, uint64_t contextId)
+ : mCallback(nvidia::shdfnd::getProfilerCallback())
+ {
+ if (mCallback)
+ {
+ mEventName = eventName;
+ mDetached = detached;
+ mContextId = contextId;
+ mProfilerData = mCallback->zoneStart(mEventName, mDetached, mContextId);
+ }
+ }
+ ~NvProfileScoped(void)
+ {
+ if (mCallback)
+ {
+ mCallback->zoneEnd(mProfilerData, mEventName, mDetached, mContextId);
+ }
+ }
+ nvidia::NvProfilerCallback* mCallback;
+ void* mProfilerData;
+ const char* mEventName;
+ bool mDetached;
+ uint64_t mContextId;
+};
+
+
+
+} // end of NVIDIA namespace
+
+
+
+#if NV_DEBUG || NV_CHECKED || NV_PROFILE
+
+#define NV_PROFILE_ZONE(name,context_id) nvidia::NvProfileScoped NV_CONCAT(_scoped,__LINE__)(name,false,context_id)
+#define NV_PROFILE_START_CROSSTHREAD(name,context_id) if ( nvidia::shdfnd::getProfilerCallback() ) nvidia::shdfnd::getProfilerCallback()->zoneStart(name,true,context_id)
+#define NV_PROFILE_STOP_CROSSTHREAD(name,context_id) if ( nvidia::shdfnd::getProfilerCallback() ) nvidia::shdfnd::getProfilerCallback()->zoneEnd(nullptr,name,true,context_id)
+
+#else
+
+#define NV_PROFILE_ZONE(name,context_id)
+#define NV_PROFILE_START_CROSSTHREAD(name,context_id)
+#define NV_PROFILE_STOP_CROSSTHREAD(name,context_id)
+
+#endif
+
+#define NV_PROFILE_POINTER_TO_U64( pointer ) static_cast<uint64_t>(reinterpret_cast<size_t>(pointer))
+
+#endif
diff --git a/blast/include/shared/NvFoundation/NvQuat.h b/blast/include/shared/NvFoundation/NvQuat.h
new file mode 100644
index 000000000..44ce3db44
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvQuat.h
@@ -0,0 +1,405 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVQUAT_H
+#define NV_NVFOUNDATION_NVQUAT_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "NvVec3.h"
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+/**
+\brief This is a quaternion class. For more information on quaternion mathematics
+consult a mathematics source on complex numbers.
+
+*/
+
+class NvQuat
+{
+ public:
+ /**
+ \brief Default constructor, does not do any initialization.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat()
+ {
+ }
+
+ //! identity constructor
+ NV_CUDA_CALLABLE NV_INLINE NvQuat(NvIDENTITY r) : x(0.0f), y(0.0f), z(0.0f), w(1.0f)
+ {
+ NV_UNUSED(r);
+ }
+
+ /**
+ \brief Constructor from a scalar: sets the real part w to the scalar value, and the imaginary parts (x,y,z) to zero
+ */
+ explicit NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat(float r) : x(0.0f), y(0.0f), z(0.0f), w(r)
+ {
+ }
+
+ /**
+ \brief Constructor. Take note of the order of the elements!
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat(float nx, float ny, float nz, float nw) : x(nx), y(ny), z(nz), w(nw)
+ {
+ }
+
+ /**
+ \brief Creates from angle-axis representation.
+
+ Axis must be normalized!
+
+ Angle is in radians!
+
+ Unit: Radians
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvQuat(float angleRadians, const NvVec3& unitAxis)
+ {
+ NV_ASSERT(NvAbs(1.0f - unitAxis.magnitude()) < 1e-3f);
+ const float a = angleRadians * 0.5f;
+ const float s = NvSin(a);
+ w = NvCos(a);
+ x = unitAxis.x * s;
+ y = unitAxis.y * s;
+ z = unitAxis.z * s;
+ }
+
+ /**
+ \brief Copy ctor.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat(const NvQuat& v) : x(v.x), y(v.y), z(v.z), w(v.w)
+ {
+ }
+
+ /**
+ \brief Creates from orientation matrix.
+
+ \param[in] m Rotation matrix to extract quaternion from.
+ */
+ NV_CUDA_CALLABLE NV_INLINE explicit NvQuat(const NvMat33& m); /* defined in NvMat33.h */
+
+ /**
+ \brief returns true if quat is identity
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isIdentity() const
+ {
+ return x==0.0f && y==0.0f && z==0.0f && w==1.0f;
+ }
+
+ /**
+ \brief returns true if all elements are finite (not NAN or INF, etc.)
+ */
+ NV_CUDA_CALLABLE bool isFinite() const
+ {
+ return NvIsFinite(x) && NvIsFinite(y) && NvIsFinite(z) && NvIsFinite(w);
+ }
+
+ /**
+ \brief returns true if finite and magnitude is close to unit
+ */
+
+ NV_CUDA_CALLABLE bool isUnit() const
+ {
+ const float unitTolerance = 1e-4f;
+ return isFinite() && NvAbs(magnitude() - 1) < unitTolerance;
+ }
+
+ /**
+ \brief returns true if finite and magnitude is reasonably close to unit to allow for some accumulation of error vs
+ isValid
+ */
+
+ NV_CUDA_CALLABLE bool isSane() const
+ {
+ const float unitTolerance = 1e-2f;
+ return isFinite() && NvAbs(magnitude() - 1) < unitTolerance;
+ }
+
+ /**
+ \brief returns true if the two quaternions are exactly equal
+ */
+ NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvQuat& q) const
+ {
+ return x == q.x && y == q.y && z == q.z && w == q.w;
+ }
+
+ /**
+ \brief converts this quaternion to angle-axis representation
+ */
+
+ NV_CUDA_CALLABLE NV_INLINE void toRadiansAndUnitAxis(float& angle, NvVec3& axis) const
+ {
+ const float quatEpsilon = 1.0e-8f;
+ const float s2 = x * x + y * y + z * z;
+ if(s2 < quatEpsilon * quatEpsilon) // can't extract a sensible axis
+ {
+ angle = 0.0f;
+ axis = NvVec3(1.0f, 0.0f, 0.0f);
+ }
+ else
+ {
+ const float s = NvRecipSqrt(s2);
+ axis = NvVec3(x, y, z) * s;
+ angle = NvAbs(w) < quatEpsilon ? NvPi : NvAtan2(s2 * s, w) * 2.0f;
+ }
+ }
+
+ /**
+ \brief Gets the angle between this quat and the identity quaternion.
+
+ Unit: Radians
+ */
+ NV_CUDA_CALLABLE NV_INLINE float getAngle() const
+ {
+ return NvAcos(w) * 2.0f;
+ }
+
+ /**
+ \brief Gets the angle between this quat and the argument
+
+ Unit: Radians
+ */
+ NV_CUDA_CALLABLE NV_INLINE float getAngle(const NvQuat& q) const
+ {
+ return NvAcos(dot(q)) * 2.0f;
+ }
+
+ /**
+ \brief This is the squared 4D vector length, should be 1 for unit quaternions.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitudeSquared() const
+ {
+ return x * x + y * y + z * z + w * w;
+ }
+
+ /**
+ \brief returns the scalar product of this and other.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float dot(const NvQuat& v) const
+ {
+ return x * v.x + y * v.y + z * v.z + w * v.w;
+ }
+
+ NV_CUDA_CALLABLE NV_INLINE NvQuat getNormalized() const
+ {
+ const float s = 1.0f / magnitude();
+ return NvQuat(x * s, y * s, z * s, w * s);
+ }
+
+ NV_CUDA_CALLABLE NV_INLINE float magnitude() const
+ {
+ return NvSqrt(magnitudeSquared());
+ }
+
+ // modifiers:
+ /**
+ \brief maps to the closest unit quaternion.
+ */
+ NV_CUDA_CALLABLE NV_INLINE float normalize() // convert this NvQuat to a unit quaternion
+ {
+ const float mag = magnitude();
+ if(mag != 0.0f)
+ {
+ const float imag = 1.0f / mag;
+
+ x *= imag;
+ y *= imag;
+ z *= imag;
+ w *= imag;
+ }
+ return mag;
+ }
+
+ /*
+ \brief returns the conjugate.
+
+ \note for unit quaternions, this is the inverse.
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvQuat getConjugate() const
+ {
+ return NvQuat(-x, -y, -z, w);
+ }
+
+ /*
+ \brief returns imaginary part.
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec3 getImaginaryPart() const
+ {
+ return NvVec3(x, y, z);
+ }
+
+ /** brief computes rotation of x-axis */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getBasisVector0() const
+ {
+ const float x2 = x * 2.0f;
+ const float w2 = w * 2.0f;
+ return NvVec3((w * w2) - 1.0f + x * x2, (z * w2) + y * x2, (-y * w2) + z * x2);
+ }
+
+ /** brief computes rotation of y-axis */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getBasisVector1() const
+ {
+ const float y2 = y * 2.0f;
+ const float w2 = w * 2.0f;
+ return NvVec3((-z * w2) + x * y2, (w * w2) - 1.0f + y * y2, (x * w2) + z * y2);
+ }
+
+ /** brief computes rotation of z-axis */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getBasisVector2() const
+ {
+ const float z2 = z * 2.0f;
+ const float w2 = w * 2.0f;
+ return NvVec3((y * w2) + x * z2, (-x * w2) + y * z2, (w * w2) - 1.0f + z * z2);
+ }
+
+ /**
+ rotates passed vec by this (assumed unitary)
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE const NvVec3 rotate(const NvVec3& v) const
+ {
+ const float vx = 2.0f * v.x;
+ const float vy = 2.0f * v.y;
+ const float vz = 2.0f * v.z;
+ const float w2 = w * w - 0.5f;
+ const float dot2 = (x * vx + y * vy + z * vz);
+ return NvVec3((vx * w2 + (y * vz - z * vy) * w + x * dot2), (vy * w2 + (z * vx - x * vz) * w + y * dot2),
+ (vz * w2 + (x * vy - y * vx) * w + z * dot2));
+ }
+
+ /**
+ inverse rotates passed vec by this (assumed unitary)
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE const NvVec3 rotateInv(const NvVec3& v) const
+ {
+ const float vx = 2.0f * v.x;
+ const float vy = 2.0f * v.y;
+ const float vz = 2.0f * v.z;
+ const float w2 = w * w - 0.5f;
+ const float dot2 = (x * vx + y * vy + z * vz);
+ return NvVec3((vx * w2 - (y * vz - z * vy) * w + x * dot2), (vy * w2 - (z * vx - x * vz) * w + y * dot2),
+ (vz * w2 - (x * vy - y * vx) * w + z * dot2));
+ }
+
+ /**
+ \brief Assignment operator
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator=(const NvQuat& p)
+ {
+ x = p.x;
+ y = p.y;
+ z = p.z;
+ w = p.w;
+ return *this;
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator*=(const NvQuat& q)
+ {
+ const float tx = w * q.x + q.w * x + y * q.z - q.y * z;
+ const float ty = w * q.y + q.w * y + z * q.x - q.z * x;
+ const float tz = w * q.z + q.w * z + x * q.y - q.x * y;
+
+ w = w * q.w - q.x * x - y * q.y - q.z * z;
+ x = tx;
+ y = ty;
+ z = tz;
+
+ return *this;
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator+=(const NvQuat& q)
+ {
+ x += q.x;
+ y += q.y;
+ z += q.z;
+ w += q.w;
+ return *this;
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator-=(const NvQuat& q)
+ {
+ x -= q.x;
+ y -= q.y;
+ z -= q.z;
+ w -= q.w;
+ return *this;
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat& operator*=(const float s)
+ {
+ x *= s;
+ y *= s;
+ z *= s;
+ w *= s;
+ return *this;
+ }
+
+ /** quaternion multiplication */
+ NV_CUDA_CALLABLE NV_INLINE NvQuat operator*(const NvQuat& q) const
+ {
+ return NvQuat(w * q.x + q.w * x + y * q.z - q.y * z, w * q.y + q.w * y + z * q.x - q.z * x,
+ w * q.z + q.w * z + x * q.y - q.x * y, w * q.w - x * q.x - y * q.y - z * q.z);
+ }
+
+ /** quaternion addition */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat operator+(const NvQuat& q) const
+ {
+ return NvQuat(x + q.x, y + q.y, z + q.z, w + q.w);
+ }
+
+ /** quaternion subtraction */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat operator-() const
+ {
+ return NvQuat(-x, -y, -z, -w);
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat operator-(const NvQuat& q) const
+ {
+ return NvQuat(x - q.x, y - q.y, z - q.z, w - q.w);
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvQuat operator*(float r) const
+ {
+ return NvQuat(x * r, y * r, z * r, w * r);
+ }
+
+ /** the quaternion elements */
+ float x, y, z, w;
+};
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVQUAT_H
diff --git a/blast/include/shared/NvFoundation/NvSimpleTypes.h b/blast/include/shared/NvFoundation/NvSimpleTypes.h
new file mode 100644
index 000000000..002b6ee7c
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvSimpleTypes.h
@@ -0,0 +1,71 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVSIMPLETYPES_H
+#define NV_NVFOUNDATION_NVSIMPLETYPES_H
+
+/** \addtogroup foundation
+ @{
+*/
+
+// Platform specific types:
+// Design note: Its OK to use int for general loop variables and temps.
+
+#include "NvPreprocessor.h"
+#if NV_VC
+#pragma warning(push)
+#pragma warning(disable : 4668) // suppressing warning generated by Microsoft Visual Studio when including this standard
+// header
+#endif
+
+#if NV_LINUX
+#define __STDC_LIMIT_MACROS
+#endif
+
+#include <stdint.h>
+#if NV_VC
+#pragma warning(pop)
+#endif
+// Type ranges
+
+// These are here because we sometimes have non-IEEE compliant platforms to deal with.
+// Removal is under consideration (issue GWSD-34)
+
+#define NV_MAX_F32 3.4028234663852885981170418348452e+38F
+// maximum possible float value
+#define NV_MAX_F64 DBL_MAX // maximum possible double value
+
+#define NV_EPS_F32 FLT_EPSILON // maximum relative error of float rounding
+#define NV_EPS_F64 DBL_EPSILON // maximum relative error of double rounding
+
+#define NV_MAX_REAL NV_MAX_F32
+#define NV_EPS_REAL NV_EPS_F32
+#define NV_NORMALIZATION_EPSILON float(1e-20f)
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVSIMPLETYPES_H
diff --git a/blast/include/shared/NvFoundation/NvTransform.h b/blast/include/shared/NvFoundation/NvTransform.h
new file mode 100644
index 000000000..8b324f0f6
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvTransform.h
@@ -0,0 +1,214 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVTRANSFORM_H
+#define NV_NVFOUNDATION_NVTRANSFORM_H
+/** \addtogroup foundation
+ @{
+*/
+
+#include "NvQuat.h"
+#include "NvPlane.h"
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+/*!
+\brief class representing a rigid euclidean transform as a quaternion and a vector
+*/
+
+class NvTransform
+{
+ public:
+ NvQuat q;
+ NvVec3 p;
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform()
+ {
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE explicit NvTransform(const NvVec3& position) : q(NvIdentity), p(position)
+ {
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE explicit NvTransform(NvIDENTITY r) : q(NvIdentity), p(NvZero)
+ {
+ NV_UNUSED(r);
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE explicit NvTransform(const NvQuat& orientation) : q(orientation), p(0)
+ {
+ NV_ASSERT(orientation.isSane());
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform(float x, float y, float z, NvQuat aQ = NvQuat(NvIdentity))
+ : q(aQ), p(x, y, z)
+ {
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform(const NvVec3& p0, const NvQuat& q0) : q(q0), p(p0)
+ {
+ NV_ASSERT(q0.isSane());
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE explicit NvTransform(const NvMat44& m); // defined in NvMat44.h
+
+ /**
+ \brief returns true if the two transforms are exactly equal
+ */
+ NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvTransform& t) const
+ {
+ return p == t.p && q == t.q;
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform operator*(const NvTransform& x) const
+ {
+ NV_ASSERT(x.isSane());
+ return transform(x);
+ }
+
+ //! Equals matrix multiplication
+ NV_CUDA_CALLABLE NV_INLINE NvTransform& operator*=(NvTransform& other)
+ {
+ *this = *this * other;
+ return *this;
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform getInverse() const
+ {
+ NV_ASSERT(isFinite());
+ return NvTransform(q.rotateInv(-p), q.getConjugate());
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 transform(const NvVec3& input) const
+ {
+ NV_ASSERT(isFinite());
+ return q.rotate(input) + p;
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 transformInv(const NvVec3& input) const
+ {
+ NV_ASSERT(isFinite());
+ return q.rotateInv(input - p);
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 rotate(const NvVec3& input) const
+ {
+ NV_ASSERT(isFinite());
+ return q.rotate(input);
+ }
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 rotateInv(const NvVec3& input) const
+ {
+ NV_ASSERT(isFinite());
+ return q.rotateInv(input);
+ }
+
+ //! Transform transform to parent (returns compound transform: first src, then *this)
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform transform(const NvTransform& src) const
+ {
+ NV_ASSERT(src.isSane());
+ NV_ASSERT(isSane());
+ // src = [srct, srcr] -> [r*srct + t, r*srcr]
+ return NvTransform(q.rotate(src.p) + p, q * src.q);
+ }
+
+ /**
+ \brief returns true if finite and q is a unit quaternion
+ */
+
+ NV_CUDA_CALLABLE bool isValid() const
+ {
+ return p.isFinite() && q.isFinite() && q.isUnit();
+ }
+
+ /**
+ \brief returns true if finite and quat magnitude is reasonably close to unit to allow for some accumulation of error
+ vs isValid
+ */
+
+ NV_CUDA_CALLABLE bool isSane() const
+ {
+ return isFinite() && q.isSane();
+ }
+
+ /**
+ \brief returns true if all elems are finite (not NAN or INF, etc.)
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite() const
+ {
+ return p.isFinite() && q.isFinite();
+ }
+
+ //! Transform transform from parent (returns compound transform: first src, then this->inverse)
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform transformInv(const NvTransform& src) const
+ {
+ NV_ASSERT(src.isSane());
+ NV_ASSERT(isFinite());
+ // src = [srct, srcr] -> [r^-1*(srct-t), r^-1*srcr]
+ NvQuat qinv = q.getConjugate();
+ return NvTransform(qinv.rotate(src.p - p), qinv * src.q);
+ }
+
+ /**
+ \brief transform plane
+ */
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane transform(const NvPlane& plane) const
+ {
+ NvVec3 transformedNormal = rotate(plane.n);
+ return NvPlane(transformedNormal, plane.d - p.dot(transformedNormal));
+ }
+
+ /**
+ \brief inverse-transform plane
+ */
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvPlane inverseTransform(const NvPlane& plane) const
+ {
+ NvVec3 transformedNormal = rotateInv(plane.n);
+ return NvPlane(transformedNormal, plane.d + p.dot(plane.n));
+ }
+
+ /**
+ \brief return a normalized transform (i.e. one in which the quaternion has unit magnitude)
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvTransform getNormalized() const
+ {
+ return NvTransform(p, q.getNormalized());
+ }
+};
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVTRANSFORM_H
diff --git a/blast/include/shared/NvFoundation/NvVec2.h b/blast/include/shared/NvFoundation/NvVec2.h
new file mode 100644
index 000000000..966a240c8
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvVec2.h
@@ -0,0 +1,346 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVVEC2_H
+#define NV_NVFOUNDATION_NVVEC2_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "NvMath.h"
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+/**
+\brief 2 Element vector class.
+
+This is a 2-dimensional vector class with public data members.
+*/
+class NvVec2
+{
+ public:
+ /**
+ \brief default constructor leaves data uninitialized.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2()
+ {
+ }
+
+ /**
+ \brief zero constructor.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2(NvZERO r) : x(0.0f), y(0.0f)
+ {
+ NV_UNUSED(r);
+ }
+
+ /**
+ \brief Assigns scalar parameter to all elements.
+
+ Useful to initialize to zero or one.
+
+ \param[in] a Value to assign to elements.
+ */
+ explicit NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2(float a) : x(a), y(a)
+ {
+ }
+
+ /**
+ \brief Initializes from 2 scalar parameters.
+
+ \param[in] nx Value to initialize X component.
+ \param[in] ny Value to initialize Y component.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2(float nx, float ny) : x(nx), y(ny)
+ {
+ }
+
+ /**
+ \brief Copy ctor.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2(const NvVec2& v) : x(v.x), y(v.y)
+ {
+ }
+
+ // Operators
+
+ /**
+ \brief Assignment operator
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator=(const NvVec2& p)
+ {
+ x = p.x;
+ y = p.y;
+ return *this;
+ }
+
+ /**
+ \brief element access
+ */
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float& operator[](int index)
+ {
+ NV_ASSERT(index >= 0 && index <= 1);
+
+ return reinterpret_cast<float*>(this)[index];
+ }
+
+ /**
+ \brief element access
+ */
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE const float& operator[](int index) const
+ {
+ NV_ASSERT(index >= 0 && index <= 1);
+
+ return reinterpret_cast<const float*>(this)[index];
+ }
+
+ /**
+ \brief returns true if the two vectors are exactly equal.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool operator==(const NvVec2& v) const
+ {
+ return x == v.x && y == v.y;
+ }
+
+ /**
+ \brief returns true if the two vectors are not exactly equal.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool operator!=(const NvVec2& v) const
+ {
+ return x != v.x || y != v.y;
+ }
+
+ /**
+ \brief tests for exact zero vector
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isZero() const
+ {
+ return x == 0.0f && y == 0.0f;
+ }
+
+ /**
+ \brief returns true if all 2 elems of the vector are finite (not NAN or INF, etc.)
+ */
+ NV_CUDA_CALLABLE NV_INLINE bool isFinite() const
+ {
+ return NvIsFinite(x) && NvIsFinite(y);
+ }
+
+ /**
+ \brief is normalized - used by API parameter validation
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isNormalized() const
+ {
+ const float unitTolerance = 1e-4f;
+ return isFinite() && NvAbs(magnitude() - 1) < unitTolerance;
+ }
+
+ /**
+ \brief returns the squared magnitude
+
+ Avoids calling NvSqrt()!
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitudeSquared() const
+ {
+ return x * x + y * y;
+ }
+
+ /**
+ \brief returns the magnitude
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitude() const
+ {
+ return NvSqrt(magnitudeSquared());
+ }
+
+ /**
+ \brief negation
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator-() const
+ {
+ return NvVec2(-x, -y);
+ }
+
+ /**
+ \brief vector addition
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator+(const NvVec2& v) const
+ {
+ return NvVec2(x + v.x, y + v.y);
+ }
+
+ /**
+ \brief vector difference
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator-(const NvVec2& v) const
+ {
+ return NvVec2(x - v.x, y - v.y);
+ }
+
+ /**
+ \brief scalar post-multiplication
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator*(float f) const
+ {
+ return NvVec2(x * f, y * f);
+ }
+
+ /**
+ \brief scalar division
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 operator/(float f) const
+ {
+ f = 1.0f / f; // PT: inconsistent notation with operator /=
+ return NvVec2(x * f, y * f);
+ }
+
+ /**
+ \brief vector addition
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator+=(const NvVec2& v)
+ {
+ x += v.x;
+ y += v.y;
+ return *this;
+ }
+
+ /**
+ \brief vector difference
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator-=(const NvVec2& v)
+ {
+ x -= v.x;
+ y -= v.y;
+ return *this;
+ }
+
+ /**
+ \brief scalar multiplication
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator*=(float f)
+ {
+ x *= f;
+ y *= f;
+ return *this;
+ }
+ /**
+ \brief scalar division
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2& operator/=(float f)
+ {
+ f = 1.0f / f; // PT: inconsistent notation with operator /
+ x *= f;
+ y *= f;
+ return *this;
+ }
+
+ /**
+ \brief returns the scalar product of this and other.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float dot(const NvVec2& v) const
+ {
+ return x * v.x + y * v.y;
+ }
+
+ /** return a unit vector */
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 getNormalized() const
+ {
+ const float m = magnitudeSquared();
+ return m > 0.0f ? *this * NvRecipSqrt(m) : NvVec2(0, 0);
+ }
+
+ /**
+ \brief normalizes the vector in place
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float normalize()
+ {
+ const float m = magnitude();
+ if(m > 0.0f)
+ *this /= m;
+ return m;
+ }
+
+ /**
+ \brief a[i] * b[i], for all i.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 multiply(const NvVec2& a) const
+ {
+ return NvVec2(x * a.x, y * a.y);
+ }
+
+ /**
+ \brief element-wise minimum
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 minimum(const NvVec2& v) const
+ {
+ return NvVec2(NvMin(x, v.x), NvMin(y, v.y));
+ }
+
+ /**
+ \brief returns MIN(x, y);
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float minElement() const
+ {
+ return NvMin(x, y);
+ }
+
+ /**
+ \brief element-wise maximum
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec2 maximum(const NvVec2& v) const
+ {
+ return NvVec2(NvMax(x, v.x), NvMax(y, v.y));
+ }
+
+ /**
+ \brief returns MAX(x, y);
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float maxElement() const
+ {
+ return NvMax(x, y);
+ }
+
+ float x, y;
+};
+
+NV_CUDA_CALLABLE static NV_FORCE_INLINE NvVec2 operator*(float f, const NvVec2& v)
+{
+ return NvVec2(f * v.x, f * v.y);
+}
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVVEC2_H
diff --git a/blast/include/shared/NvFoundation/NvVec3.h b/blast/include/shared/NvFoundation/NvVec3.h
new file mode 100644
index 000000000..84240e10d
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvVec3.h
@@ -0,0 +1,392 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVVEC3_H
+#define NV_NVFOUNDATION_NVVEC3_H
+
+/** \addtogroup foundation
+@{
+*/
+
+#include "NvMath.h"
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+/**
+\brief 3 Element vector class.
+
+This is a 3-dimensional vector class with public data members.
+*/
+class NvVec3
+{
+ public:
+ /**
+ \brief default constructor leaves data uninitialized.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3()
+ {
+ }
+
+ /**
+ \brief zero constructor.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3(NvZERO r) : x(0.0f), y(0.0f), z(0.0f)
+ {
+ NV_UNUSED(r);
+ }
+
+ /**
+ \brief Assigns scalar parameter to all elements.
+
+ Useful to initialize to zero or one.
+
+ \param[in] a Value to assign to elements.
+ */
+ explicit NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3(float a) : x(a), y(a), z(a)
+ {
+ }
+
+ /**
+ \brief Initializes from 3 scalar parameters.
+
+ \param[in] nx Value to initialize X component.
+ \param[in] ny Value to initialize Y component.
+ \param[in] nz Value to initialize Z component.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3(float nx, float ny, float nz) : x(nx), y(ny), z(nz)
+ {
+ }
+
+ /**
+ \brief Copy ctor.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3(const NvVec3& v) : x(v.x), y(v.y), z(v.z)
+ {
+ }
+
+ // Operators
+
+ /**
+ \brief Assignment operator
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator=(const NvVec3& p)
+ {
+ x = p.x;
+ y = p.y;
+ z = p.z;
+ return *this;
+ }
+
+ /**
+ \brief element access
+ */
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE float& operator[](unsigned int index)
+ {
+ NV_ASSERT(index <= 2);
+
+ return reinterpret_cast<float*>(this)[index];
+ }
+
+ /**
+ \brief element access
+ */
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_FORCE_INLINE const float& operator[](unsigned int index) const
+ {
+ NV_ASSERT(index <= 2);
+
+ return reinterpret_cast<const float*>(this)[index];
+ }
+ /**
+ \brief returns true if the two vectors are exactly equal.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool operator==(const NvVec3& v) const
+ {
+ return x == v.x && y == v.y && z == v.z;
+ }
+
+ /**
+ \brief returns true if the two vectors are not exactly equal.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool operator!=(const NvVec3& v) const
+ {
+ return x != v.x || y != v.y || z != v.z;
+ }
+
+ /**
+ \brief tests for exact zero vector
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isZero() const
+ {
+ return x == 0.0f && y == 0.0f && z == 0.0f;
+ }
+
+ /**
+ \brief returns true if all 3 elems of the vector are finite (not NAN or INF, etc.)
+ */
+ NV_CUDA_CALLABLE NV_INLINE bool isFinite() const
+ {
+ return NvIsFinite(x) && NvIsFinite(y) && NvIsFinite(z);
+ }
+
+ /**
+ \brief is normalized - used by API parameter validation
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE bool isNormalized() const
+ {
+ const float unitTolerance = 1e-4f;
+ return isFinite() && NvAbs(magnitude() - 1) < unitTolerance;
+ }
+
+ /**
+ \brief returns the squared magnitude
+
+ Avoids calling NvSqrt()!
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitudeSquared() const
+ {
+ return x * x + y * y + z * z;
+ }
+
+ /**
+ \brief returns the magnitude
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float magnitude() const
+ {
+ return NvSqrt(magnitudeSquared());
+ }
+
+ /**
+ \brief negation
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator-() const
+ {
+ return NvVec3(-x, -y, -z);
+ }
+
+ /**
+ \brief vector addition
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator+(const NvVec3& v) const
+ {
+ return NvVec3(x + v.x, y + v.y, z + v.z);
+ }
+
+ /**
+ \brief vector difference
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator-(const NvVec3& v) const
+ {
+ return NvVec3(x - v.x, y - v.y, z - v.z);
+ }
+
+ /**
+ \brief scalar post-multiplication
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator*(float f) const
+ {
+ return NvVec3(x * f, y * f, z * f);
+ }
+
+ /**
+ \brief scalar division
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 operator/(float f) const
+ {
+ f = 1.0f / f;
+ return NvVec3(x * f, y * f, z * f);
+ }
+
+ /**
+ \brief vector addition
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator+=(const NvVec3& v)
+ {
+ x += v.x;
+ y += v.y;
+ z += v.z;
+ return *this;
+ }
+
+ /**
+ \brief vector difference
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator-=(const NvVec3& v)
+ {
+ x -= v.x;
+ y -= v.y;
+ z -= v.z;
+ return *this;
+ }
+
+ /**
+ \brief scalar multiplication
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator*=(float f)
+ {
+ x *= f;
+ y *= f;
+ z *= f;
+ return *this;
+ }
+ /**
+ \brief scalar division
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3& operator/=(float f)
+ {
+ f = 1.0f / f;
+ x *= f;
+ y *= f;
+ z *= f;
+ return *this;
+ }
+
+ /**
+ \brief returns the scalar product of this and other.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float dot(const NvVec3& v) const
+ {
+ return x * v.x + y * v.y + z * v.z;
+ }
+
+ /**
+ \brief cross product
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 cross(const NvVec3& v) const
+ {
+ return NvVec3(y * v.z - z * v.y, z * v.x - x * v.z, x * v.y - y * v.x);
+ }
+
+ /** return a unit vector */
+
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 getNormalized() const
+ {
+ const float m = magnitudeSquared();
+ return m > 0.0f ? *this * NvRecipSqrt(m) : NvVec3(0, 0, 0);
+ }
+
+ /**
+ \brief normalizes the vector in place
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float normalize()
+ {
+ const float m = magnitude();
+ if(m > 0.0f)
+ *this /= m;
+ return m;
+ }
+
+ /**
+ \brief normalizes the vector in place. Does nothing if vector magnitude is under NV_NORMALIZATION_EPSILON.
+ Returns vector magnitude if >= NV_NORMALIZATION_EPSILON and 0.0f otherwise.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float normalizeSafe()
+ {
+ const float mag = magnitude();
+ if(mag < NV_NORMALIZATION_EPSILON)
+ return 0.0f;
+ *this *= 1.0f / mag;
+ return mag;
+ }
+
+ /**
+ \brief normalizes the vector in place. Asserts if vector magnitude is under NV_NORMALIZATION_EPSILON.
+ returns vector magnitude.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float normalizeFast()
+ {
+ const float mag = magnitude();
+ NV_ASSERT(mag >= NV_NORMALIZATION_EPSILON);
+ *this *= 1.0f / mag;
+ return mag;
+ }
+
+ /**
+ \brief a[i] * b[i], for all i.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 multiply(const NvVec3& a) const
+ {
+ return NvVec3(x * a.x, y * a.y, z * a.z);
+ }
+
+ /**
+ \brief element-wise minimum
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 minimum(const NvVec3& v) const
+ {
+ return NvVec3(NvMin(x, v.x), NvMin(y, v.y), NvMin(z, v.z));
+ }
+
+ /**
+ \brief returns MIN(x, y, z);
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float minElement() const
+ {
+ return NvMin(x, NvMin(y, z));
+ }
+
+ /**
+ \brief element-wise maximum
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 maximum(const NvVec3& v) const
+ {
+ return NvVec3(NvMax(x, v.x), NvMax(y, v.y), NvMax(z, v.z));
+ }
+
+ /**
+ \brief returns MAX(x, y, z);
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE float maxElement() const
+ {
+ return NvMax(x, NvMax(y, z));
+ }
+
+ /**
+ \brief returns absolute values of components;
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec3 abs() const
+ {
+ return NvVec3(NvAbs(x), NvAbs(y), NvAbs(z));
+ }
+
+ float x, y, z;
+};
+
+NV_CUDA_CALLABLE static NV_FORCE_INLINE NvVec3 operator*(float f, const NvVec3& v)
+{
+ return NvVec3(f * v.x, f * v.y, f * v.z);
+}
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVVEC3_H
diff --git a/blast/include/shared/NvFoundation/NvVec4.h b/blast/include/shared/NvFoundation/NvVec4.h
new file mode 100644
index 000000000..d216bfc85
--- /dev/null
+++ b/blast/include/shared/NvFoundation/NvVec4.h
@@ -0,0 +1,375 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_NVFOUNDATION_NVVEC4_H
+#define NV_NVFOUNDATION_NVVEC4_H
+/** \addtogroup foundation
+@{
+*/
+#include "NvMath.h"
+#include "NvVec3.h"
+#include "NvAssert.h"
+
+/**
+\brief 4 Element vector class.
+
+This is a 4-dimensional vector class with public data members.
+*/
+#if !NV_DOXYGEN
+namespace nvidia
+{
+#endif
+
+class NvVec4
+{
+ public:
+ /**
+ \brief default constructor leaves data uninitialized.
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4()
+ {
+ }
+
+ /**
+ \brief zero constructor.
+ */
+ NV_CUDA_CALLABLE NV_FORCE_INLINE NvVec4(NvZERO r) : x(0.0f), y(0.0f), z(0.0f), w(0.0f)
+ {
+ NV_UNUSED(r);
+ }
+
+ /**
+ \brief Assigns scalar parameter to all elements.
+
+ Useful to initialize to zero or one.
+
+ \param[in] a Value to assign to elements.
+ */
+ explicit NV_CUDA_CALLABLE NV_INLINE NvVec4(float a) : x(a), y(a), z(a), w(a)
+ {
+ }
+
+ /**
+ \brief Initializes from 3 scalar parameters.
+
+ \param[in] nx Value to initialize X component.
+ \param[in] ny Value to initialize Y component.
+ \param[in] nz Value to initialize Z component.
+ \param[in] nw Value to initialize W component.
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4(float nx, float ny, float nz, float nw) : x(nx), y(ny), z(nz), w(nw)
+ {
+ }
+
+ /**
+ \brief Initializes from 3 scalar parameters.
+
+ \param[in] v Value to initialize the X, Y, and Z components.
+ \param[in] nw Value to initialize W component.
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4(const NvVec3& v, float nw) : x(v.x), y(v.y), z(v.z), w(nw)
+ {
+ }
+
+ /**
+ \brief Initializes from an array of scalar parameters.
+
+ \param[in] v Value to initialize with.
+ */
+ explicit NV_CUDA_CALLABLE NV_INLINE NvVec4(const float v[]) : x(v[0]), y(v[1]), z(v[2]), w(v[3])
+ {
+ }
+
+ /**
+ \brief Copy ctor.
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4(const NvVec4& v) : x(v.x), y(v.y), z(v.z), w(v.w)
+ {
+ }
+
+ // Operators
+
+ /**
+ \brief Assignment operator
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4& operator=(const NvVec4& p)
+ {
+ x = p.x;
+ y = p.y;
+ z = p.z;
+ w = p.w;
+ return *this;
+ }
+
+ /**
+ \brief element access
+ */
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_INLINE float& operator[](unsigned int index)
+ {
+ NV_ASSERT(index <= 3);
+
+ return reinterpret_cast<float*>(this)[index];
+ }
+
+ /**
+ \brief element access
+ */
+ NV_DEPRECATED NV_CUDA_CALLABLE NV_INLINE const float& operator[](unsigned int index) const
+ {
+ NV_ASSERT(index <= 3);
+
+ return reinterpret_cast<const float*>(this)[index];
+ }
+
+ /**
+ \brief returns true if the two vectors are exactly equal.
+ */
+ NV_CUDA_CALLABLE NV_INLINE bool operator==(const NvVec4& v) const
+ {
+ return x == v.x && y == v.y && z == v.z && w == v.w;
+ }
+
+ /**
+ \brief returns true if the two vectors are not exactly equal.
+ */
+ NV_CUDA_CALLABLE NV_INLINE bool operator!=(const NvVec4& v) const
+ {
+ return x != v.x || y != v.y || z != v.z || w != v.w;
+ }
+
+ /**
+ \brief tests for exact zero vector
+ */
+ NV_CUDA_CALLABLE NV_INLINE bool isZero() const
+ {
+ return x == 0 && y == 0 && z == 0 && w == 0;
+ }
+
+ /**
+ \brief returns true if all 3 elems of the vector are finite (not NAN or INF, etc.)
+ */
+ NV_CUDA_CALLABLE NV_INLINE bool isFinite() const
+ {
+ return NvIsFinite(x) && NvIsFinite(y) && NvIsFinite(z) && NvIsFinite(w);
+ }
+
+ /**
+ \brief is normalized - used by API parameter validation
+ */
+ NV_CUDA_CALLABLE NV_INLINE bool isNormalized() const
+ {
+ const float unitTolerance = 1e-4f;
+ return isFinite() && NvAbs(magnitude() - 1) < unitTolerance;
+ }
+
+ /**
+ \brief returns the squared magnitude
+
+ Avoids calling NvSqrt()!
+ */
+ NV_CUDA_CALLABLE NV_INLINE float magnitudeSquared() const
+ {
+ return x * x + y * y + z * z + w * w;
+ }
+
+ /**
+ \brief returns the magnitude
+ */
+ NV_CUDA_CALLABLE NV_INLINE float magnitude() const
+ {
+ return NvSqrt(magnitudeSquared());
+ }
+
+ /**
+ \brief negation
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4 operator-() const
+ {
+ return NvVec4(-x, -y, -z, -w);
+ }
+
+ /**
+ \brief vector addition
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4 operator+(const NvVec4& v) const
+ {
+ return NvVec4(x + v.x, y + v.y, z + v.z, w + v.w);
+ }
+
+ /**
+ \brief vector difference
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4 operator-(const NvVec4& v) const
+ {
+ return NvVec4(x - v.x, y - v.y, z - v.z, w - v.w);
+ }
+
+ /**
+ \brief scalar post-multiplication
+ */
+
+ NV_CUDA_CALLABLE NV_INLINE NvVec4 operator*(float f) const
+ {
+ return NvVec4(x * f, y * f, z * f, w * f);
+ }
+
+ /**
+ \brief scalar division
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4 operator/(float f) const
+ {
+ f = 1.0f / f;
+ return NvVec4(x * f, y * f, z * f, w * f);
+ }
+
+ /**
+ \brief vector addition
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4& operator+=(const NvVec4& v)
+ {
+ x += v.x;
+ y += v.y;
+ z += v.z;
+ w += v.w;
+ return *this;
+ }
+
+ /**
+ \brief vector difference
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4& operator-=(const NvVec4& v)
+ {
+ x -= v.x;
+ y -= v.y;
+ z -= v.z;
+ w -= v.w;
+ return *this;
+ }
+
+ /**
+ \brief scalar multiplication
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4& operator*=(float f)
+ {
+ x *= f;
+ y *= f;
+ z *= f;
+ w *= f;
+ return *this;
+ }
+ /**
+ \brief scalar division
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4& operator/=(float f)
+ {
+ f = 1.0f / f;
+ x *= f;
+ y *= f;
+ z *= f;
+ w *= f;
+ return *this;
+ }
+
+ /**
+ \brief returns the scalar product of this and other.
+ */
+ NV_CUDA_CALLABLE NV_INLINE float dot(const NvVec4& v) const
+ {
+ return x * v.x + y * v.y + z * v.z + w * v.w;
+ }
+
+ /** return a unit vector */
+
+ NV_CUDA_CALLABLE NV_INLINE NvVec4 getNormalized() const
+ {
+ float m = magnitudeSquared();
+ return m > 0.0f ? *this * NvRecipSqrt(m) : NvVec4(0, 0, 0, 0);
+ }
+
+ /**
+ \brief normalizes the vector in place
+ */
+ NV_CUDA_CALLABLE NV_INLINE float normalize()
+ {
+ float m = magnitude();
+ if(m > 0.0f)
+ *this /= m;
+ return m;
+ }
+
+ /**
+ \brief a[i] * b[i], for all i.
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4 multiply(const NvVec4& a) const
+ {
+ return NvVec4(x * a.x, y * a.y, z * a.z, w * a.w);
+ }
+
+ /**
+ \brief element-wise minimum
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4 minimum(const NvVec4& v) const
+ {
+ return NvVec4(NvMin(x, v.x), NvMin(y, v.y), NvMin(z, v.z), NvMin(w, v.w));
+ }
+
+ /**
+ \brief element-wise maximum
+ */
+ NV_CUDA_CALLABLE NV_INLINE NvVec4 maximum(const NvVec4& v) const
+ {
+ return NvVec4(NvMax(x, v.x), NvMax(y, v.y), NvMax(z, v.z), NvMax(w, v.w));
+ }
+
+ NV_CUDA_CALLABLE NV_INLINE NvVec3 getXYZ() const
+ {
+ return NvVec3(x, y, z);
+ }
+
+ /**
+ \brief set vector elements to zero
+ */
+ NV_CUDA_CALLABLE NV_INLINE void setZero()
+ {
+ x = y = z = w = 0.0f;
+ }
+
+ float x, y, z, w;
+};
+
+NV_CUDA_CALLABLE static NV_INLINE NvVec4 operator*(float f, const NvVec4& v)
+{
+ return NvVec4(f * v.x, f * v.y, f * v.z, f * v.w);
+}
+
+#if !NV_DOXYGEN
+} // namespace nvidia
+#endif
+
+/** @} */
+#endif // #ifndef NV_NVFOUNDATION_NVVEC4_H
diff --git a/blast/include/shared/NvFoundation/platform/unix/NvUnixIntrinsics.h b/blast/include/shared/NvFoundation/platform/unix/NvUnixIntrinsics.h
new file mode 100644
index 000000000..73f874572
--- /dev/null
+++ b/blast/include/shared/NvFoundation/platform/unix/NvUnixIntrinsics.h
@@ -0,0 +1,173 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_UNIX_NVUNIXINTRINSICS_H
+#define NV_UNIX_NVUNIXINTRINSICS_H
+
+#include "Nv.h"
+#include "NvAssert.h"
+
+#if !(NV_LINUX || NV_ANDROID || NV_PS4 || NV_APPLE_FAMILY)
+#error "This file should only be included by Unix builds!!"
+#endif
+
+#include <math.h>
+#include <float.h>
+
+namespace nvidia
+{
+namespace intrinsics
+{
+//! \brief platform-specific absolute value
+NV_CUDA_CALLABLE NV_FORCE_INLINE float abs(float a)
+{
+ return ::fabsf(a);
+}
+
+//! \brief platform-specific select float
+NV_CUDA_CALLABLE NV_FORCE_INLINE float fsel(float a, float b, float c)
+{
+ return (a >= 0.0f) ? b : c;
+}
+
+//! \brief platform-specific sign
+NV_CUDA_CALLABLE NV_FORCE_INLINE float sign(float a)
+{
+ return (a >= 0.0f) ? 1.0f : -1.0f;
+}
+
+//! \brief platform-specific reciprocal
+NV_CUDA_CALLABLE NV_FORCE_INLINE float recip(float a)
+{
+ return 1.0f / a;
+}
+
+//! \brief platform-specific reciprocal estimate
+NV_CUDA_CALLABLE NV_FORCE_INLINE float recipFast(float a)
+{
+ return 1.0f / a;
+}
+
+//! \brief platform-specific square root
+NV_CUDA_CALLABLE NV_FORCE_INLINE float sqrt(float a)
+{
+ return ::sqrtf(a);
+}
+
+//! \brief platform-specific reciprocal square root
+NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrt(float a)
+{
+ return 1.0f / ::sqrtf(a);
+}
+
+NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrtFast(float a)
+{
+ return 1.0f / ::sqrtf(a);
+}
+
+//! \brief platform-specific sine
+NV_CUDA_CALLABLE NV_FORCE_INLINE float sin(float a)
+{
+ return ::sinf(a);
+}
+
+//! \brief platform-specific cosine
+NV_CUDA_CALLABLE NV_FORCE_INLINE float cos(float a)
+{
+ return ::cosf(a);
+}
+
+//! \brief platform-specific minimum
+NV_CUDA_CALLABLE NV_FORCE_INLINE float selectMin(float a, float b)
+{
+ return a < b ? a : b;
+}
+
+//! \brief platform-specific maximum
+NV_CUDA_CALLABLE NV_FORCE_INLINE float selectMax(float a, float b)
+{
+ return a > b ? a : b;
+}
+
+//! \brief platform-specific finiteness check (not INF or NAN)
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite(float a)
+{
+ return !!isfinite(a);
+}
+
+//! \brief platform-specific finiteness check (not INF or NAN)
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite(double a)
+{
+ return !!isfinite(a);
+}
+
+/*!
+Sets \c count bytes starting at \c dst to zero.
+*/
+NV_FORCE_INLINE void* memZero(void* NV_RESTRICT dest, uint32_t count)
+{
+ return memset(dest, 0, count);
+}
+
+/*!
+Sets \c count bytes starting at \c dst to \c c.
+*/
+NV_FORCE_INLINE void* memSet(void* NV_RESTRICT dest, int32_t c, uint32_t count)
+{
+ return memset(dest, c, count);
+}
+
+/*!
+Copies \c count bytes from \c src to \c dst. Use memMove if regions overlap.
+*/
+NV_FORCE_INLINE void* memCopy(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count)
+{
+ return memcpy(dest, src, count);
+}
+
+/*!
+Copies \c count bytes from \c src to \c dst. Supports overlapping regions.
+*/
+NV_FORCE_INLINE void* memMove(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count)
+{
+ return memmove(dest, src, count);
+}
+
+/*!
+Set 128B to zero starting at \c dst+offset. Must be aligned.
+*/
+NV_FORCE_INLINE void memZero128(void* NV_RESTRICT dest, uint32_t offset = 0)
+{
+ NV_ASSERT(((size_t(dest) + offset) & 0x7f) == 0);
+ memSet(reinterpret_cast<char*>(dest) + offset, 0, 128);
+}
+
+} // namespace intrinsics
+} // namespace nvidia
+
+#endif // #ifndef NV_UNIX_NVUNIXINTRINSICS_H
diff --git a/blast/include/shared/NvFoundation/platform/windows/NvWindowsIntrinsics.h b/blast/include/shared/NvFoundation/platform/windows/NvWindowsIntrinsics.h
new file mode 100644
index 000000000..e5a9016b5
--- /dev/null
+++ b/blast/include/shared/NvFoundation/platform/windows/NvWindowsIntrinsics.h
@@ -0,0 +1,187 @@
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of NVIDIA CORPORATION nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved.
+// Copyright (c) 2001-2023 NovodeX AG. All rights reserved.
+
+#ifndef NV_WINDOWS_NVWINDOWSINTRINSICS_H
+#define NV_WINDOWS_NVWINDOWSINTRINSICS_H
+
+#include "Nv.h"
+#include "NvAssert.h"
+
+#if !NV_WINDOWS_FAMILY
+#error "This file should only be included by Windows or WIN8ARM builds!!"
+#endif
+
+#include <math.h>
+#include <float.h>
+
+#if !NV_DOXYGEN
+namespace nvidia
+{
+namespace intrinsics
+{
+#endif
+
+//! \brief platform-specific absolute value
+NV_CUDA_CALLABLE NV_FORCE_INLINE float abs(float a)
+{
+ return ::fabsf(a);
+}
+
+//! \brief platform-specific select float
+NV_CUDA_CALLABLE NV_FORCE_INLINE float fsel(float a, float b, float c)
+{
+ return (a >= 0.0f) ? b : c;
+}
+
+//! \brief platform-specific sign
+NV_CUDA_CALLABLE NV_FORCE_INLINE float sign(float a)
+{
+ return (a >= 0.0f) ? 1.0f : -1.0f;
+}
+
+//! \brief platform-specific reciprocal
+NV_CUDA_CALLABLE NV_FORCE_INLINE float recip(float a)
+{
+ return 1.0f / a;
+}
+
+//! \brief platform-specific reciprocal estimate
+NV_CUDA_CALLABLE NV_FORCE_INLINE float recipFast(float a)
+{
+ return 1.0f / a;
+}
+
+//! \brief platform-specific square root
+NV_CUDA_CALLABLE NV_FORCE_INLINE float sqrt(float a)
+{
+ return ::sqrtf(a);
+}
+
+//! \brief platform-specific reciprocal square root
+NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrt(float a)
+{
+ return 1.0f / ::sqrtf(a);
+}
+
+//! \brief platform-specific reciprocal square root estimate
+NV_CUDA_CALLABLE NV_FORCE_INLINE float recipSqrtFast(float a)
+{
+ return 1.0f / ::sqrtf(a);
+}
+
+//! \brief platform-specific sine
+NV_CUDA_CALLABLE NV_FORCE_INLINE float sin(float a)
+{
+ return ::sinf(a);
+}
+
+//! \brief platform-specific cosine
+NV_CUDA_CALLABLE NV_FORCE_INLINE float cos(float a)
+{
+ return ::cosf(a);
+}
+
+//! \brief platform-specific minimum
+NV_CUDA_CALLABLE NV_FORCE_INLINE float selectMin(float a, float b)
+{
+ return a < b ? a : b;
+}
+
+//! \brief platform-specific maximum
+NV_CUDA_CALLABLE NV_FORCE_INLINE float selectMax(float a, float b)
+{
+ return a > b ? a : b;
+}
+
+//! \brief platform-specific finiteness check (not INF or NAN)
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite(float a)
+{
+#ifdef __CUDACC__
+ return !!isfinite(a);
+#else
+ return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a)));
+#endif
+}
+
+//! \brief platform-specific finiteness check (not INF or NAN)
+NV_CUDA_CALLABLE NV_FORCE_INLINE bool isFinite(double a)
+{
+#ifdef __CUDACC__
+ return !!isfinite(a);
+#else
+ return (0 == ((_FPCLASS_SNAN | _FPCLASS_QNAN | _FPCLASS_NINF | _FPCLASS_PINF) & _fpclass(a)));
+#endif
+}
+
+/*!
+Sets \c count bytes starting at \c dst to zero.
+*/
+NV_FORCE_INLINE void* memZero(void* NV_RESTRICT dest, uint32_t count)
+{
+ return memset(dest, 0, count);
+}
+
+/*!
+Sets \c count bytes starting at \c dst to \c c.
+*/
+NV_FORCE_INLINE void* memSet(void* NV_RESTRICT dest, int32_t c, uint32_t count)
+{
+ return memset(dest, c, count);
+}
+
+/*!
+Copies \c count bytes from \c src to \c dst. Use memMove if regions overlap.
+*/
+NV_FORCE_INLINE void* memCopy(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count)
+{
+ return memcpy(dest, src, count);
+}
+
+/*!
+Copies \c count bytes from \c src to \c dst. Supports overlapping regions.
+*/
+NV_FORCE_INLINE void* memMove(void* NV_RESTRICT dest, const void* NV_RESTRICT src, uint32_t count)
+{
+ return memmove(dest, src, count);
+}
+
+/*!
+Set 128B to zero starting at \c dst+offset. Must be aligned.
+*/
+NV_FORCE_INLINE void memZero128(void* NV_RESTRICT dest, uint32_t offset = 0)
+{
+ NV_ASSERT(((size_t(dest) + offset) & 0x7f) == 0);
+ memSet((char * NV_RESTRICT)dest + offset, 0, 128);
+}
+
+#if !NV_DOXYGEN
+} // namespace intrinsics
+} // namespace nvidia
+#endif
+
+#endif // #ifndef NV_WINDOWS_NVWINDOWSINTRINSICS_H
diff --git a/blast/include/toolkit/NvBlastTk.h b/blast/include/toolkit/NvBlastTk.h
index 5c1324820..95e638f80 100644
--- a/blast/include/toolkit/NvBlastTk.h
+++ b/blast/include/toolkit/NvBlastTk.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/toolkit/NvBlastTkActor.h b/blast/include/toolkit/NvBlastTkActor.h
index 4301657f9..3439827ac 100644
--- a/blast/include/toolkit/NvBlastTkActor.h
+++ b/blast/include/toolkit/NvBlastTkActor.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/toolkit/NvBlastTkAsset.h b/blast/include/toolkit/NvBlastTkAsset.h
index 954e13885..f7fe5e4b8 100644
--- a/blast/include/toolkit/NvBlastTkAsset.h
+++ b/blast/include/toolkit/NvBlastTkAsset.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -33,7 +33,7 @@
#include "NvBlastTkIdentifiable.h"
#include "NvBlastTypes.h"
-#include "foundation/PxVec3.h"
+#include "NvVec3.h"
// Forward declarations
struct NvBlastAsset;
@@ -50,7 +50,7 @@ A descriptor stored by a TkAsset for an internal joint. Internal joints are cre
struct TkAssetJointDesc
{
uint32_t nodeIndices[2]; //!< The graph node indices corresponding to the support chunks joined by a joint
- physx::PxVec3 attachPositions[2]; //!< The joint's attachment positions in asset-local space
+ nvidia::NvVec3 attachPositions[2]; //!< The joint's attachment positions in asset-local space
};
diff --git a/blast/include/toolkit/NvBlastTkEvent.h b/blast/include/toolkit/NvBlastTkEvent.h
index 63d249034..56e3d3041 100644
--- a/blast/include/toolkit/NvBlastTkEvent.h
+++ b/blast/include/toolkit/NvBlastTkEvent.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/toolkit/NvBlastTkFamily.h b/blast/include/toolkit/NvBlastTkFamily.h
index e116a7bfa..17498d7f3 100644
--- a/blast/include/toolkit/NvBlastTkFamily.h
+++ b/blast/include/toolkit/NvBlastTkFamily.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/toolkit/NvBlastTkFramework.h b/blast/include/toolkit/NvBlastTkFramework.h
index 4c3ee9e0f..226970fca 100644
--- a/blast/include/toolkit/NvBlastTkFramework.h
+++ b/blast/include/toolkit/NvBlastTkFramework.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -35,17 +35,10 @@
#include "NvBlastTkType.h"
#include "NvBlastTkEvent.h"
-#include "NvBlastPreprocessor.h"
+#include "NvPreprocessor.h"
#include "NvBlastTypes.h"
-#include "foundation/PxVec3.h"
-
-
-// Forward declarations
-namespace physx
-{
-class PxTransform;
-}
+#include "NvVec3.h"
namespace Nv
@@ -133,7 +126,7 @@ struct TkJointDesc
{
TkFamily* families[2]; //!< The TkFamily objects containing the chunks joined by the joint
uint32_t chunkIndices[2]; //!< The chunk indices within the corresponding TkFamily objects joined by the joint. The indexed chunks will be support chunks.
- physx::PxVec3 attachPositions[2]; //!< The position of the joint relative to each TkActor which owns the chunks jointed by this joint
+ nvidia::NvVec3 attachPositions[2]; //!< The position of the joint relative to each TkActor which owns the chunks jointed by this joint
};
@@ -317,7 +310,7 @@ Create a new TkFramework. This creates a global singleton, and will fail if a T
\return the new TkFramework if successful, NULL otherwise.
*/
-NVBLAST_API Nv::Blast::TkFramework* NvBlastTkFrameworkCreate();
+NV_C_API Nv::Blast::TkFramework* NvBlastTkFrameworkCreate();
/**
@@ -325,7 +318,7 @@ Retrieve a pointer to the global TkFramework singleton (if it exists).
\return the pointer to the global TkFramework (NULL if none exists).
*/
-NVBLAST_API Nv::Blast::TkFramework* NvBlastTkFrameworkGet();
+NV_C_API Nv::Blast::TkFramework* NvBlastTkFrameworkGet();
#endif // ifndef NVBLASTTKFRAMEWORK_H
diff --git a/blast/include/toolkit/NvBlastTkGroup.h b/blast/include/toolkit/NvBlastTkGroup.h
index 56d4603e5..4bc873471 100644
--- a/blast/include/toolkit/NvBlastTkGroup.h
+++ b/blast/include/toolkit/NvBlastTkGroup.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/extensions/physx/NvBlastExtPxTask.h b/blast/include/toolkit/NvBlastTkGroupTaskManager.h
similarity index 82%
rename from blast/include/extensions/physx/NvBlastExtPxTask.h
rename to blast/include/toolkit/NvBlastTkGroupTaskManager.h
index a6e2c62da..7061103db 100644
--- a/blast/include/extensions/physx/NvBlastExtPxTask.h
+++ b/blast/include/toolkit/NvBlastTkGroupTaskManager.h
@@ -22,24 +22,26 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
-//! @brief Defines a task manager API for multithreading ExtPx oerations
+//! @brief Defines a task manager API for multithreading Tk operations
-#ifndef NVBLASTEXTPXTASK_H
-#define NVBLASTEXTPXTASK_H
+#ifndef NVBLASTTKGROUPTASKMANAGER_H
+#define NVBLASTTKGROUPTASKMANAGER_H
#include "NvBlastTypes.h"
// Forward declarations
-namespace physx
+namespace nvidia
{
-class PxTaskManager;
+namespace task
+{
+class NvTaskManager;
+}
}
-
namespace Nv
{
@@ -52,18 +54,18 @@ class TkGroup;
/**
-Uses a physx::PxTaskManager to process a TkGroup concurrently.
+Uses a nvidia::task::NvTaskManager to process a TkGroup concurrently.
*/
-class NV_DLL_EXPORT ExtGroupTaskManager
+class NV_DLL_EXPORT TkGroupTaskManager
{
protected:
- virtual ~ExtGroupTaskManager() {}
+ virtual ~TkGroupTaskManager() {}
public:
/**
- Construct using existing physx::PxTaskManager and TkGroup. The TkGroup can be set later with setGroup().
+ Construct using existing nvidia::task::NvTaskManager and TkGroup. The TkGroup can be set later with setGroup().
*/
- static ExtGroupTaskManager* create(physx::PxTaskManager&, TkGroup* = nullptr);
+ static TkGroupTaskManager* create(nvidia::task::NvTaskManager&, TkGroup* = nullptr);
/**
Set the group to process. Cannot be changed while a group being processed.
@@ -105,4 +107,4 @@ class NV_DLL_EXPORT ExtGroupTaskManager
} // namespace Blast
} // namespace Nv
-#endif // NVBLASTEXTPXTASK_H
+#endif // NVBLASTTKGROUPTASKMANAGER_H
diff --git a/blast/include/toolkit/NvBlastTkIdentifiable.h b/blast/include/toolkit/NvBlastTkIdentifiable.h
index 70e978285..684d80a6a 100644
--- a/blast/include/toolkit/NvBlastTkIdentifiable.h
+++ b/blast/include/toolkit/NvBlastTkIdentifiable.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/toolkit/NvBlastTkJoint.h b/blast/include/toolkit/NvBlastTkJoint.h
index 96267f37d..a2e50840b 100644
--- a/blast/include/toolkit/NvBlastTkJoint.h
+++ b/blast/include/toolkit/NvBlastTkJoint.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
@@ -33,7 +33,7 @@
#include "NvBlastTkObject.h"
-#include "foundation/PxVec3.h"
+#include "NvVec3.h"
namespace Nv
@@ -48,7 +48,7 @@ struct TkJointData
{
TkActor* actors[2]; //!< The TkActor objects joined by the joint
uint32_t chunkIndices[2]; //!< The chunk indices within the corresponding TkActor objects joined by the joint. The indexed chunks will be support chunks.
- physx::PxVec3 attachPositions[2]; //!< The position of the joint relative to each TkActor
+ nvidia::NvVec3 attachPositions[2]; //!< The position of the joint relative to each TkActor
};
diff --git a/blast/include/toolkit/NvBlastTkObject.h b/blast/include/toolkit/NvBlastTkObject.h
index 7b8d0e412..5644fcb43 100644
--- a/blast/include/toolkit/NvBlastTkObject.h
+++ b/blast/include/toolkit/NvBlastTkObject.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
//! @file
//!
diff --git a/blast/include/toolkit/NvBlastTkType.h b/blast/include/toolkit/NvBlastTkType.h
index 07ab32b23..a79c38e64 100644
--- a/blast/include/toolkit/NvBlastTkType.h
+++ b/blast/include/toolkit/NvBlastTkType.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTTKTYPE_H
diff --git a/blast/premake5.lua b/blast/premake5.lua
index 0f6307480..49422dbcd 100644
--- a/blast/premake5.lua
+++ b/blast/premake5.lua
@@ -83,6 +83,7 @@ local root = repo_build.get_abs_path(".")
repo_build.prebuild_copy {
{ "include", "_build/%{platform}/%{config}/"..workspace_name.."/include" },
{ "source/sdk/common", "_build/%{platform}/%{config}/"..workspace_name.."/source/sdk/common" },
+ { "source/shared/NsFoundation", "_build/%{platform}/%{config}/"..workspace_name.."/source/shared/NsFoundation" },
{ "PACKAGE-LICENSES", "_build/%{platform}/%{config}/"..workspace_name.."/PACKAGE-LICENSES" }
}
@@ -357,11 +358,16 @@ end
group "sdk"
project "NvBlast"
blast_sdklib_standard_setup("lowlevel")
+ includedirs {
+ "include/shared/NvFoundation",
+ }
project "NvBlastGlobals"
blast_sdklib_standard_setup("globals")
includedirs {
"include/lowlevel",
+ "source/shared/NsFoundation/include",
+ "include/shared/NvFoundation",
}
project "NvBlastExtShaders"
@@ -370,9 +376,8 @@ group "sdk"
includedirs {
"include/lowlevel",
"include/globals",
- target_deps.."/physxsdk/include",
- target_deps.."/physxsdk/source/foundation/include",
- target_deps.."/pxshared/include",
+ "include/shared/NvFoundation",
+ "source/shared/NsFoundation/include",
}
filter { "system:windows" }
disablewarnings {
@@ -390,6 +395,7 @@ group "sdk"
includedirs {
"include/lowlevel",
"include/globals",
+ "include/shared/NvFoundation",
}
project "NvBlastExtAuthoring"
@@ -404,9 +410,8 @@ group "sdk"
"source/sdk/extensions/authoringCommon",
"source/sdk/extensions/authoring/VHACD/inc",
"source/sdk/extensions/authoring/VHACD/public",
- target_deps.."/physxsdk/include",
- target_deps.."/physxsdk/source/foundation/include",
- target_deps.."/pxshared/include",
+ "include/shared/NvFoundation",
+ "source/shared/NsFoundation/include",
target_deps.."/BoostMultiprecision",
}
files {
@@ -440,9 +445,10 @@ group "sdk"
"include/lowlevel",
"include/globals",
"source/sdk/globals",
- target_deps.."/physxsdk/include",
- target_deps.."/physxsdk/source/foundation/include",
- target_deps.."/pxshared/include",
+ "include/shared/NvFoundation",
+ "source/shared/NsFoundation/include",
+ "source/shared/NsFileBuffer/include",
+ "source/shared/NvTask/include",
}
project "NvBlastExtStress"
@@ -454,10 +460,9 @@ group "sdk"
includedirs {
"include/lowlevel",
"include/globals",
+ "include/shared/NvFoundation",
+ "source/shared/NsFoundation/include",
"source/shared/stress_solver",
- target_deps.."/physxsdk/include",
- target_deps.."/physxsdk/source/foundation/include",
- target_deps.."/pxshared/include",
}
files {
"source/shared/stress_solver/stress.cpp",
@@ -490,9 +495,10 @@ group "sdk"
"include/globals",
"_build/host-deps/CapnProto/src",
capnp_gen_path,
- target_deps.."/physxsdk/include",
- target_deps.."/physxsdk/source/foundation/include",
- target_deps.."/pxshared/include",
+ "include/shared/NvFoundation",
+ "source/shared/NsFoundation/include",
+ "source/shared/NsFileBuffer/include",
+ "source/shared/NvTask/include",
}
blast_sdklib_common_files()
add_files("source/sdk/extensions/serialization",
@@ -540,9 +546,10 @@ group "sdk"
"include/globals",
"_build/host-deps/CapnProto/src",
capnp_gen_path,
- target_deps.."/physxsdk/include",
- target_deps.."/physxsdk/source/foundation/include",
- target_deps.."/pxshared/include",
+ "include/shared/NvFoundation",
+ "source/shared/NsFoundation/include",
+ "source/shared/NsFileBuffer/include",
+ "source/shared/NvTask/include",
}
blast_sdklib_common_files()
add_files("source/sdk/extensions/serialization",
@@ -557,7 +564,7 @@ group "sdk"
{
"AssetDTO.cpp",
"TkAssetDTO.cpp",
- "PxVec3DTO.cpp",
+ "NvVec3DTO.cpp",
"NvBlastChunkDTO.cpp",
"NvBlastBondDTO.cpp",
"NvBlastIDDTO.cpp",
@@ -615,29 +622,23 @@ group "tests"
"NvBlastFamily.cpp",
})
- add_files("source/shared/utils", {
- "AssetGenerator.cpp",
+ add_files("source/sdk/toolkit", {
+ "NvBlastTkTaskManager.cpp",
})
- add_files("source/sdk/extensions/physx", { -- !!!
- "NvBlastExtPxTaskImpl.cpp",
+ add_files("source/shared/utils", {
+ "AssetGenerator.cpp",
})
- filter { "system:linux" }
- add_files("source/shared/task", {
- "TaskManager.cpp"
- })
- filter {}
-
includedirs {
"include/globals",
"include/lowlevel",
"include/toolkit",
"include/extensions/assetutils",
- "include/extensions/physx", -- !!!
"include/extensions/shaders",
"include/extensions/serialization",
"source/sdk/common",
+ "source/sdk/globals",
"source/sdk/lowlevel",
"source/sdk/extensions/serialization",
"source/test/src",
@@ -645,28 +646,25 @@ group "tests"
"source/test/src/utils",
"source/shared/filebuf/include",
"source/shared/utils",
- target_deps.."/physxsdk/include",
- target_deps.."/physxsdk/source/foundation/include",
- target_deps.."/pxshared/include",
+ "include/shared/NvFoundation",
+ "source/shared/NsFoundation/include",
+ "source/shared/NsFileBuffer/include",
+ "source/shared/NvTask/include",
target_deps.."/googletest/include",
}
filter { "system:windows", "configurations:debug" }
- libdirs { target_deps.."/googletest/lib/vc14win64-cmake/Debug", target_deps.."/physxsdk/bin/win.x86_64.vc141.md/debug" }
- repo_build.copy_to_targetdir(target_deps.."/physxsdk/bin/win.x86_64.vc141.md/debug/PhysXFoundation_64.dll")
+ libdirs { target_deps.."/googletest/lib/vc14win64-cmake/Debug" }
filter { "system:windows", "configurations:release" }
- libdirs { target_deps.."/googletest/lib/vc14win64-cmake/Release", target_deps.."/physxsdk/bin/win.x86_64.vc141.md/release" }
- repo_build.copy_to_targetdir(target_deps.."/physxsdk/bin/win.x86_64.vc141.md/release/PhysXFoundation_64.dll")
- filter { "system:linux", "configurations:debug" }
- libdirs { target_deps.."/googletest/lib/gcc-4.8", target_deps.."/physxsdk/bin/linux.clang/debug" }
- filter { "system:linux", "configurations:release" }
- libdirs { target_deps.."/googletest/lib/gcc-4.8", target_deps.."/physxsdk/bin/linux.clang/release" }
+ libdirs { target_deps.."/googletest/lib/vc14win64-cmake/Release" }
+ filter { "system:linux" }
+ libdirs { target_deps.."/googletest/lib/gcc-4.8" }
filter{}
links { "gtest_main", "gtest" }
filter { "system:windows" }
- links { "PhysXFoundation_64", "PhysXTask_static_64" }
+ -- links { "PhysXFoundation_64", "PhysXTask_static_64" }
disablewarnings {
"4002", -- too many actual parameters for macro 'identifier'
"4100", -- unreferenced formal parameter
@@ -677,7 +675,7 @@ group "tests"
"4996", -- code uses a function, class member, variable, or typedef that's marked deprecated
}
filter { "system:linux"}
- links { "PhysXFoundation_static_64" }
+ -- links { "PhysXFoundation_static_64" }
disablewarnings {
"undef",
"sign-compare"
diff --git a/blast/repo.toml b/blast/repo.toml
index 741e0134e..d8c5d56e8 100644
--- a/blast/repo.toml
+++ b/blast/repo.toml
@@ -62,198 +62,3 @@ extra_premake_args = ["--python-version={}"]
[repo_format]
-# PUBLIC_EXCLUDE_BEGIN
-########################################################################################################################
-# Packaging
-########################################################################################################################
-
-[repo_package.packages.main_package]
-
-name = "${conf:repo.name}"
-
-root = "_build/${platform}/${config}/${conf:repo.name}"
-
-files = [
- ["**/include/**/*.*"],
- ["**/source/**/*.*"],
- ["**/PACKAGE-LICENSES/*.*"],
-]
-
-files_exclude = [
- ["**/include/extensions/exporter/**"],
- ["**/include/extensions/physx/**"],
- ["**/include/extensions/serialization/*ExtPx*.*"],
-]
-
-windows-x86_64.files = [
- ["**/bin/NvBlast*.dll"],
- ["**/bin/NvBlast*.lib"],
-]
-
-linux-x86_64.files = [
- ["**/bin/*NvBlast*.so"],
-]
-
-windows-x86_64.debug.files = [
- ["**/NvBlast*.pdb"],
- ["**/NvBlast*.exp"],
-]
-
-label_name = false
-
-version = "${env:BUILD_NUMBER}"
-package_per_config = true
-append_config = true
-version_separator = "."
-
-[repo_package.packages.test_package]
-
-name = "test-${conf:repo.name}"
-
-root = "_build/${platform}/${config}/${conf:repo.name}"
-
-windows-x86_64.files = [
- ["**/bin/PhysX*.dll"],
- ["**/bin/*Tests.exe"],
-]
-
-linux-x86_64.files = [
- ["**/bin/*Tests"],
-]
-
-label_name = false
-
-version = "${env:BUILD_NUMBER}"
-create_package_info = false
-package_per_config = true
-append_config = true
-version_separator = "."
-
-[repo_package.packages."platform:windows-x86_64".docs]
-omniverse_flow_version_scheme = true
-files = [
- ["_build/docs"]
-]
-
-
-########################################################################################################################
-# Build Number
-########################################################################################################################
-
-[repo_build_number]
-enabled = true
-
-
-########################################################################################################################
-# Package publishing to packman
-########################################################################################################################
-
-[repo_publish]
-enabled = true
-packages = ["blast-sdk"]
-default_remote = "cloudfront"
-
-
-########################################################################################################################
-# Documentation building
-########################################################################################################################
-
-[repo_docs]
-enabled = true
-
-config = "release"
-
-doxygen_aliases = [
-]
-
-repo_url = "https://gitlab-master.nvidia.com/omniverse/blast-sdk"
-
-project = "${conf:repo.name}"
-version = "${file:${root}/VERSION.md}"
-name = "blast-sdk"
-api_title = "blast-sdk-${file:${root}/VERSION.md}"
-copyright_start = 2017
-
-sphinx_exclude_patterns = [
- "_build",
- "_compiler",
- "_repo",
- "deps",
- "PACKAGE-LICENSES",
- "source",
- "test",
- "tools",
- "VERSION.md",
- "COVER.md",
- "NvPreprocessor.h",
-]
-
-doxygen_input = [
- "include/extensions/assetutils/NvBlastExtAssetUtils.h",
- "include/extensions/authoring/NvBlastExtAuthoring.h",
- "include/extensions/authoring/NvBlastExtAuthoringBondGenerator.h",
- "include/extensions/authoring/NvBlastExtAuthoringBooleanTool.h",
- "include/extensions/authoring/NvBlastExtAuthoringCutout.h",
- "include/extensions/authoring/NvBlastExtAuthoringFractureTool.h",
- "include/extensions/authoring/NvBlastExtAuthoringMeshCleaner.h",
- "include/extensions/authoringCommon/NvBlastExtAuthoringAccelerator.h",
- "include/extensions/authoringCommon/NvBlastExtAuthoringConvexMeshBuilder.h",
- "include/extensions/authoringCommon/NvBlastExtAuthoringMesh.h",
- "include/extensions/authoringCommon/NvBlastExtAuthoringPatternGenerator.h",
- "include/extensions/authoringCommon/NvBlastExtAuthoringTypes.h",
- "include/extensions/exporter/NvBlastExtExporter.h",
- "include/extensions/exporter/NvBlastExtExporterJsonCollision.h",
- "include/extensions/physx/NvBlastExtPx.h",
- "include/extensions/physx/NvBlastExtPxActor.h",
- "include/extensions/physx/NvBlastExtPxAsset.h",
- "include/extensions/physx/NvBlastExtPxCollisionBuilder.h",
- "include/extensions/physx/NvBlastExtPxFamily.h",
- "include/extensions/physx/NvBlastExtPxImpactDamageManager.h",
- "include/extensions/physx/NvBlastExtPxListener.h",
- "include/extensions/physx/NvBlastExtPxManager.h",
- "include/extensions/physx/NvBlastExtPxStressSolver.h",
- "include/extensions/physx/NvBlastExtPxSync.h",
- "include/extensions/physx/NvBlastExtPxTask.h",
- "include/extensions/physx/NvBlastPxCallbacks.h",
- "include/extensions/RT/NvBlastExtRT.h",
- "include/extensions/serialization/NvBlastExtLlSerialization.h",
- "include/extensions/serialization/NvBlastExtPxSerialization.h",
- "include/extensions/serialization/NvBlastExtSerialization.h",
- "include/extensions/serialization/NvBlastExtTkSerialization.h",
- "include/extensions/shaders/NvBlastExtDamageShaders.h",
- "include/extensions/stress/NvBlastExtStressSolver.h",
- "include/globals/NvBlastAllocator.h",
- "include/globals/NvBlastDebugRender.h",
- "include/globals/NvBlastGlobals.h",
- "include/globals/NvBlastProfiler.h",
- "include/globals/NvCMath.h",
- "include/lowlevel/NvBlast.h",
- "include/lowlevel/NvBlastTypes.h",
- "include/lowlevel/NvCTypes.h",
- "include/lowlevel/NvPreprocessor.h",
- "include/toolkit/NvBlastTk.h",
- "include/toolkit/NvBlastTkActor.h",
- "include/toolkit/NvBlastTkAsset.h",
- "include/toolkit/NvBlastTkEvent.h",
- "include/toolkit/NvBlastTkFamily.h",
- "include/toolkit/NvBlastTkFramework.h",
- "include/toolkit/NvBlastTkGroup.h",
- "include/toolkit/NvBlastTkIdentifiable.h",
- "include/toolkit/NvBlastTkJoint.h",
- "include/toolkit/NvBlastTkObject.h",
- "include/toolkit/NvBlastTkType.h",
-]
-
-doxygen_predefined = [
- "NVBLAST_API=",
- "NV_INLINE=inline",
- "NV_COMPILE_TIME_ASSERT(arg)=",
-]
-
-
-[repo_docs.editions.s3web]
-protocol = "s3"
-bucket_name = "omniverse-docs"
-bucket_dir = "${project}/${version}"
-
-# PUBLIC_EXCLUDE_END
\ No newline at end of file
diff --git a/blast/source/sdk/common/NvBlastArray.h b/blast/source/sdk/common/NvBlastArray.h
index b8c5062c5..8866664c3 100644
--- a/blast/source/sdk/common/NvBlastArray.h
+++ b/blast/source/sdk/common/NvBlastArray.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTARRAY_H
@@ -30,7 +30,7 @@
#include "NvBlastAllocator.h"
-#include "PsInlineArray.h"
+#include "NsInlineArray.h"
namespace Nv
@@ -39,24 +39,24 @@ namespace Blast
{
/**
-Wrapped PxShared Array that uses NvBlastGlobals AllocatorCalllback.
+Wrapped NvShared Array that uses NvBlastGlobals AllocatorCallback.
*/
template
struct Array
{
- typedef physx::shdfnd::Array type;
+ typedef nvidia::shdfnd::Array type;
};
/**
-Wrapped PxShared InlineArray that uses NvBlastGlobals AllocatorCalllback.
+Wrapped NvShared InlineArray that uses NvBlastGlobals AllocatorCallback.
InlineArraya is array that pre-allocates for N elements.
*/
template
struct InlineArray
{
- typedef physx::shdfnd::InlineArray type;
+ typedef nvidia::shdfnd::InlineArray type;
};
} // namespace Blast
diff --git a/blast/source/sdk/common/NvBlastAssert.cpp b/blast/source/sdk/common/NvBlastAssert.cpp
index 8a720b362..ecf177d4a 100644
--- a/blast/source/sdk/common/NvBlastAssert.cpp
+++ b/blast/source/sdk/common/NvBlastAssert.cpp
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastAssert.h"
diff --git a/blast/source/sdk/common/NvBlastAssert.h b/blast/source/sdk/common/NvBlastAssert.h
index 4073ed60d..bf20f0dc3 100644
--- a/blast/source/sdk/common/NvBlastAssert.h
+++ b/blast/source/sdk/common/NvBlastAssert.h
@@ -22,14 +22,14 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTASSERT_H
#define NVBLASTASSERT_H
-#include "NvBlastPreprocessor.h"
+#include "NvPreprocessor.h"
#if !NV_ENABLE_ASSERTS
@@ -72,7 +72,7 @@
extern "C"
{
-NVBLAST_API void NvBlastAssertHandler(const char* expr, const char* file, int line, bool& ignore);
+NV_C_API void NvBlastAssertHandler(const char* expr, const char* file, int line, bool& ignore);
} // extern "C"
diff --git a/blast/source/sdk/common/NvBlastAtomic.cpp b/blast/source/sdk/common/NvBlastAtomic.cpp
index 91c7f7523..df206f089 100644
--- a/blast/source/sdk/common/NvBlastAtomic.cpp
+++ b/blast/source/sdk/common/NvBlastAtomic.cpp
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastAtomic.h"
diff --git a/blast/source/sdk/common/NvBlastAtomic.h b/blast/source/sdk/common/NvBlastAtomic.h
index 14d70ebff..8941e7cd5 100644
--- a/blast/source/sdk/common/NvBlastAtomic.h
+++ b/blast/source/sdk/common/NvBlastAtomic.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTATOMIC_H
diff --git a/blast/source/sdk/common/NvBlastDLink.h b/blast/source/sdk/common/NvBlastDLink.h
index a4f03b525..23641499b 100644
--- a/blast/source/sdk/common/NvBlastDLink.h
+++ b/blast/source/sdk/common/NvBlastDLink.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTDLINK_H
diff --git a/blast/source/sdk/common/NvBlastFixedArray.h b/blast/source/sdk/common/NvBlastFixedArray.h
index f656cb039..d185332ea 100644
--- a/blast/source/sdk/common/NvBlastFixedArray.h
+++ b/blast/source/sdk/common/NvBlastFixedArray.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTFIXEDARRAY_H
diff --git a/blast/source/sdk/common/NvBlastFixedBitmap.h b/blast/source/sdk/common/NvBlastFixedBitmap.h
index ddf4b3919..b24f2ac6b 100644
--- a/blast/source/sdk/common/NvBlastFixedBitmap.h
+++ b/blast/source/sdk/common/NvBlastFixedBitmap.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTFIXEDBITMAP_H
diff --git a/blast/source/sdk/common/NvBlastFixedBoolArray.h b/blast/source/sdk/common/NvBlastFixedBoolArray.h
index 1ec1832d4..c6ecdf34f 100644
--- a/blast/source/sdk/common/NvBlastFixedBoolArray.h
+++ b/blast/source/sdk/common/NvBlastFixedBoolArray.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTFIXEDBOOLARRAY_H
diff --git a/blast/source/sdk/common/NvBlastFixedPriorityQueue.h b/blast/source/sdk/common/NvBlastFixedPriorityQueue.h
index a184472b2..2dc2d3657 100644
--- a/blast/source/sdk/common/NvBlastFixedPriorityQueue.h
+++ b/blast/source/sdk/common/NvBlastFixedPriorityQueue.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTFIXEDPRIORITYQUEUE_H
diff --git a/blast/source/sdk/common/NvBlastFixedQueue.h b/blast/source/sdk/common/NvBlastFixedQueue.h
index d4e4232da..d2a03f8c3 100644
--- a/blast/source/sdk/common/NvBlastFixedQueue.h
+++ b/blast/source/sdk/common/NvBlastFixedQueue.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTFIXEDQUEUE_H
diff --git a/blast/source/sdk/common/NvBlastGeometry.h b/blast/source/sdk/common/NvBlastGeometry.h
index 51407add0..39ca007a6 100644
--- a/blast/source/sdk/common/NvBlastGeometry.h
+++ b/blast/source/sdk/common/NvBlastGeometry.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTGEOMETRY_H
diff --git a/blast/source/sdk/common/NvBlastHashMap.h b/blast/source/sdk/common/NvBlastHashMap.h
index 99629f027..c39f9f3e9 100644
--- a/blast/source/sdk/common/NvBlastHashMap.h
+++ b/blast/source/sdk/common/NvBlastHashMap.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTHASHMAP_H
@@ -30,7 +30,7 @@
#include "NvBlastAllocator.h"
-#include "PsHashMap.h"
+#include "NsHashMap.h"
namespace Nv
@@ -39,12 +39,12 @@ namespace Blast
{
/**
-Wrapped PxShared HashMap that uses NvBlastGlobals AllocatorCalllback.
+Wrapped NvShared HashMap that uses NvBlastGlobals AllocatorCallback.
*/
-template >
+template >
struct HashMap
{
- typedef physx::shdfnd::HashMap type;
+ typedef nvidia::shdfnd::HashMap type;
};
} // namespace Blast
diff --git a/blast/source/sdk/common/NvBlastHashSet.h b/blast/source/sdk/common/NvBlastHashSet.h
index c63287ad5..266baeb07 100644
--- a/blast/source/sdk/common/NvBlastHashSet.h
+++ b/blast/source/sdk/common/NvBlastHashSet.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTHASHSET_H
@@ -30,7 +30,7 @@
#include "NvBlastAllocator.h"
-#include "PsHashSet.h"
+#include "NsHashSet.h"
namespace Nv
{
@@ -38,12 +38,12 @@ namespace Blast
{
/**
-Wrapped PxShared HashSet that uses NvBlastGlobals AllocatorCalllback.
+Wrapped NvShared HashSet that uses NvBlastGlobals AllocatorCallback.
*/
-template >
+template >
struct HashSet
{
- typedef physx::shdfnd::HashSet type;
+ typedef nvidia::shdfnd::HashSet type;
};
} // namespace Blast
diff --git a/blast/source/sdk/common/NvBlastIncludeWindows.h b/blast/source/sdk/common/NvBlastIncludeWindows.h
index 19041e0a5..6c8639017 100644
--- a/blast/source/sdk/common/NvBlastIncludeWindows.h
+++ b/blast/source/sdk/common/NvBlastIncludeWindows.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTINCLUDEWINDOWS_H
@@ -30,7 +30,7 @@
#ifndef _WINDOWS_ // windows already included if this is defined
-#include "NvBlastPreprocessor.h"
+#include "NvPreprocessor.h"
#ifndef _WIN32
#error "This file should only be included by Windows builds!!"
diff --git a/blast/source/sdk/common/NvBlastIndexFns.h b/blast/source/sdk/common/NvBlastIndexFns.h
index 8739d4aec..5ef924bc2 100644
--- a/blast/source/sdk/common/NvBlastIndexFns.h
+++ b/blast/source/sdk/common/NvBlastIndexFns.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2008-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTINDEXFNS_H
diff --git a/blast/source/sdk/common/NvBlastIteratorBase.h b/blast/source/sdk/common/NvBlastIteratorBase.h
index 278367ce3..5fd742a83 100644
--- a/blast/source/sdk/common/NvBlastIteratorBase.h
+++ b/blast/source/sdk/common/NvBlastIteratorBase.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTITERATORBASE_H
diff --git a/blast/source/sdk/common/NvBlastMath.h b/blast/source/sdk/common/NvBlastMath.h
index 9fd4a01a7..d60b97860 100644
--- a/blast/source/sdk/common/NvBlastMath.h
+++ b/blast/source/sdk/common/NvBlastMath.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTMATH_H
diff --git a/blast/source/sdk/common/NvBlastMemory.h b/blast/source/sdk/common/NvBlastMemory.h
index 4ea626843..b369a8593 100644
--- a/blast/source/sdk/common/NvBlastMemory.h
+++ b/blast/source/sdk/common/NvBlastMemory.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTMEMORY_H
@@ -128,7 +128,4 @@ align16(_lastOffset + _lastSize)
#define NvBlastAlloca(x) alloca(x)
#endif
-#define NvBlastAllocaAligned16(x) (void*)(((uintptr_t)PxAlloca(x + 0xF) + 0xF) & ~(uintptr_t)0xF)
-
-
#endif // #ifndef NVBLASTMEMORY_H
diff --git a/blast/source/sdk/common/NvBlastPxSharedHelpers.h b/blast/source/sdk/common/NvBlastNvSharedHelpers.h
similarity index 69%
rename from blast/source/sdk/common/NvBlastPxSharedHelpers.h
rename to blast/source/sdk/common/NvBlastNvSharedHelpers.h
index e94475c5e..966e949a2 100644
--- a/blast/source/sdk/common/NvBlastPxSharedHelpers.h
+++ b/blast/source/sdk/common/NvBlastNvSharedHelpers.h
@@ -22,106 +22,109 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
-#ifndef NVBLASTPXSHAREDTYPESHELPERS_H
-#define NVBLASTPXSHAREDTYPESHELPERS_H
+#ifndef NVBLASTNVSHAREDSHELPERS_H
+#define NVBLASTNVSHAREDSHELPERS_H
#include "NvCTypes.h"
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
+
+#include "NvVec2.h"
+#include "NvVec3.h"
+#include "NvVec4.h"
+#include "NvTransform.h"
+#include "NvPlane.h"
+#include "NvMat33.h"
+#include "NvMat44.h"
+#include "NvBounds3.h"
+
+using namespace nvidia;
#define WCast(type, name) reinterpret_cast(name)
#define RCast(type, name) reinterpret_cast(name)
-#define CONVERT(BlastType, PxSharedType) \
- static inline PxSharedType& toPxShared(BlastType& v) \
+#define CONVERT(BlastType, NvSharedType) \
+ static inline NvSharedType& toNvShared(BlastType& v) \
{ \
- return WCast(PxSharedType&, v); \
+ return WCast(NvSharedType&, v); \
} \
- static inline const PxSharedType& toPxShared(const BlastType& v) \
+ static inline const NvSharedType& toNvShared(const BlastType& v) \
{ \
- return RCast(PxSharedType&, v); \
+ return RCast(NvSharedType&, v); \
} \
- static inline const BlastType& fromPxShared(const PxSharedType& v) \
+ static inline const BlastType& fromNvShared(const NvSharedType& v) \
{ \
return RCast(BlastType&, v); \
} \
- static inline BlastType& fromPxShared(PxSharedType& v) \
+ static inline BlastType& fromNvShared(NvSharedType& v) \
{ \
return WCast(BlastType&, v); \
} \
- static inline PxSharedType* toPxShared(BlastType* v) \
+ static inline NvSharedType* toNvShared(BlastType* v) \
{ \
- return WCast(PxSharedType*, v); \
+ return WCast(NvSharedType*, v); \
} \
- static inline const PxSharedType* toPxShared(const BlastType* v) \
+ static inline const NvSharedType* toNvShared(const BlastType* v) \
{ \
- return RCast(PxSharedType*, v); \
+ return RCast(NvSharedType*, v); \
} \
- static inline const BlastType* fromPxShared(const PxSharedType* v) \
+ static inline const BlastType* fromNvShared(const NvSharedType* v) \
{ \
return RCast(BlastType*, v); \
} \
- static inline BlastType* fromPxShared(PxSharedType* v) \
+ static inline BlastType* fromNvShared(NvSharedType* v) \
{ \
return WCast(BlastType*, v); \
}
-CONVERT(NvcVec2, physx::PxVec2)
-CONVERT(NvcVec3, physx::PxVec3)
-CONVERT(NvcVec4, physx::PxVec4)
-CONVERT(NvcQuat, physx::PxQuat)
-CONVERT(NvcTransform, physx::PxTransform)
-CONVERT(NvcPlane, physx::PxPlane)
-CONVERT(NvcMat33, physx::PxMat33)
-CONVERT(NvcMat44, physx::PxMat44)
-CONVERT(NvcBounds3, physx::PxBounds3)
-
-NV_COMPILE_TIME_ASSERT(sizeof(NvcVec2) == sizeof(physx::PxVec2));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec2, x) == NV_OFFSET_OF(physx::PxVec2, x));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec2, y) == NV_OFFSET_OF(physx::PxVec2, y));
-
-NV_COMPILE_TIME_ASSERT(sizeof(NvcVec3) == sizeof(physx::PxVec3));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, x) == NV_OFFSET_OF(physx::PxVec3, x));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, y) == NV_OFFSET_OF(physx::PxVec3, y));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, z) == NV_OFFSET_OF(physx::PxVec3, z));
-
-NV_COMPILE_TIME_ASSERT(sizeof(NvcVec4) == sizeof(physx::PxVec4));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, x) == NV_OFFSET_OF(physx::PxVec4, x));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, y) == NV_OFFSET_OF(physx::PxVec4, y));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, z) == NV_OFFSET_OF(physx::PxVec4, z));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, w) == NV_OFFSET_OF(physx::PxVec4, w));
-
-NV_COMPILE_TIME_ASSERT(sizeof(NvcQuat) == sizeof(physx::PxQuat));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, x) == NV_OFFSET_OF(physx::PxQuat, x));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, y) == NV_OFFSET_OF(physx::PxQuat, y));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, z) == NV_OFFSET_OF(physx::PxQuat, z));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, w) == NV_OFFSET_OF(physx::PxQuat, w));
-
-NV_COMPILE_TIME_ASSERT(sizeof(NvcTransform) == sizeof(physx::PxTransform));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcTransform, p) == NV_OFFSET_OF(physx::PxTransform, p));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcTransform, q) == NV_OFFSET_OF(physx::PxTransform, q));
-
-NV_COMPILE_TIME_ASSERT(sizeof(NvcPlane) == sizeof(physx::PxPlane));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcPlane, n) == NV_OFFSET_OF(physx::PxPlane, n));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcPlane, d) == NV_OFFSET_OF(physx::PxPlane, d));
-
-NV_COMPILE_TIME_ASSERT(sizeof(NvcMat33) == sizeof(physx::PxMat33));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column0) == NV_OFFSET_OF(physx::PxMat33, column0));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column1) == NV_OFFSET_OF(physx::PxMat33, column1));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column2) == NV_OFFSET_OF(physx::PxMat33, column2));
-
-NV_COMPILE_TIME_ASSERT(sizeof(NvcBounds3) == sizeof(physx::PxBounds3));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcBounds3, minimum) == NV_OFFSET_OF(physx::PxBounds3, minimum));
-NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcBounds3, maximum) == NV_OFFSET_OF(physx::PxBounds3, maximum));
-
-#endif // #ifndef NVBLASTPHYSXTYPESHELPERS_H
+CONVERT(NvcVec2, nvidia::NvVec2)
+CONVERT(NvcVec3, nvidia::NvVec3)
+CONVERT(NvcVec4, nvidia::NvVec4)
+CONVERT(NvcQuat, nvidia::NvQuat)
+CONVERT(NvcTransform, nvidia::NvTransform)
+CONVERT(NvcPlane, nvidia::NvPlane)
+CONVERT(NvcMat33, nvidia::NvMat33)
+CONVERT(NvcMat44, nvidia::NvMat44)
+CONVERT(NvcBounds3, nvidia::NvBounds3)
+
+NV_COMPILE_TIME_ASSERT(sizeof(NvcVec2) == sizeof(nvidia::NvVec2));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec2, x) == NV_OFFSET_OF(nvidia::NvVec2, x));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec2, y) == NV_OFFSET_OF(nvidia::NvVec2, y));
+
+NV_COMPILE_TIME_ASSERT(sizeof(NvcVec3) == sizeof(nvidia::NvVec3));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, x) == NV_OFFSET_OF(nvidia::NvVec3, x));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, y) == NV_OFFSET_OF(nvidia::NvVec3, y));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, z) == NV_OFFSET_OF(nvidia::NvVec3, z));
+
+NV_COMPILE_TIME_ASSERT(sizeof(NvcVec4) == sizeof(nvidia::NvVec4));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, x) == NV_OFFSET_OF(nvidia::NvVec4, x));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, y) == NV_OFFSET_OF(nvidia::NvVec4, y));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, z) == NV_OFFSET_OF(nvidia::NvVec4, z));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, w) == NV_OFFSET_OF(nvidia::NvVec4, w));
+
+NV_COMPILE_TIME_ASSERT(sizeof(NvcQuat) == sizeof(nvidia::NvQuat));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, x) == NV_OFFSET_OF(nvidia::NvQuat, x));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, y) == NV_OFFSET_OF(nvidia::NvQuat, y));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, z) == NV_OFFSET_OF(nvidia::NvQuat, z));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, w) == NV_OFFSET_OF(nvidia::NvQuat, w));
+
+NV_COMPILE_TIME_ASSERT(sizeof(NvcTransform) == sizeof(nvidia::NvTransform));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcTransform, p) == NV_OFFSET_OF(nvidia::NvTransform, p));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcTransform, q) == NV_OFFSET_OF(nvidia::NvTransform, q));
+
+NV_COMPILE_TIME_ASSERT(sizeof(NvcPlane) == sizeof(nvidia::NvPlane));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcPlane, n) == NV_OFFSET_OF(nvidia::NvPlane, n));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcPlane, d) == NV_OFFSET_OF(nvidia::NvPlane, d));
+
+NV_COMPILE_TIME_ASSERT(sizeof(NvcMat33) == sizeof(nvidia::NvMat33));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column0) == NV_OFFSET_OF(nvidia::NvMat33, column0));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column1) == NV_OFFSET_OF(nvidia::NvMat33, column1));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column2) == NV_OFFSET_OF(nvidia::NvMat33, column2));
+
+NV_COMPILE_TIME_ASSERT(sizeof(NvcBounds3) == sizeof(nvidia::NvBounds3));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcBounds3, minimum) == NV_OFFSET_OF(nvidia::NvBounds3, minimum));
+NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcBounds3, maximum) == NV_OFFSET_OF(nvidia::NvBounds3, maximum));
+
+#endif // #ifndef NVBLASTNVSHAREDSHELPERS_H
diff --git a/blast/source/sdk/common/NvBlastPreprocessorInternal.h b/blast/source/sdk/common/NvBlastPreprocessorInternal.h
index 99dbd203a..803a82bdd 100644
--- a/blast/source/sdk/common/NvBlastPreprocessorInternal.h
+++ b/blast/source/sdk/common/NvBlastPreprocessorInternal.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTPREPROCESSORINTERNAL_H
diff --git a/blast/source/sdk/common/NvBlastTime.cpp b/blast/source/sdk/common/NvBlastTime.cpp
index 48ab2b817..e0ede6f14 100644
--- a/blast/source/sdk/common/NvBlastTime.cpp
+++ b/blast/source/sdk/common/NvBlastTime.cpp
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastTime.h"
diff --git a/blast/source/sdk/common/NvBlastTime.h b/blast/source/sdk/common/NvBlastTime.h
index 12af5ac3f..c8a59ace9 100644
--- a/blast/source/sdk/common/NvBlastTime.h
+++ b/blast/source/sdk/common/NvBlastTime.h
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTTIME_H
diff --git a/blast/source/sdk/common/NvBlastTimers.cpp b/blast/source/sdk/common/NvBlastTimers.cpp
index 1bdfc2312..fa1f4153f 100644
--- a/blast/source/sdk/common/NvBlastTimers.cpp
+++ b/blast/source/sdk/common/NvBlastTimers.cpp
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlast.h"
diff --git a/blast/source/sdk/common/NvBlastVolumeIntegrals.h b/blast/source/sdk/common/NvBlastVolumeIntegrals.h
index ec9c04732..d3a812fcd 100644
--- a/blast/source/sdk/common/NvBlastVolumeIntegrals.h
+++ b/blast/source/sdk/common/NvBlastVolumeIntegrals.h
@@ -22,13 +22,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTVOLUMEINTEGRALS_H
#define NVBLASTVOLUMEINTEGRALS_H
-#include "NvBlastPxSharedHelpers.h"
+#include "NvBlastNvSharedHelpers.h"
#include "NvCMath.h"
#include "NvBlastAssert.h"
diff --git a/blast/source/sdk/extensions/assetutils/NvBlastExtAssetUtils.cpp b/blast/source/sdk/extensions/assetutils/NvBlastExtAssetUtils.cpp
index 0eb0fcdbe..f83d8de1e 100644
--- a/blast/source/sdk/extensions/assetutils/NvBlastExtAssetUtils.cpp
+++ b/blast/source/sdk/extensions/assetutils/NvBlastExtAssetUtils.cpp
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastExtAssetUtils.h"
diff --git a/blast/source/sdk/extensions/authoring/NvBlastExtApexSharedParts.cpp b/blast/source/sdk/extensions/authoring/NvBlastExtApexSharedParts.cpp
index 3af7ef7af..3f953b459 100644
--- a/blast/source/sdk/extensions/authoring/NvBlastExtApexSharedParts.cpp
+++ b/blast/source/sdk/extensions/authoring/NvBlastExtApexSharedParts.cpp
@@ -22,22 +22,24 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastExtApexSharedParts.h"
#include "NvBlastGlobals.h"
#include "NvBlastMemory.h"
+#include "NvBlastAssert.h"
-#include "foundation/PxMat44.h"
-#include "foundation/PxBounds3.h"
-#include "PxFoundation.h"
-#include "PsVecMath.h"
+#include "NsVecMath.h"
+
+#include "NvMat44.h"
+#include "NvBounds3.h"
+#include "NsVecMath.h"
#include
-using namespace physx;
-using namespace physx::shdfnd::aos;
+using namespace nvidia;
+using namespace nvidia::shdfnd::aos;
namespace Nv
@@ -45,7 +47,7 @@ namespace Nv
namespace Blast
{
-PX_NOALIAS PX_FORCE_INLINE BoolV PointOutsideOfPlane4(const Vec3VArg _a, const Vec3VArg _b, const Vec3VArg _c, const Vec3VArg _d)
+NV_NOALIAS NV_FORCE_INLINE BoolV PointOutsideOfPlane4(const Vec3VArg _a, const Vec3VArg _b, const Vec3VArg _c, const Vec3VArg _d)
{
// this is not 0 because of the following scenario:
// All the points lie on the same plane and the plane goes through the origin (0,0,0).
@@ -81,7 +83,7 @@ PX_NOALIAS PX_FORCE_INLINE BoolV PointOutsideOfPlane4(const Vec3VArg _a, const V
return V4IsGrtrOrEq(V4Mul(signa, signd), zero);//same side, outside of the plane
}
-PX_NOALIAS PX_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg a, const Vec3VArg b)
+NV_NOALIAS NV_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg a, const Vec3VArg b)
{
const FloatV zero = FZero();
const FloatV one = FOne();
@@ -98,8 +100,8 @@ PX_NOALIAS PX_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg a, const V
return V3Sel(con, a, V3ScaleAdd(ab, t, a));
}
-PX_NOALIAS PX_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg Q0, const Vec3VArg Q1, const Vec3VArg A0, const Vec3VArg A1,
- const Vec3VArg B0, const Vec3VArg B1, PxU32& size, Vec3V& closestA, Vec3V& closestB)
+NV_NOALIAS NV_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg Q0, const Vec3VArg Q1, const Vec3VArg A0, const Vec3VArg A1,
+ const Vec3VArg B0, const Vec3VArg B1, uint32_t& size, Vec3V& closestA, Vec3V& closestB)
{
const Vec3V a = Q0;
const Vec3V b = Q1;
@@ -135,8 +137,8 @@ PX_NOALIAS PX_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg Q0, const
return V3Sub(tempClosestA, tempClosestB);
}
-PX_NOALIAS Vec3V closestPtPointSegmentTesselation(const Vec3VArg Q0, const Vec3VArg Q1, const Vec3VArg A0, const Vec3VArg A1,
- const Vec3VArg B0, const Vec3VArg B1, PxU32& size, Vec3V& closestA, Vec3V& closestB)
+NV_NOALIAS Vec3V closestPtPointSegmentTesselation(const Vec3VArg Q0, const Vec3VArg Q1, const Vec3VArg A0, const Vec3VArg A1,
+ const Vec3VArg B0, const Vec3VArg B1, uint32_t& size, Vec3V& closestA, Vec3V& closestB)
{
const FloatV half = FHalf();
@@ -186,7 +188,7 @@ PX_NOALIAS Vec3V closestPtPointSegmentTesselation(const Vec3VArg Q0, const Vec3V
return closestPtPointSegment(q0, q1, a0, a1, b0, b1, size, closestA, closestB);
}
-PX_NOALIAS Vec3V closestPtPointTriangleTesselation(const Vec3V* PX_RESTRICT Q, const Vec3V* PX_RESTRICT A, const Vec3V* PX_RESTRICT B, const PxU32* PX_RESTRICT indices, PxU32& size, Vec3V& closestA, Vec3V& closestB)
+NV_NOALIAS Vec3V closestPtPointTriangleTesselation(const Vec3V* NV_RESTRICT Q, const Vec3V* NV_RESTRICT A, const Vec3V* NV_RESTRICT B, const uint32_t* NV_RESTRICT indices, uint32_t& size, Vec3V& closestA, Vec3V& closestB)
{
size = 3;
const FloatV zero = FZero();
@@ -196,9 +198,9 @@ PX_NOALIAS Vec3V closestPtPointTriangleTesselation(const Vec3V* PX_RESTRICT Q, c
const FloatV four = FLoad(4.f);
const FloatV sixty = FLoad(100.f);
- const PxU32 ind0 = indices[0];
- const PxU32 ind1 = indices[1];
- const PxU32 ind2 = indices[2];
+ const uint32_t ind0 = indices[0];
+ const uint32_t ind1 = indices[1];
+ const uint32_t ind2 = indices[2];
const Vec3V a = Q[ind0];
const Vec3V b = Q[ind1];
@@ -344,7 +346,7 @@ PX_NOALIAS Vec3V closestPtPointTriangleTesselation(const Vec3V* PX_RESTRICT Q, c
//calculate the triangle normal
const Vec3V triNormal = V3Normalize(w);
- PX_ASSERT(V3AllEq(triNormal, V3Zero()) == 0);
+ NVBLAST_ASSERT(V3AllEq(triNormal, V3Zero()) == 0);
//split the longest edge
@@ -476,13 +478,13 @@ PX_NOALIAS Vec3V closestPtPointTriangleTesselation(const Vec3V* PX_RESTRICT Q, c
return V3Sub(tempClosestA, tempClosestB);
}
-PX_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* PX_RESTRICT Q, Vec3V* PX_RESTRICT A, Vec3V* PX_RESTRICT B, PxU32& size, Vec3V& closestA, Vec3V& closestB)
+NV_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* NV_RESTRICT Q, Vec3V* NV_RESTRICT A, Vec3V* NV_RESTRICT B, uint32_t& size, Vec3V& closestA, Vec3V& closestB)
{
const FloatV eps = FEps();
const Vec3V zeroV = V3Zero();
- PxU32 tempSize = size;
+ uint32_t tempSize = size;
- FloatV bestSqDist = FLoad(PX_MAX_REAL);
+ FloatV bestSqDist = FLoad(NV_MAX_F32);
const Vec3V a = Q[0];
const Vec3V b = Q[1];
const Vec3V c = Q[2];
@@ -501,7 +503,7 @@ PX_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* PX_RESTRICT Q, Vec3
if (FAllGrtr(eps, fMin))
{
size = 3;
- PxU32 tempIndices[] = { 0, 1, 2 };
+ uint32_t tempIndices[] = { 0, 1, 2 };
return closestPtPointTriangleTesselation(Q, A, B, tempIndices, size, closestA, closestB);
}
@@ -509,7 +511,7 @@ PX_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* PX_RESTRICT Q, Vec3
Vec3V _A[] = { A[0], A[1], A[2], A[3] };
Vec3V _B[] = { B[0], B[1], B[2], B[3] };
- PxU32 indices[3] = { 0, 1, 2 };
+ uint32_t indices[3] = { 0, 1, 2 };
const BoolV bIsOutside4 = PointOutsideOfPlane4(a, b, c, d);
@@ -525,8 +527,8 @@ PX_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* PX_RESTRICT Q, Vec3
if (BAllEq(BGetX(bIsOutside4), bTrue))
{
- PxU32 tempIndices[] = { 0, 1, 2 };
- PxU32 _size = 3;
+ uint32_t tempIndices[] = { 0, 1, 2 };
+ uint32_t _size = 3;
result = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB);
@@ -545,9 +547,9 @@ PX_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* PX_RESTRICT Q, Vec3
if (BAllEq(BGetY(bIsOutside4), bTrue))
{
- PxU32 tempIndices[] = { 0, 2, 3 };
+ uint32_t tempIndices[] = { 0, 2, 3 };
- PxU32 _size = 3;
+ uint32_t _size = 3;
const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB);
@@ -570,8 +572,8 @@ PX_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* PX_RESTRICT Q, Vec3
if (BAllEq(BGetZ(bIsOutside4), bTrue))
{
- PxU32 tempIndices[] = { 0, 3, 1 };
- PxU32 _size = 3;
+ uint32_t tempIndices[] = { 0, 3, 1 };
+ uint32_t _size = 3;
const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB);
@@ -594,8 +596,8 @@ PX_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* PX_RESTRICT Q, Vec3
if (BAllEq(BGetW(bIsOutside4), bTrue))
{
- PxU32 tempIndices[] = { 1, 3, 2 };
- PxU32 _size = 3;
+ uint32_t tempIndices[] = { 1, 3, 2 };
+ uint32_t _size = 3;
const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB);
@@ -626,8 +628,8 @@ PX_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* PX_RESTRICT Q, Vec3
return result;
}
-PX_NOALIAS PX_FORCE_INLINE Vec3V doTesselation(Vec3V* PX_RESTRICT Q, Vec3V* PX_RESTRICT A, Vec3V* PX_RESTRICT B,
- const Vec3VArg support, const Vec3VArg supportA, const Vec3VArg supportB, PxU32& size, Vec3V& closestA, Vec3V& closestB)
+NV_NOALIAS NV_FORCE_INLINE Vec3V doTesselation(Vec3V* NV_RESTRICT Q, Vec3V* NV_RESTRICT A, Vec3V* NV_RESTRICT B,
+ const Vec3VArg support, const Vec3VArg supportA, const Vec3VArg supportB, uint32_t& size, Vec3V& closestA, Vec3V& closestB)
{
switch (size)
{
@@ -644,7 +646,7 @@ PX_NOALIAS PX_FORCE_INLINE Vec3V doTesselation(Vec3V* PX_RESTRICT Q, Vec3V* PX_R
case 3:
{
- PxU32 tempIndices[3] = { 0, 1, 2 };
+ uint32_t tempIndices[3] = { 0, 1, 2 };
return closestPtPointTriangleTesselation(Q, A, B, tempIndices, size, closestA, closestB);
}
case 4:
@@ -652,7 +654,7 @@ PX_NOALIAS PX_FORCE_INLINE Vec3V doTesselation(Vec3V* PX_RESTRICT Q, Vec3V* PX_R
return closestPtPointTetrahedronTesselation(Q, A, B, size, closestA, closestB);
}
default:
- PX_ASSERT(0);
+ NVBLAST_ASSERT(0);
}
return support;
}
@@ -670,7 +672,7 @@ enum Status
struct Output
{
/// Get the normal to push apart in direction from A to B
- PX_FORCE_INLINE Vec3V getNormal() const { return V3Normalize(V3Sub(mClosestB, mClosestA)); }
+ NV_FORCE_INLINE Vec3V getNormal() const { return V3Normalize(V3Sub(mClosestB, mClosestA)); }
Vec3V mClosestA; ///< Closest point on A
Vec3V mClosestB; ///< Closest point on B
FloatV mDistSq;
@@ -678,7 +680,7 @@ struct Output
struct ConvexV
{
- void calcExtent(const Vec3V& dir, PxF32& minOut, PxF32& maxOut) const
+ void calcExtent(const Vec3V& dir, float& minOut, float& maxOut) const
{
// Expand
const Vec4V x = Vec4V_From_FloatV(V3GetX(dir));
@@ -709,9 +711,9 @@ struct ConvexV
const Vec4V y = Vec4V_From_FloatV(V3GetY(dir));
const Vec4V z = Vec4V_From_FloatV(V3GetZ(dir));
- PX_ALIGN(16, static const PxF32 index4const[]) = { 0.0f, 1.0f, 2.0f, 3.0f };
+ NV_ALIGN(16, static const float index4const[]) = { 0.0f, 1.0f, 2.0f, 3.0f };
Vec4V index4 = *(const Vec4V*)index4const;
- PX_ALIGN(16, static const PxF32 delta4const[]) = { 4.0f, 4.0f, 4.0f, 4.0f };
+ NV_ALIGN(16, static const float delta4const[]) = { 4.0f, 4.0f, 4.0f, 4.0f };
const Vec4V delta4 = *(const Vec4V*)delta4const;
const Vec4V* src = mAovVertices;
@@ -732,23 +734,23 @@ struct ConvexV
index4 = V4Add(index4, delta4);
}
Vec4V horiMax = Vec4V_From_FloatV(V4ExtractMax(max));
- PxU32 mask = BGetBitMask(V4IsEq(horiMax, max));
- const PxU32 simdIndex = (0x12131210 >> (mask + mask)) & PxU32(3);
+ uint32_t mask = BGetBitMask(V4IsEq(horiMax, max));
+ const uint32_t simdIndex = (0x12131210 >> (mask + mask)) & uint32_t(3);
/// NOTE! Could be load hit store
/// Would be better to have all simd.
- PX_ALIGN(16, PxF32 f[4]);
+ NV_ALIGN(16, float f[4]);
V4StoreA(maxIndex, f);
- PxU32 index = PxU32(PxI32(f[simdIndex]));
+ uint32_t index = uint32_t(uint32_t(f[simdIndex]));
const Vec4V* aovIndex = (mAovVertices + (index >> 2) * 3);
- const PxF32* aovOffset = ((const PxF32*)aovIndex) + (index & 3);
+ const float* aovOffset = ((const float*)aovIndex) + (index & 3);
return Vec3V_From_Vec4V(V4LoadXYZW(aovOffset[0], aovOffset[4], aovOffset[8], 1.0f));
}
const Vec4V* mAovVertices; ///< Vertices storex x,x,x,x, y,y,y,y, z,z,z,z
- PxU32 mNumAovVertices; ///< Number of groups of 4 of vertices
+ uint32_t mNumAovVertices; ///< Number of groups of 4 of vertices
};
Status Collide(const Vec3V& initialDir, const ConvexV& convexA, const Mat34V& bToA, const ConvexV& convexB, Output& out)
@@ -759,7 +761,7 @@ Status Collide(const Vec3V& initialDir, const ConvexV& convexA, const Mat34V& bT
Mat33V aToB = M34Trnsps33(bToA);
- PxU32 size = 0;
+ uint32_t size = 0;
const Vec3V zeroV = V3Zero();
const BoolV bTrue = BTTTT();
@@ -788,8 +790,8 @@ Status Collide(const Vec3V& initialDir, const ConvexV& convexA, const Mat34V& bT
closAA = closA;
closBB = closB;
- PxU32 index = size++;
- PX_ASSERT(index < 4);
+ uint32_t index = size++;
+ NVBLAST_ASSERT(index < 4);
const Vec3V supportA = convexA.calcSupport(V3Neg(v));
const Vec3V supportB = M34MulV3(bToA, convexB.calcSupport(M33MulV3(aToB, v)));
@@ -823,7 +825,7 @@ Status Collide(const Vec3V& initialDir, const ConvexV& convexA, const Mat34V& bT
return Status(BAllEq(bCon, bTrue) == 1 ? STATUS_CONTACT : STATUS_DEGENERATE);
}
-static void _calcSeparation(const ConvexV& convexA, const physx::PxTransform& aToWorldIn, const Mat34V& bToA, ConvexV& convexB, const Vec3V& centroidAToB, Output& out, Separation& sep)
+static void _calcSeparation(const ConvexV& convexA, const nvidia::NvTransform& aToWorldIn, const Mat34V& bToA, ConvexV& convexB, const Vec3V& centroidAToB, Output& out, Separation& sep)
{
Mat33V aToB = M34Trnsps33(bToA);
@@ -848,7 +850,7 @@ static void _calcSeparation(const ConvexV& convexA, const physx::PxTransform& aT
{
// Offset the min max taking into account transform
// Distance of origin from B's space in As space in direction of the normal in As space should fix it...
- PxF32 fix;
+ float fix;
FStore(V3Dot(bToA.col3, normalA), &fix);
sep.min1 += fix;
sep.max1 += fix;
@@ -858,7 +860,7 @@ static void _calcSeparation(const ConvexV& convexA, const physx::PxTransform& aT
Vec3V center = V3Scale(V3Add(out.mClosestA, out.mClosestB), FLoad(0.5f));
// Transform to world space
Mat34V aToWorld;
- *(PxMat44*)&aToWorld = aToWorldIn;
+ *(NvMat44*)&aToWorld = aToWorldIn;
// Put the normal in world space
Vec3V worldCenter = M34MulV3(aToWorld, center);
Vec3V worldNormal = M34Mul33V3(aToWorld, normalA);
@@ -869,10 +871,10 @@ static void _calcSeparation(const ConvexV& convexA, const physx::PxTransform& aT
sep.plane.d = -sep.plane.d;
}
-static void _arrayVec3ToVec4(const PxVec3* src, Vec4V* dst, PxU32 num)
+static void _arrayVec3ToVec4(const NvVec3* src, Vec4V* dst, uint32_t num)
{
- const PxU32 num4 = num >> 2;
- for (PxU32 i = 0; i < num4; i++, dst += 3, src += 4)
+ const uint32_t num4 = num >> 2;
+ for (uint32_t i = 0; i < num4; i++, dst += 3, src += 4)
{
Vec3V v0 = V3LoadU(&src[0].x);
Vec3V v1 = V3LoadU(&src[1].x);
@@ -885,11 +887,11 @@ static void _arrayVec3ToVec4(const PxVec3* src, Vec4V* dst, PxU32 num)
dst[1] = v1;
dst[2] = v2;
}
- const PxU32 remain = num & 3;
+ const uint32_t remain = num & 3;
if (remain)
{
Vec3V work[4];
- PxU32 i = 0;
+ uint32_t i = 0;
for (; i < remain; i++) work[i] = V3LoadU(&src[i].x);
for (; i < 4; i++) work[i] = work[remain - 1];
V4Transpose(work[0], work[1], work[2], work[3]);
@@ -900,7 +902,7 @@ static void _arrayVec3ToVec4(const PxVec3* src, Vec4V* dst, PxU32 num)
}
-static void _arrayVec3ToVec4(const PxVec3* src, const Vec3V& scale, Vec4V* dst, PxU32 num)
+static void _arrayVec3ToVec4(const NvVec3* src, const Vec3V& scale, Vec4V* dst, uint32_t num)
{
// If no scale - use the faster version
if (V3AllEq(scale, V3One()))
@@ -908,8 +910,8 @@ static void _arrayVec3ToVec4(const PxVec3* src, const Vec3V& scale, Vec4V* dst,
return _arrayVec3ToVec4(src, dst, num);
}
- const PxU32 num4 = num >> 2;
- for (PxU32 i = 0; i < num4; i++, dst += 3, src += 4)
+ const uint32_t num4 = num >> 2;
+ for (uint32_t i = 0; i < num4; i++, dst += 3, src += 4)
{
Vec3V v0 = V3Mul(scale, V3LoadU(&src[0].x));
Vec3V v1 = V3Mul(scale, V3LoadU(&src[1].x));
@@ -922,11 +924,11 @@ static void _arrayVec3ToVec4(const PxVec3* src, const Vec3V& scale, Vec4V* dst,
dst[1] = v1;
dst[2] = v2;
}
- const PxU32 remain = num & 3;
+ const uint32_t remain = num & 3;
if (remain)
{
Vec3V work[4];
- PxU32 i = 0;
+ uint32_t i = 0;
for (; i < remain; i++) work[i] = V3Mul(scale, V3LoadU(&src[i].x));
for (; i < 4; i++) work[i] = work[remain - 1];
V4Transpose(work[0], work[1], work[2], work[3]);
@@ -976,35 +978,35 @@ struct ScopeMemoryAllocator {
_out = (buffSize < STACK_ALLOC_LIMIT ? NvBlastAlloca(buffSize) : _out##Allocator.alloc(buffSize))
-bool importerHullsInProximityApexFree(uint32_t hull0Count, const PxVec3* hull0, PxBounds3& hull0Bounds, const physx::PxTransform& localToWorldRT0In, const physx::PxVec3& scale0In,
- uint32_t hull1Count, const PxVec3* hull1, PxBounds3& hull1Bounds, const physx::PxTransform& localToWorldRT1In, const physx::PxVec3& scale1In,
- physx::PxF32 maxDistance, Separation* separation)
+bool importerHullsInProximityApexFree(uint32_t hull0Count, const NvVec3* hull0, NvBounds3& hull0Bounds, const nvidia::NvTransform& localToWorldRT0In, const nvidia::NvVec3& scale0In,
+ uint32_t hull1Count, const NvVec3* hull1, NvBounds3& hull1Bounds, const nvidia::NvTransform& localToWorldRT1In, const nvidia::NvVec3& scale1In,
+ float maxDistance, Separation* separation)
{
- const PxU32 numVerts0 = static_cast(hull0Count);
- const PxU32 numVerts1 = static_cast(hull1Count);
- const PxU32 numAov0 = (numVerts0 + 3) >> 2;
- const PxU32 numAov1 = (numVerts1 + 3) >> 2;
+ const uint32_t numVerts0 = static_cast(hull0Count);
+ const uint32_t numVerts1 = static_cast(hull1Count);
+ const uint32_t numAov0 = (numVerts0 + 3) >> 2;
+ const uint32_t numAov1 = (numVerts1 + 3) >> 2;
- const PxU32 buffSize = (numAov0 + numAov1) * sizeof(Vec4V) * 3;
+ const uint32_t buffSize = (numAov0 + numAov1) * sizeof(Vec4V) * 3;
void* buff = nullptr;
ALLOCATE_TEMP_MEMORY(buff, buffSize);
Vec4V* verts0 = (Vec4V*)buff;
// Make sure it's aligned
- PX_ASSERT((size_t(verts0) & 0xf) == 0);
+ NVBLAST_ASSERT((size_t(verts0) & 0xf) == 0);
Vec4V* verts1 = verts0 + (numAov0 * 3);
const Vec3V scale0 = V3LoadU(&scale0In.x);
const Vec3V scale1 = V3LoadU(&scale1In.x);
- std::vector vert0(numVerts0);
+ std::vector vert0(numVerts0);
for (uint32_t i = 0; i < numVerts0; ++i)
{
vert0[i] = hull0[i];
}
- std::vector vert1(numVerts1);
+ std::vector vert1(numVerts1);
for (uint32_t i = 0; i < numVerts1; ++i)
{
vert1[i] = hull1[i];
@@ -1013,12 +1015,12 @@ bool importerHullsInProximityApexFree(uint32_t hull0Count, const PxVec3* hull0,
_arrayVec3ToVec4(vert0.data(), scale0, verts0, numVerts0);
_arrayVec3ToVec4(vert1.data(), scale1, verts1, numVerts1);
- const PxTransform trans1To0 = localToWorldRT0In.transformInv(localToWorldRT1In);
+ const NvTransform trans1To0 = localToWorldRT0In.transformInv(localToWorldRT1In);
// Load into simd mat
Mat34V bToA;
- *(PxMat44*)&bToA = trans1To0;
- (*(PxMat44*)&bToA).column3.w = 0.0f; // AOS wants the 4th component of Vec3V to be 0 to work properly
+ *(NvMat44*)&bToA = trans1To0;
+ (*(NvMat44*)&bToA).column3.w = 0.0f; // AOS wants the 4th component of Vec3V to be 0 to work properly
ConvexV convexA;
ConvexV convexB;
@@ -1029,8 +1031,8 @@ bool importerHullsInProximityApexFree(uint32_t hull0Count, const PxVec3* hull0,
convexB.mNumAovVertices = numAov1;
convexB.mAovVertices = verts1;
- const physx::PxVec3 hullACenter = hull0Bounds.getCenter();
- const physx::PxVec3 hullBCenter = hull1Bounds.getCenter();
+ const nvidia::NvVec3 hullACenter = hull0Bounds.getCenter();
+ const nvidia::NvVec3 hullBCenter = hull1Bounds.getCenter();
const Vec3V centroidA = V3LoadU(&hullACenter.x);
const Vec3V centroidB = M34MulV3(bToA, V3LoadU(&hullBCenter.x));
@@ -1045,8 +1047,8 @@ bool importerHullsInProximityApexFree(uint32_t hull0Count, const PxVec3* hull0,
if (status == STATUS_DEGENERATE)
{
// Calculate the tolerance from the extents
- const PxVec3 extents0 = hull0Bounds.getExtents();
- const PxVec3 extents1 = hull1Bounds.getExtents();
+ const NvVec3 extents0 = hull0Bounds.getExtents();
+ const NvVec3 extents1 = hull1Bounds.getExtents();
const FloatV tolerance0 = V3ExtractMin(V3Mul(V3LoadU(&extents0.x), scale0));
const FloatV tolerance1 = V3ExtractMin(V3Mul(V3LoadU(&extents1.x), scale1));
@@ -1074,7 +1076,7 @@ bool importerHullsInProximityApexFree(uint32_t hull0Count, const PxVec3* hull0,
{
_calcSeparation(convexA, localToWorldRT0In, bToA, convexB, initialDir, output, *separation);
}
- PxF32 val;
+ float val;
FStore(output.mDistSq, &val);
return val < (maxDistance * maxDistance);
}
diff --git a/blast/source/sdk/extensions/authoring/NvBlastExtApexSharedParts.h b/blast/source/sdk/extensions/authoring/NvBlastExtApexSharedParts.h
index d87b778d7..5840b1dec 100644
--- a/blast/source/sdk/extensions/authoring/NvBlastExtApexSharedParts.h
+++ b/blast/source/sdk/extensions/authoring/NvBlastExtApexSharedParts.h
@@ -22,19 +22,19 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTEXTAPEXSHAREDPARTS_H
#define NVBLASTEXTAPEXSHAREDPARTS_H
#include "NvBlast.h"
-#include <foundation/PxPlane.h>
-namespace physx
+#include "NvPlane.h"
+namespace nvidia
{
- class PxVec3;
- class PxTransform;
- class PxBounds3;
+ class NvVec3;
+ class NvTransform;
+ class NvBounds3;
}
namespace Nv
@@ -44,21 +44,21 @@ namespace Blast
struct Separation
{
- physx::PxPlane plane;
+ nvidia::NvPlane plane;
float min0, max0, min1, max1;
float getDistance()
{
- return physx::PxMax(min0 - max1, min1 - max0);
+ return nvidia::NvMax(min0 - max1, min1 - max0);
}
};
/**
Function to compute midplane between two convex hulls. Is copied from APEX.
*/
-bool importerHullsInProximityApexFree( uint32_t hull0Count, const physx::PxVec3* hull0, physx::PxBounds3& hull0Bounds, const physx::PxTransform& localToWorldRT0In, const physx::PxVec3& scale0In,
- uint32_t hull1Count, const physx::PxVec3* hull1, physx::PxBounds3& hull1Bounds, const physx::PxTransform& localToWorldRT1In, const physx::PxVec3& scale1In,
- physx::PxF32 maxDistance, Separation* separation);
+bool importerHullsInProximityApexFree( uint32_t hull0Count, const nvidia::NvVec3* hull0, nvidia::NvBounds3& hull0Bounds, const nvidia::NvTransform& localToWorldRT0In, const nvidia::NvVec3& scale0In,
+ uint32_t hull1Count, const nvidia::NvVec3* hull1, nvidia::NvBounds3& hull1Bounds, const nvidia::NvTransform& localToWorldRT1In, const nvidia::NvVec3& scale1In,
+ float maxDistance, Separation* separation);
} // namespace Blast
} // namespace Nv
diff --git a/blast/source/sdk/extensions/authoring/NvBlastExtAuthoring.cpp b/blast/source/sdk/extensions/authoring/NvBlastExtAuthoring.cpp
index a8279f586..24a9bb665 100644
--- a/blast/source/sdk/extensions/authoring/NvBlastExtAuthoring.cpp
+++ b/blast/source/sdk/extensions/authoring/NvBlastExtAuthoring.cpp
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2016-2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastExtAuthoring.h"
#include "NvBlastTypes.h"
@@ -41,13 +41,13 @@
#include "NvBlastExtAuthoringCollisionBuilderImpl.h"
#include "NvBlastExtAuthoringCutoutImpl.h"
#include "NvBlastExtAuthoringInternalCommon.h"
-#include "NvBlastPxSharedHelpers.h"
+#include "NvBlastNvSharedHelpers.h"
#include
#include
using namespace Nv::Blast;
-using namespace physx;
+using namespace nvidia;
#define SAFE_ARRAY_NEW(T, x) ((x) > 0) ? reinterpret_cast<T*>(NVBLAST_ALLOC(sizeof(T) * (x))) : nullptr;
#define SAFE_ARRAY_DELETE(x) if (x != nullptr) {NVBLAST_FREE(x); x = nullptr;}
@@ -117,21 +117,21 @@ void NvBlastExtAuthoringTrimCollisionGeometry(ConvexMeshBuilder* cmb, uint32_t c
void NvBlastExtAuthoringTransformCollisionHullInPlace(CollisionHull* hull, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation)
{
// Local copies of scaling (S), rotation (R), and translation (T)
- physx::PxVec3 S = { 1, 1, 1 };
- physx::PxQuat R = { 0, 0, 0, 1 };
- physx::PxVec3 T = { 0, 0, 0 };
- physx::PxVec3 cofS = { 1, 1, 1 };
+ nvidia::NvVec3 S = { 1, 1, 1 };
+ nvidia::NvQuat R = { 0, 0, 0, 1 };
+ nvidia::NvVec3 T = { 0, 0, 0 };
+ nvidia::NvVec3 cofS = { 1, 1, 1 };
float sgnDetS = 1;
{
if (rotation)
{
- R = *toPxShared(rotation);
+ R = *toNvShared(rotation);
}
if (scaling)
{
- S = *toPxShared(scaling);
+ S = *toNvShared(scaling);
cofS.x = S.y * S.z;
cofS.y = S.z * S.x;
cofS.z = S.x * S.y;
@@ -140,14 +140,14 @@ void NvBlastExtAuthoringTransformCollisionHullInPlace(CollisionHull* hull, const
if (translation)
{
- T = *toPxShared(translation);
+ T = *toNvShared(translation);
}
}
const uint32_t pointCount = hull->pointsCount;
for (uint32_t pi = 0; pi < pointCount; pi++)
{
- physx::PxVec3& p = toPxShared(hull->points[pi]);
+ nvidia::NvVec3& p = toNvShared(hull->points[pi]);
p = (R.rotate(p.multiply(S)) + T);
}
@@ -155,11 +155,11 @@ void NvBlastExtAuthoringTransformCollisionHullInPlace(CollisionHull* hull, const
for (uint32_t pi = 0; pi < planeCount; pi++)
{
float* plane = hull->polygonData[pi].plane;
- physx::PxPlane pxPlane(plane[0], plane[1], plane[2], plane[3]);
- PxVec3 transformedNormal = sgnDetS*R.rotate(pxPlane.n.multiply(cofS)).getNormalized();
- PxVec3 transformedPt = R.rotate(pxPlane.pointInPlane().multiply(S)) + T;
+ nvidia::NvPlane nvPlane(plane[0], plane[1], plane[2], plane[3]);
+ NvVec3 transformedNormal = sgnDetS*R.rotate(nvPlane.n.multiply(cofS)).getNormalized();
+ NvVec3 transformedPt = R.rotate(nvPlane.pointInPlane().multiply(S)) + T;
- physx::PxPlane transformedPlane(transformedPt, transformedNormal);
+ nvidia::NvPlane transformedPlane(transformedPt, transformedNormal);
plane[0] = transformedPlane.n[0];
plane[1] = transformedPlane.n[1];
plane[2] = transformedPlane.n[2];
@@ -411,7 +411,7 @@ AuthoringResult* NvBlastExtAuthoringProcessFracture(FractureTool& fTool, BlastBo
// prepare physics data (convexes)
buildPhysicsChunks(collisionBuilder, aResult, collisionParam);
- // set NvBlastChunk volume and centroid from Px geometry
+ // set NvBlastChunk volume and centroid from CollisionHull
for (uint32_t i = 0; i < chunkCount; i++)
{
float totalVolume = 0.f;
@@ -503,7 +503,7 @@ uint32_t NvBlastExtAuthoringFindAssetConnectingBonds
std::vector originalComponentIndex;
- const physx::PxVec3 identityScale(1);
+ const nvidia::NvVec3 identityScale(1);
//Combine our hull lists into a single combined list for bondsFromPrefractured
for (uint32_t c = 0; c < componentCount; c++)
@@ -520,9 +520,9 @@ uint32_t NvBlastExtAuthoringFindAssetConnectingBonds
const uint32_t hullsEnd = convexHullOffsets[c][chunk + 1];
for (uint32_t hull = hullsStart; hull < hullsEnd; hull++)
{
- if ((scale != nullptr && *toPxShared(scale) != identityScale) ||
- (rotation != nullptr && !toPxShared(rotation)->isIdentity()) ||
- (translation != nullptr && !toPxShared(translation)->isZero()))
+ if ((scale != nullptr && *toNvShared(scale) != identityScale) ||
+ (rotation != nullptr && !toNvShared(rotation)->isIdentity()) ||
+ (translation != nullptr && !toNvShared(translation)->isZero()))
{
hullsToRelease.emplace_back(NvBlastExtAuthoringTransformCollisionHull(chunkHulls[c][hull], scale, rotation, translation));
combinedConvexHulls.emplace_back(hullsToRelease.back());
diff --git a/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBondGeneratorImpl.cpp b/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBondGeneratorImpl.cpp
index 00953cb2c..38945abde 100644
--- a/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBondGeneratorImpl.cpp
+++ b/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBondGeneratorImpl.cpp
@@ -22,7 +22,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
// This warning arises when using some stl containers with older versions of VC
@@ -35,14 +35,14 @@
#include
#include
#include
-#include
+#include
#include "NvBlastExtTriangleProcessor.h"
#include "NvBlastExtApexSharedParts.h"
#include "NvBlastExtAuthoringInternalCommon.h"
#include "NvBlastExtAuthoringTypes.h"
#include
#include