diff --git a/.github/workflows/sanitizer.yml b/.github/workflows/sanitizer.yml
index 57d6e78f1..b23f4520f 100644
--- a/.github/workflows/sanitizer.yml
+++ b/.github/workflows/sanitizer.yml
@@ -57,3 +57,13 @@ jobs:
- name: test
run: cd build/bin && ./unit
shell: bash
+
+ job3:
+ name: printf-sanitizer
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: run scan_printf script
+ run: ./scripts/scan_printf.sh
+ shell: bash
diff --git a/Readme.md b/Readme.md
index 917b8e8aa..1af71fad8 100644
--- a/Readme.md
+++ b/Readme.md
@@ -14,7 +14,6 @@ A library to import and export various 3d-model-formats including scene-post-pro
[![Join the chat at https://gitter.im/assimp/assimp](https://badges.gitter.im/assimp/assimp.svg)](https://gitter.im/assimp/assimp?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Average time to resolve an issue](http://isitmaintained.com/badge/resolution/assimp/assimp.svg)](http://isitmaintained.com/project/assimp/assimp "Average time to resolve an issue")
[![Percentage of issues still open](http://isitmaintained.com/badge/open/assimp/assimp.svg)](http://isitmaintained.com/project/assimp/assimp "Percentage of issues still open")
-[![Total alerts](https://img.shields.io/lgtm/alerts/g/assimp/assimp.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/assimp/assimp/alerts/)
APIs are provided for C and C++. There are various bindings to other languages (C#, Java, Python, Delphi, D). Assimp also runs on Android and iOS.
diff --git a/code/AssetLib/Blender/BlenderScene.cpp b/code/AssetLib/Blender/BlenderScene.cpp
index 3a9a02fd0..ac10d7302 100644
--- a/code/AssetLib/Blender/BlenderScene.cpp
+++ b/code/AssetLib/Blender/BlenderScene.cpp
@@ -569,7 +569,7 @@ void Structure ::Convert(
const FileDatabase &db) const {
ReadFieldArray(dest.co, "co", db);
- ReadFieldArray(dest.no, "no", db);
+ ReadFieldArray(dest.no, "no", db);
ReadField(dest.flag, "flag", db);
//ReadField(dest.mat_nr,"mat_nr",db);
ReadField(dest.bweight, "bweight", db);
diff --git a/code/AssetLib/FBX/FBXConverter.cpp b/code/AssetLib/FBX/FBXConverter.cpp
index d45919e10..f1f57c10b 100644
--- a/code/AssetLib/FBX/FBXConverter.cpp
+++ b/code/AssetLib/FBX/FBXConverter.cpp
@@ -873,8 +873,12 @@ void FBXConverter::SetupNodeMetadata(const Model &model, aiNode &nd) {
data->Set(index++, prop.first, interpretedBool->Value());
} else if (const TypedProperty<int32_t> *interpretedInt = prop.second->As<TypedProperty<int32_t>>()) {
data->Set(index++, prop.first, interpretedInt->Value());
+ } else if (const TypedProperty<uint32_t> *interpretedUInt = prop.second->As<TypedProperty<uint32_t>>()) {
+ data->Set(index++, prop.first, interpretedUInt->Value());
} else if (const TypedProperty<uint64_t> *interpretedUint64 = prop.second->As<TypedProperty<uint64_t>>()) {
data->Set(index++, prop.first, interpretedUint64->Value());
+ } else if (const TypedProperty<int64_t> *interpretedint64 = prop.second->As<TypedProperty<int64_t>>()) {
+ data->Set(index++, prop.first, interpretedint64->Value());
} else if (const TypedProperty<float> *interpretedFloat = prop.second->As<TypedProperty<float>>()) {
data->Set(index++, prop.first, interpretedFloat->Value());
} else if (const TypedProperty<std::string> *interpretedString = prop.second->As<TypedProperty<std::string>>()) {
@@ -1176,15 +1180,23 @@ unsigned int FBXConverter::ConvertMeshSingleMaterial(const MeshGeometry &mesh, c
std::vector<aiAnimMesh *> animMeshes;
for (const BlendShape *blendShape : mesh.GetBlendShapes()) {
for (const BlendShapeChannel *blendShapeChannel : blendShape->BlendShapeChannels()) {
- const std::vector<const ShapeGeometry*> &shapeGeometries = blendShapeChannel->GetShapeGeometries();
- for (size_t i = 0; i < shapeGeometries.size(); i++) {
+ const auto& shapeGeometries = blendShapeChannel->GetShapeGeometries();
+ for (const ShapeGeometry *shapeGeometry : shapeGeometries) {
aiAnimMesh *animMesh = aiCreateAnimMesh(out_mesh);
- const ShapeGeometry *shapeGeometry = shapeGeometries.at(i);
- const std::vector<aiVector3D> &curVertices = shapeGeometry->GetVertices();
- const std::vector<aiVector3D> &curNormals = shapeGeometry->GetNormals();
- const std::vector<unsigned int> &curIndices = shapeGeometry->GetIndices();
+ const auto &curVertices = shapeGeometry->GetVertices();
+ const auto &curNormals = shapeGeometry->GetNormals();
+ const auto &curIndices = shapeGeometry->GetIndices();
//losing channel name if using shapeGeometry->Name()
- animMesh->mName.Set(FixAnimMeshName(blendShapeChannel->Name()));
+ // If the blendShapeChannel name is empty or contains no ".", append the geometry name.
+ auto aniName = FixAnimMeshName(blendShapeChannel->Name());
+ auto geoMetryName = FixAnimMeshName(shapeGeometry->Name());
+ if (aniName.empty()) {
+ aniName = geoMetryName;
+ }
+ else if (aniName.find('.') == aniName.npos) {
+ aniName += "." + geoMetryName;
+ }
+ animMesh->mName.Set(aniName);
for (size_t j = 0; j < curIndices.size(); j++) {
const unsigned int curIndex = curIndices.at(j);
aiVector3D vertex = curVertices.at(j);
@@ -1406,13 +1418,12 @@ unsigned int FBXConverter::ConvertMeshMultiMaterial(const MeshGeometry &mesh, co
std::vector<aiAnimMesh *> animMeshes;
for (const BlendShape *blendShape : mesh.GetBlendShapes()) {
for (const BlendShapeChannel *blendShapeChannel : blendShape->BlendShapeChannels()) {
- const std::vector<const ShapeGeometry*> &shapeGeometries = blendShapeChannel->GetShapeGeometries();
- for (size_t i = 0; i < shapeGeometries.size(); i++) {
+ const auto& shapeGeometries = blendShapeChannel->GetShapeGeometries();
+ for (const ShapeGeometry *shapeGeometry : shapeGeometries) {
aiAnimMesh *animMesh = aiCreateAnimMesh(out_mesh);
- const ShapeGeometry *shapeGeometry = shapeGeometries.at(i);
- const std::vector<aiVector3D> &curVertices = shapeGeometry->GetVertices();
- const std::vector<aiVector3D> &curNormals = shapeGeometry->GetNormals();
- const std::vector<unsigned int> &curIndices = shapeGeometry->GetIndices();
+ const auto& curVertices = shapeGeometry->GetVertices();
+ const auto& curNormals = shapeGeometry->GetNormals();
+ const auto& curIndices = shapeGeometry->GetIndices();
animMesh->mName.Set(FixAnimMeshName(shapeGeometry->Name()));
for (size_t j = 0; j < curIndices.size(); j++) {
unsigned int curIndex = curIndices.at(j);
diff --git a/code/AssetLib/FBX/FBXDeformer.cpp b/code/AssetLib/FBX/FBXDeformer.cpp
index df134a401..1aab55ea9 100644
--- a/code/AssetLib/FBX/FBXDeformer.cpp
+++ b/code/AssetLib/FBX/FBXDeformer.cpp
@@ -154,8 +154,10 @@ BlendShape::BlendShape(uint64_t id, const Element& element, const Document& doc,
for (const Connection* con : conns) {
const BlendShapeChannel* const bspc = ProcessSimpleConnection(*con, false, "BlendShapeChannel -> BlendShape", element);
if (bspc) {
- blendShapeChannels.push_back(bspc);
- continue;
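+ // insert() into the unordered_set reports via its bool result whether this channel was already present, so duplicates can be warned about.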
+ auto pr = blendShapeChannels.insert(bspc);
+ if (!pr.second) {
+ FBXImporter::LogWarn("duplicate BlendShapeChannel id ", bspc->ID());
+ }
}
}
}
@@ -179,8 +181,10 @@ BlendShapeChannel::BlendShapeChannel(uint64_t id, const Element& element, const
for (const Connection* con : conns) {
const ShapeGeometry* const sg = ProcessSimpleConnection(*con, false, "Shape -> BlendShapeChannel", element);
if (sg) {
- shapeGeometries.push_back(sg);
- continue;
+ auto pr = shapeGeometries.insert(sg);
+ if (!pr.second) {
+ FBXImporter::LogWarn("duplicate ShapeGeometry id ", sg->ID());
+ }
}
}
}
diff --git a/code/AssetLib/FBX/FBXDocument.h b/code/AssetLib/FBX/FBXDocument.h
index 8873d65fd..821d4d5cb 100644
--- a/code/AssetLib/FBX/FBXDocument.h
+++ b/code/AssetLib/FBX/FBXDocument.h
@@ -46,6 +46,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define INCLUDED_AI_FBX_DOCUMENT_H
#include
+#include <unordered_set>
#include
#include
#include "FBXProperties.h"
@@ -855,14 +856,14 @@ public:
return fullWeights;
}
- const std::vector<const ShapeGeometry*>& GetShapeGeometries() const {
+ const std::unordered_set<const ShapeGeometry*>& GetShapeGeometries() const {
return shapeGeometries;
}
private:
float percent;
WeightArray fullWeights;
- std::vector<const ShapeGeometry*> shapeGeometries;
+ std::unordered_set<const ShapeGeometry*> shapeGeometries;
};
/** DOM class for BlendShape deformers */
@@ -872,12 +873,12 @@ public:
virtual ~BlendShape();
- const std::vector<const BlendShapeChannel*>& BlendShapeChannels() const {
+ const std::unordered_set<const BlendShapeChannel*>& BlendShapeChannels() const {
return blendShapeChannels;
}
private:
- std::vector<const BlendShapeChannel*> blendShapeChannels;
+ std::unordered_set<const BlendShapeChannel*> blendShapeChannels;
};
/** DOM class for skin deformer clusters (aka sub-deformers) */
diff --git a/code/AssetLib/FBX/FBXMeshGeometry.cpp b/code/AssetLib/FBX/FBXMeshGeometry.cpp
index ace4ad749..fcbaac169 100644
--- a/code/AssetLib/FBX/FBXMeshGeometry.cpp
+++ b/code/AssetLib/FBX/FBXMeshGeometry.cpp
@@ -69,13 +69,16 @@ Geometry::Geometry(uint64_t id, const Element& element, const std::string& name,
}
const BlendShape* const bsp = ProcessSimpleConnection(*con, false, "BlendShape -> Geometry", element);
if (bsp) {
- blendShapes.push_back(bsp);
+ auto pr = blendShapes.insert(bsp);
+ if (!pr.second) {
+ FBXImporter::LogWarn("duplicate BlendShape id ", bsp->ID());
+ }
}
}
}
// ------------------------------------------------------------------------------------------------
-const std::vector<const BlendShape*>& Geometry::GetBlendShapes() const {
+const std::unordered_set<const BlendShape*>& Geometry::GetBlendShapes() const {
return blendShapes;
}
diff --git a/code/AssetLib/FBX/FBXMeshGeometry.h b/code/AssetLib/FBX/FBXMeshGeometry.h
index f4a1a2673..3d67ec567 100644
--- a/code/AssetLib/FBX/FBXMeshGeometry.h
+++ b/code/AssetLib/FBX/FBXMeshGeometry.h
@@ -62,7 +62,7 @@ public:
/// @param name The name instance
/// @param doc The document instance
Geometry( uint64_t id, const Element& element, const std::string& name, const Document& doc );
-
+
/// @brief The class destructor, default.
virtual ~Geometry() = default;
@@ -72,11 +72,12 @@ public:
/// @brief Get the BlendShape attached to this geometry or nullptr
/// @return The blendshape arrays.
- const std::vector<const BlendShape*>& GetBlendShapes() const;
+ const std::unordered_set<const BlendShape*>& GetBlendShapes() const;
private:
const Skin* skin;
- std::vector<const BlendShape*> blendShapes;
+ std::unordered_set<const BlendShape*> blendShapes;
+
};
typedef std::vector<int> MatIndexArray;
@@ -112,7 +113,7 @@ public:
/// @return The binomal vector.
const std::vector<aiVector3D>& GetBinormals() const;
- /// @brief Return list of faces - each entry denotes a face and specifies how many vertices it has.
+ /// @brief Return list of faces - each entry denotes a face and specifies how many vertices it has.
/// Vertices are taken from the vertex data arrays in sequential order.
/// @return The face indices vector.
const std::vector<unsigned int>& GetFaceIndexCounts() const;
diff --git a/code/AssetLib/MDL/HalfLife/HL1MDLLoader.cpp b/code/AssetLib/MDL/HalfLife/HL1MDLLoader.cpp
index 93d37536c..a8141fcc1 100644
--- a/code/AssetLib/MDL/HalfLife/HL1MDLLoader.cpp
+++ b/code/AssetLib/MDL/HalfLife/HL1MDLLoader.cpp
@@ -470,14 +470,16 @@ void HL1MDLLoader::read_bones() {
temp_bones_.resize(header_->numbones);
+ // Create the main 'bones' node that will contain all MDL root bones.
aiNode *bones_node = new aiNode(AI_MDL_HL1_NODE_BONES);
rootnode_children_.push_back(bones_node);
- bones_node->mNumChildren = static_cast<unsigned int>(header_->numbones);
- bones_node->mChildren = new aiNode *[bones_node->mNumChildren];
+
+ // Store roots bones IDs temporarily.
+ std::vector<int> roots;
// Create bone matrices in local space.
for (int i = 0; i < header_->numbones; ++i) {
- aiNode *bone_node = temp_bones_[i].node = bones_node->mChildren[i] = new aiNode(unique_bones_names[i]);
+ aiNode *bone_node = temp_bones_[i].node = new aiNode(unique_bones_names[i]);
aiVector3D angles(pbone[i].value[3], pbone[i].value[4], pbone[i].value[5]);
temp_bones_[i].absolute_transform = bone_node->mTransformation =
@@ -485,9 +487,11 @@ void HL1MDLLoader::read_bones() {
aiVector3D(pbone[i].value[0], pbone[i].value[1], pbone[i].value[2]));
if (pbone[i].parent == -1) {
- bone_node->mParent = scene_->mRootNode;
+ bone_node->mParent = bones_node;
+ roots.push_back(i); // This bone has no parent. Add it to the roots list.
} else {
- bone_node->mParent = bones_node->mChildren[pbone[i].parent];
+ bone_node->mParent = temp_bones_[pbone[i].parent].node;
+ temp_bones_[pbone[i].parent].children.push_back(i); // Add this bone to the parent bone's children list.
temp_bones_[i].absolute_transform =
temp_bones_[pbone[i].parent].absolute_transform * bone_node->mTransformation;
@@ -496,6 +500,36 @@ void HL1MDLLoader::read_bones() {
temp_bones_[i].offset_matrix = temp_bones_[i].absolute_transform;
temp_bones_[i].offset_matrix.Inverse();
}
+
+ // Allocate memory for each MDL root bone.
+ bones_node->mNumChildren = static_cast<unsigned int>(roots.size());
+ bones_node->mChildren = new aiNode *[bones_node->mNumChildren];
+
+ // Build all bones children hierarchy starting from each MDL root bone.
+ for (size_t i = 0; i < roots.size(); ++i)
+ {
+ const TempBone &root_bone = temp_bones_[roots[i]];
+ bones_node->mChildren[i] = root_bone.node;
+ build_bone_children_hierarchy(root_bone);
+ }
+}
+
+void HL1MDLLoader::build_bone_children_hierarchy(const TempBone &bone)
+{
+ if (bone.children.empty())
+ return;
+
+ aiNode* bone_node = bone.node;
+ bone_node->mNumChildren = static_cast<unsigned int>(bone.children.size());
+ bone_node->mChildren = new aiNode *[bone_node->mNumChildren];
+
+ // Build each child bone's hierarchy recursively.
+ for (size_t i = 0; i < bone.children.size(); ++i)
+ {
+ const TempBone &child_bone = temp_bones_[bone.children[i]];
+ bone_node->mChildren[i] = child_bone.node;
+ build_bone_children_hierarchy(child_bone);
+ }
}
// ------------------------------------------------------------------------------------------------
diff --git a/code/AssetLib/MDL/HalfLife/HL1MDLLoader.h b/code/AssetLib/MDL/HalfLife/HL1MDLLoader.h
index 0dba5099d..286b6e64c 100644
--- a/code/AssetLib/MDL/HalfLife/HL1MDLLoader.h
+++ b/code/AssetLib/MDL/HalfLife/HL1MDLLoader.h
@@ -143,6 +143,14 @@ private:
*/
static bool get_num_blend_controllers(const int num_blend_animations, int &num_blend_controllers);
+ /**
+ * \brief Build a bone's node children hierarchy.
+ *
+ * \param[in] bone The bone for which we must build all children hierarchy.
+ */
+ struct TempBone;
+ void build_bone_children_hierarchy(const TempBone& bone);
+
/** Output scene to be filled */
aiScene *scene_;
@@ -198,11 +206,13 @@ private:
TempBone() :
node(nullptr),
absolute_transform(),
- offset_matrix() {}
+ offset_matrix(),
+ children() {}
aiNode *node;
aiMatrix4x4 absolute_transform;
aiMatrix4x4 offset_matrix;
+ std::vector<int> children; // Bone children
};
std::vector<TempBone> temp_bones_;
diff --git a/code/AssetLib/Ogre/OgreXmlSerializer.cpp b/code/AssetLib/Ogre/OgreXmlSerializer.cpp
index cd9d6dcc2..a8faaec34 100644
--- a/code/AssetLib/Ogre/OgreXmlSerializer.cpp
+++ b/code/AssetLib/Ogre/OgreXmlSerializer.cpp
@@ -490,7 +490,7 @@ bool OgreXmlSerializer::ImportSkeleton(Assimp::IOSystem *pIOHandler, MeshXml *me
OgreXmlSerializer serializer(xmlParser.get());
XmlNode root = xmlParser->getRootNode();
if (std::string(root.name()) != nnSkeleton) {
- printf("\nSkeleton is not a valid root: %s\n", root.name());
+ ASSIMP_LOG_VERBOSE_DEBUG("Skeleton is not a valid root: ", root.name(), ".");
for (auto &a : root.children()) {
if (std::string(a.name()) == nnSkeleton) {
root = a;
diff --git a/code/CApi/AssimpCExport.cpp b/code/CApi/AssimpCExport.cpp
index 5e43958d0..21e40205c 100644
--- a/code/CApi/AssimpCExport.cpp
+++ b/code/CApi/AssimpCExport.cpp
@@ -5,8 +5,6 @@ Open Asset Import Library (assimp)
Copyright (c) 2006-2022, assimp team
-
-
All rights reserved.
Redistribution and use of this software in source and binary forms,
diff --git a/code/CApi/CInterfaceIOWrapper.cpp b/code/CApi/CInterfaceIOWrapper.cpp
index 579545ecc..f0e46cd08 100644
--- a/code/CApi/CInterfaceIOWrapper.cpp
+++ b/code/CApi/CInterfaceIOWrapper.cpp
@@ -5,8 +5,6 @@ Open Asset Import Library (assimp)
Copyright (c) 2006-2022, assimp team
-
-
All rights reserved.
Redistribution and use of this software in source and binary forms,
@@ -47,14 +45,16 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
namespace Assimp {
+// ------------------------------------------------------------------------------------------------
CIOStreamWrapper::~CIOStreamWrapper() {
- /* Various places depend on this destructor to close the file */
- if (mFile) {
+ // Various places depend on this destructor to close the file
+ if (mFile != nullptr) {
+
mIO->mFileSystem->CloseProc(mIO->mFileSystem, mFile);
}
}
-// ...................................................................
+// ------------------------------------------------------------------------------------------------
size_t CIOStreamWrapper::Read(void *pvBuffer,
size_t pSize,
size_t pCount) {
@@ -62,7 +62,7 @@ size_t CIOStreamWrapper::Read(void *pvBuffer,
return mFile->ReadProc(mFile, (char *)pvBuffer, pSize, pCount);
}
-// ...................................................................
+// ------------------------------------------------------------------------------------------------
size_t CIOStreamWrapper::Write(const void *pvBuffer,
size_t pSize,
size_t pCount) {
@@ -70,23 +70,23 @@ size_t CIOStreamWrapper::Write(const void *pvBuffer,
return mFile->WriteProc(mFile, (const char *)pvBuffer, pSize, pCount);
}
-// ...................................................................
+// ------------------------------------------------------------------------------------------------
aiReturn CIOStreamWrapper::Seek(size_t pOffset,
aiOrigin pOrigin) {
return mFile->SeekProc(mFile, pOffset, pOrigin);
}
-// ...................................................................
+// ------------------------------------------------------------------------------------------------
size_t CIOStreamWrapper::Tell() const {
return mFile->TellProc(mFile);
}
-// ...................................................................
+// ------------------------------------------------------------------------------------------------
size_t CIOStreamWrapper::FileSize() const {
return mFile->FileSizeProc(mFile);
}
-// ...................................................................
+// ------------------------------------------------------------------------------------------------
void CIOStreamWrapper::Flush() {
return mFile->FlushProc(mFile);
}
diff --git a/code/CApi/CInterfaceIOWrapper.h b/code/CApi/CInterfaceIOWrapper.h
index 768be3746..28d4c3e75 100644
--- a/code/CApi/CInterfaceIOWrapper.h
+++ b/code/CApi/CInterfaceIOWrapper.h
@@ -47,48 +47,59 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include
#include
#include
+#include <assimp/ai_assert.h>
namespace Assimp {
class CIOSystemWrapper;
// ------------------------------------------------------------------------------------------------
-// Custom IOStream implementation for the C-API
-class CIOStreamWrapper : public IOStream {
+/// @brief Custom IOStream implementation for the C-API.
+// ------------------------------------------------------------------------------------------------
+class CIOStreamWrapper final : public IOStream {
public:
- explicit CIOStreamWrapper(aiFile *pFile, CIOSystemWrapper *io) :
- mFile(pFile),
- mIO(io) {}
- ~CIOStreamWrapper(void);
-
- size_t Read(void *pvBuffer, size_t pSize, size_t pCount);
- size_t Write(const void *pvBuffer, size_t pSize, size_t pCount);
- aiReturn Seek(size_t pOffset, aiOrigin pOrigin);
- size_t Tell(void) const;
- size_t FileSize() const;
- void Flush();
+ explicit CIOStreamWrapper(aiFile *pFile, CIOSystemWrapper *io);
+ ~CIOStreamWrapper() override;
+ size_t Read(void *pvBuffer, size_t pSize, size_t pCount) override;
+ size_t Write(const void *pvBuffer, size_t pSize, size_t pCount) override;
+ aiReturn Seek(size_t pOffset, aiOrigin pOrigin) override;
+ size_t Tell(void) const override;
+ size_t FileSize() const override;
+ void Flush() override;
private:
aiFile *mFile;
CIOSystemWrapper *mIO;
};
-class CIOSystemWrapper : public IOSystem {
+inline CIOStreamWrapper::CIOStreamWrapper(aiFile *pFile, CIOSystemWrapper *io) :
+ mFile(pFile),
+ mIO(io) {
+ ai_assert(io != nullptr);
+}
+
+// ------------------------------------------------------------------------------------------------
+/// @brief Custom IO-System wrapper implementation for the C-API.
+// ------------------------------------------------------------------------------------------------
+class CIOSystemWrapper final : public IOSystem {
friend class CIOStreamWrapper;
public:
- explicit CIOSystemWrapper(aiFileIO *pFile) :
- mFileSystem(pFile) {}
-
- bool Exists(const char *pFile) const;
- char getOsSeparator() const;
- IOStream *Open(const char *pFile, const char *pMode = "rb");
- void Close(IOStream *pFile);
+ explicit CIOSystemWrapper(aiFileIO *pFile);
+ ~CIOSystemWrapper() override = default;
+ bool Exists(const char *pFile) const override;
+ char getOsSeparator() const override;
+ IOStream *Open(const char *pFile, const char *pMode = "rb") override;
+ void Close(IOStream *pFile) override;
private:
aiFileIO *mFileSystem;
};
+inline CIOSystemWrapper::CIOSystemWrapper(aiFileIO *pFile) : mFileSystem(pFile) {
+ ai_assert(pFile != nullptr);
+}
+
} // namespace Assimp
-#endif
+#endif // AI_CIOSYSTEM_H_INCLUDED
diff --git a/code/CMakeLists.txt b/code/CMakeLists.txt
index a098f3e85..ba5415fe0 100644
--- a/code/CMakeLists.txt
+++ b/code/CMakeLists.txt
@@ -218,6 +218,12 @@ SET( CApi_SRCS
)
SOURCE_GROUP(CApi FILES ${CApi_SRCS})
+SET(Geometry_SRCS
+ Geometry/GeometryUtils.h
+ Geometry/GeometryUtils.cpp
+)
+SOURCE_GROUP(Geometry FILES ${Geometry_SRCS})
+
SET( STEPParser_SRCS
AssetLib/STEPParser/STEPFileReader.h
AssetLib/STEPParser/STEPFileReader.cpp
@@ -1129,6 +1135,7 @@ SET( assimp_src
${Core_SRCS}
${CApi_SRCS}
${Common_SRCS}
+ ${Geometry_SRCS}
${Logging_SRCS}
${Exporter_SRCS}
${PostProcessing_SRCS}
diff --git a/code/Common/PolyTools.h b/code/Common/PolyTools.h
index 9837a2991..a5bd1090b 100644
--- a/code/Common/PolyTools.h
+++ b/code/Common/PolyTools.h
@@ -74,26 +74,8 @@ inline bool OnLeftSideOfLine2D(const T& p0, const T& p1,const T& p2) {
* both aiVector3D and aiVector2D, but generally ignores the third coordinate.*/
template <typename T>
inline bool PointInTriangle2D(const T& p0, const T& p1,const T& p2, const T& pp) {
- // Point in triangle test using baryzentric coordinates
- const aiVector2D v0 = p1 - p0;
- const aiVector2D v1 = p2 - p0;
- const aiVector2D v2 = pp - p0;
-
- double dot00 = v0 * v0;
- double dot11 = v1 * v1;
- const double dot01 = v0 * v1;
- const double dot02 = v0 * v2;
- const double dot12 = v1 * v2;
- const double denom = dot00 * dot11 - dot01 * dot01;
- if (denom == 0.0) {
- return false;
- }
-
- const double invDenom = 1.0 / denom;
- dot11 = (dot11 * dot02 - dot01 * dot12) * invDenom;
- dot00 = (dot00 * dot12 - dot01 * dot02) * invDenom;
-
- return (dot11 > 0) && (dot00 > 0) && (dot11 + dot00 < 1);
+ // Point-in-triangle test: pp must lie on the left side of all three edges of the CCW-wound triangle.
+ return OnLeftSideOfLine2D(p0, p1, pp) && OnLeftSideOfLine2D(p1, p2, pp) && OnLeftSideOfLine2D(p2, p0, pp);
}
diff --git a/code/Common/Subdivision.cpp b/code/Common/Subdivision.cpp
index 705ea3fb3..3aea5d4c5 100644
--- a/code/Common/Subdivision.cpp
+++ b/code/Common/Subdivision.cpp
@@ -50,7 +50,10 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include
+#include <unordered_map>
+
using namespace Assimp;
+
void mydummy() {}
#ifdef _MSC_VER
@@ -78,7 +81,7 @@ public:
};
typedef std::vector<unsigned int> UIntVector;
- typedef std::map<uint64_t, Edge> EdgeMap;
+ typedef std::unordered_map<uint64_t, Edge> EdgeMap;
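+ // Edges are addressed by a hashed key (see the hashing helper below), so an unordered_map lookup is sufficient here.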
// ---------------------------------------------------------------------------
// Hashing function to derive an index into an #EdgeMap from two given
diff --git a/samples/SharedCode/UTFConverter.h b/code/Geometry/GeometryUtils.cpp
similarity index 54%
rename from samples/SharedCode/UTFConverter.h
rename to code/Geometry/GeometryUtils.cpp
index 17e89ee4d..ab735aa6e 100644
--- a/samples/SharedCode/UTFConverter.h
+++ b/code/Geometry/GeometryUtils.cpp
@@ -1,17 +1,14 @@
/*
----------------------------------------------------------------------------
Open Asset Import Library (assimp)
----------------------------------------------------------------------------
-
-Copyright (c) 2006-2020, assimp team
-
+----------------------------------------------------------------------
+Copyright (c) 2006-2022, assimp team
All rights reserved.
Redistribution and use of this software in source and binary forms,
-with or without modification, are permitted provided that the following
-conditions are met:
+with or without modification, are permitted provided that the
+following conditions are met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the
@@ -38,55 +35,45 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------------------
+
+----------------------------------------------------------------------
*/
-#ifndef ASSIMP_SAMPLES_SHARED_CODE_UTFCONVERTER_H
-#define ASSIMP_SAMPLES_SHARED_CODE_UTFCONVERTER_H
+#include "GeometryUtils.h"
-#include
-#include
-#include
+#include
-namespace AssimpSamples {
-namespace SharedCode {
+namespace Assimp {
-// Used to convert between multibyte and unicode strings.
-class UTFConverter {
- using UTFConverterImpl = std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t>;
-public:
- UTFConverter(const char* s) :
- s_(s),
- ws_(impl_.from_bytes(s)) {
- }
- UTFConverter(const wchar_t* s) :
- s_(impl_.to_bytes(s)),
- ws_(s) {
- }
- UTFConverter(const std::string& s) :
- s_(s),
- ws_(impl_.from_bytes(s)) {
- }
- UTFConverter(const std::wstring& s) :
- s_(impl_.to_bytes(s)),
- ws_(s) {
- }
- inline const char* c_str() const {
- return s_.c_str();
- }
- inline const std::string& str() const {
- return s_;
- }
- inline const wchar_t* c_wstr() const {
- return ws_.c_str();
- }
-private:
- static UTFConverterImpl impl_;
- std::string s_;
- std::wstring ws_;
-};
-
-}
+ai_real GeometryUtils::heron( ai_real a, ai_real b, ai_real c ) {
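+ // Heron's formula: area = sqrt(s * (s-a) * (s-b) * (s-c)), with s the semi-perimeter.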
+ ai_real s = (a + b + c) / 2;
+ ai_real area = pow((s * ( s - a ) * ( s - b ) * ( s - c ) ), (ai_real)0.5 );
+ return area;
}
-#endif // ASSIMP_SAMPLES_SHARED_CODE_UTFCONVERTER_H
+ai_real GeometryUtils::distance3D( const aiVector3D &vA, aiVector3D &vB ) {
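+ // Euclidean distance between the two points.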
+ const ai_real lx = ( vB.x - vA.x );
+ const ai_real ly = ( vB.y - vA.y );
+ const ai_real lz = ( vB.z - vA.z );
+ ai_real a = lx*lx + ly*ly + lz*lz;
+ ai_real d = pow( a, (ai_real)0.5 );
+
+ return d;
+}
+
+ai_real GeometryUtils::calculateAreaOfTriangle( const aiFace& face, aiMesh* mesh ) {
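+ // Fetch the three corner vertices, compute the side lengths, then apply Heron's formula.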
+ ai_real area = 0;
+
+ aiVector3D vA( mesh->mVertices[ face.mIndices[ 0 ] ] );
+ aiVector3D vB( mesh->mVertices[ face.mIndices[ 1 ] ] );
+ aiVector3D vC( mesh->mVertices[ face.mIndices[ 2 ] ] );
+
+ ai_real a( distance3D( vA, vB ) );
+ ai_real b( distance3D( vB, vC ) );
+ ai_real c( distance3D( vC, vA ) );
+ area = heron( a, b, c );
+
+ return area;
+}
+
+} // namespace Assimp
diff --git a/samples/SharedCode/UTFConverter.cpp b/code/Geometry/GeometryUtils.h
similarity index 60%
rename from samples/SharedCode/UTFConverter.cpp
rename to code/Geometry/GeometryUtils.h
index a1bff7e4b..ab49380de 100644
--- a/samples/SharedCode/UTFConverter.cpp
+++ b/code/Geometry/GeometryUtils.h
@@ -1,17 +1,14 @@
/*
----------------------------------------------------------------------------
Open Asset Import Library (assimp)
----------------------------------------------------------------------------
-
-Copyright (c) 2006-2020, assimp team
-
+----------------------------------------------------------------------
+Copyright (c) 2006-2022, assimp team
All rights reserved.
Redistribution and use of this software in source and binary forms,
-with or without modification, are permitted provided that the following
-conditions are met:
+with or without modification, are permitted provided that the
+following conditions are met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the
@@ -38,15 +35,33 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------------------
+
+----------------------------------------------------------------------
*/
-#include "UTFConverter.h"
+#include
+#include
-namespace AssimpSamples {
-namespace SharedCode {
+namespace Assimp {
-typename UTFConverter::UTFConverterImpl UTFConverter::impl_;
+// ---------------------------------------------------------------------------
+/// @brief This helper class supports some basic geometry algorithms.
+// ---------------------------------------------------------------------------
+class GeometryUtils {
+public:
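+ /// @brief Will compute the area of a triangle from its 3 side lengths (Heron's formula).
+ /// @param a Length of side a.
+ /// @param b Length of side b.
+ /// @param c Length of side c.
+ /// @return The area.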
+ static ai_real heron( ai_real a, ai_real b, ai_real c );
+
+ /// @brief Will compute the distance between 2 3D-vectors
+ /// @param vA Vector a.
+ /// @param vB Vector b.
+ /// @return The distance.
+ static ai_real distance3D( const aiVector3D &vA, aiVector3D &vB );
-}
-}
+ /// @brief Will calculate the area of a triangle described by a aiFace.
+ /// @param face The face
+ /// @param mesh The mesh containing the face
+ /// @return The area.
+ static ai_real calculateAreaOfTriangle( const aiFace& face, aiMesh* mesh );
+};
+
+} // namespace Assimp
diff --git a/code/PostProcessing/CalcTangentsProcess.cpp b/code/PostProcessing/CalcTangentsProcess.cpp
index efc457766..a23ac856b 100644
--- a/code/PostProcessing/CalcTangentsProcess.cpp
+++ b/code/PostProcessing/CalcTangentsProcess.cpp
@@ -60,10 +60,6 @@ CalcTangentsProcess::CalcTangentsProcess() :
// nothing to do here
}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-CalcTangentsProcess::~CalcTangentsProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool CalcTangentsProcess::IsActive(unsigned int pFlags) const {
diff --git a/code/PostProcessing/CalcTangentsProcess.h b/code/PostProcessing/CalcTangentsProcess.h
index 018789bae..aaccb5307 100644
--- a/code/PostProcessing/CalcTangentsProcess.h
+++ b/code/PostProcessing/CalcTangentsProcess.h
@@ -59,14 +59,11 @@ namespace Assimp
* because the joining of vertices also considers tangents and bitangents for
* uniqueness.
*/
-class ASSIMP_API_WINONLY CalcTangentsProcess : public BaseProcess
-{
+class ASSIMP_API_WINONLY CalcTangentsProcess : public BaseProcess {
public:
-
CalcTangentsProcess();
- ~CalcTangentsProcess();
+ ~CalcTangentsProcess() override = default;
-public:
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag.
* @param pFlags The processing flags the importer was called with.
@@ -74,24 +71,21 @@ public:
* @return true if the process is present in this flag fields,
* false if not.
*/
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Called prior to ExecuteOnScene().
* The function is a request to the process to update its configuration
* basing on the Importer's configuration property list.
*/
- void SetupProperties(const Importer* pImp);
-
+ void SetupProperties(const Importer* pImp) override;
// setter for configMaxAngle
- inline void SetMaxSmoothAngle(float f)
- {
+ void SetMaxSmoothAngle(float f) {
configMaxAngle =f;
}
protected:
-
// -------------------------------------------------------------------
/** Calculates tangents and bitangents for a specific mesh.
* @param pMesh The mesh to process.
@@ -103,10 +97,9 @@ protected:
/** Executes the post processing step on the given imported data.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
private:
-
/** Configuration option: maximum smoothing angle, in radians*/
float configMaxAngle;
unsigned int configSourceUV;
diff --git a/code/PostProcessing/ComputeUVMappingProcess.cpp b/code/PostProcessing/ComputeUVMappingProcess.cpp
index 237409f02..a5472668b 100644
--- a/code/PostProcessing/ComputeUVMappingProcess.cpp
+++ b/code/PostProcessing/ComputeUVMappingProcess.cpp
@@ -57,14 +57,6 @@ namespace {
const static ai_real angle_epsilon = ai_real( 0.95 );
}
-// ------------------------------------------------------------------------------------------------
-// Constructor to be privately used by Importer
-ComputeUVMappingProcess::ComputeUVMappingProcess() = default;
-
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-ComputeUVMappingProcess::~ComputeUVMappingProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool ComputeUVMappingProcess::IsActive( unsigned int pFlags) const
diff --git a/code/PostProcessing/ComputeUVMappingProcess.h b/code/PostProcessing/ComputeUVMappingProcess.h
index 74744be7f..c4158f402 100644
--- a/code/PostProcessing/ComputeUVMappingProcess.h
+++ b/code/PostProcessing/ComputeUVMappingProcess.h
@@ -59,13 +59,10 @@ namespace Assimp {
/** ComputeUVMappingProcess - converts special mappings, such as spherical,
* cylindrical or boxed to proper UV coordinates for rendering.
*/
-class ComputeUVMappingProcess : public BaseProcess
-{
-public:
- ComputeUVMappingProcess();
- ~ComputeUVMappingProcess();
-
+class ComputeUVMappingProcess : public BaseProcess {
public:
+ ComputeUVMappingProcess() = default;
+ ~ComputeUVMappingProcess() override = default;
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag field.
@@ -73,14 +70,14 @@ public:
* combination of #aiPostProcessSteps.
* @return true if the process is present in this flag fields, false if not.
*/
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Executes the post processing step on the given imported data.
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
protected:
@@ -125,8 +122,7 @@ protected:
private:
// temporary structure to describe a mapping
- struct MappingInfo
- {
+ struct MappingInfo {
explicit MappingInfo(aiTextureMapping _type)
: type (_type)
, axis (0.f,1.f,0.f)
@@ -137,8 +133,7 @@ private:
aiVector3D axis;
unsigned int uv;
- bool operator== (const MappingInfo& other)
- {
+ bool operator== (const MappingInfo& other) {
return type == other.type && axis == other.axis;
}
};
diff --git a/code/PostProcessing/ConvertToLHProcess.cpp b/code/PostProcessing/ConvertToLHProcess.cpp
index 359c5a284..08e3fe48a 100644
--- a/code/PostProcessing/ConvertToLHProcess.cpp
+++ b/code/PostProcessing/ConvertToLHProcess.cpp
@@ -79,14 +79,6 @@ void flipUVs(aiMeshType *pMesh) {
} // namespace
-// ------------------------------------------------------------------------------------------------
-// Constructor to be privately used by Importer
-MakeLeftHandedProcess::MakeLeftHandedProcess() = default;
-
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-MakeLeftHandedProcess::~MakeLeftHandedProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool MakeLeftHandedProcess::IsActive(unsigned int pFlags) const {
@@ -305,14 +297,6 @@ void FlipUVsProcess::ProcessMesh(aiMesh *pMesh) {
#ifndef ASSIMP_BUILD_NO_FLIPWINDING_PROCESS
// # FlipWindingOrderProcess
-// ------------------------------------------------------------------------------------------------
-// Constructor to be privately used by Importer
-FlipWindingOrderProcess::FlipWindingOrderProcess() = default;
-
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-FlipWindingOrderProcess::~FlipWindingOrderProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool FlipWindingOrderProcess::IsActive(unsigned int pFlags) const {
diff --git a/code/PostProcessing/ConvertToLHProcess.h b/code/PostProcessing/ConvertToLHProcess.h
index 474056c3a..d0532277d 100644
--- a/code/PostProcessing/ConvertToLHProcess.h
+++ b/code/PostProcessing/ConvertToLHProcess.h
@@ -4,7 +4,6 @@ Open Asset Import Library (assimp)
Copyright (c) 2006-2022, assimp team
-
All rights reserved.
Redistribution and use of this software in source and binary forms,
@@ -72,22 +71,18 @@ namespace Assimp {
*
* @note RH-LH and LH-RH is the same, so this class can be used for both
*/
-class MakeLeftHandedProcess : public BaseProcess
-{
-
-
+class MakeLeftHandedProcess : public BaseProcess {
public:
- MakeLeftHandedProcess();
- ~MakeLeftHandedProcess();
+ MakeLeftHandedProcess() = default;
+ ~MakeLeftHandedProcess() override = default;
// -------------------------------------------------------------------
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
protected:
-
// -------------------------------------------------------------------
/** Recursively converts a node and all of its children
*/
@@ -120,24 +115,22 @@ protected:
// ---------------------------------------------------------------------------
/** Postprocessing step to flip the face order of the imported data
*/
-class FlipWindingOrderProcess : public BaseProcess
-{
+class FlipWindingOrderProcess : public BaseProcess {
friend class Importer;
public:
/** Constructor to be privately used by Importer */
- FlipWindingOrderProcess();
+ FlipWindingOrderProcess() = default;
/** Destructor, private as well */
- ~FlipWindingOrderProcess();
+ ~FlipWindingOrderProcess() override = default;
// -------------------------------------------------------------------
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
-public:
/** Some other types of post-processing require winding order flips */
static void ProcessMesh( aiMesh* pMesh);
};
diff --git a/code/PostProcessing/DeboneProcess.cpp b/code/PostProcessing/DeboneProcess.cpp
index 22a4397bf..2a8499dc5 100644
--- a/code/PostProcessing/DeboneProcess.cpp
+++ b/code/PostProcessing/DeboneProcess.cpp
@@ -43,42 +43,26 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/// @file DeboneProcess.cpp
/** Implementation of the DeboneProcess post processing step */
-
-
// internal headers of the post-processing framework
#include "ProcessHelper.h"
#include "DeboneProcess.h"
#include
-
using namespace Assimp;
// ------------------------------------------------------------------------------------------------
// Constructor to be privately used by Importer
-DeboneProcess::DeboneProcess()
-{
- mNumBones = 0;
- mNumBonesCanDoWithout = 0;
-
- mThreshold = AI_DEBONE_THRESHOLD;
- mAllOrNone = false;
-}
-
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-DeboneProcess::~DeboneProcess() = default;
+DeboneProcess::DeboneProcess() : mNumBones(0), mNumBonesCanDoWithout(0), mThreshold(AI_DEBONE_THRESHOLD), mAllOrNone(false) {}
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
-bool DeboneProcess::IsActive( unsigned int pFlags) const
-{
+bool DeboneProcess::IsActive( unsigned int pFlags) const {
return (pFlags & aiProcess_Debone) != 0;
}
// ------------------------------------------------------------------------------------------------
// Executes the post processing step on the given imported data.
-void DeboneProcess::SetupProperties(const Importer* pImp)
-{
+void DeboneProcess::SetupProperties(const Importer* pImp) {
// get the current value of the property
mAllOrNone = pImp->GetPropertyInteger(AI_CONFIG_PP_DB_ALL_OR_NONE,0)?true:false;
mThreshold = pImp->GetPropertyFloat(AI_CONFIG_PP_DB_THRESHOLD,AI_DEBONE_THRESHOLD);
@@ -86,8 +70,7 @@ void DeboneProcess::SetupProperties(const Importer* pImp)
// ------------------------------------------------------------------------------------------------
// Executes the post processing step on the given imported data.
-void DeboneProcess::Execute( aiScene* pScene)
-{
+void DeboneProcess::Execute( aiScene* pScene) {
ASSIMP_LOG_DEBUG("DeboneProcess begin");
if(!pScene->mNumMeshes) {
@@ -117,10 +100,8 @@ void DeboneProcess::Execute( aiScene* pScene)
// build a new array of meshes for the scene
std::vector<aiMesh*> meshes;
- for(unsigned int a=0;a<pScene->mNumMeshes;a++)
- {
+ for (unsigned int a=0;a<pScene->mNumMeshes; ++a) {
aiMesh* srcMesh = pScene->mMeshes[a];
-
std::vector<std::pair<aiMesh*,const aiBone*> > newMeshes;
if(splitList[a]) {
@@ -150,8 +131,7 @@ void DeboneProcess::Execute( aiScene* pScene)
// and destroy the source mesh. It should be completely contained inside the new submeshes
delete srcMesh;
- }
- else {
+ } else {
// Mesh is kept unchanged - store it's new place in the mesh array
mSubMeshIndices[a].emplace_back(static_cast(meshes.size()), (aiNode *)nullptr);
meshes.push_back(srcMesh);
@@ -173,8 +153,7 @@ void DeboneProcess::Execute( aiScene* pScene)
// ------------------------------------------------------------------------------------------------
// Counts bones total/removable in a given mesh.
-bool DeboneProcess::ConsiderMesh(const aiMesh* pMesh)
-{
+bool DeboneProcess::ConsiderMesh(const aiMesh* pMesh) {
if(!pMesh->HasBones()) {
return false;
}
@@ -193,25 +172,23 @@ bool DeboneProcess::ConsiderMesh(const aiMesh* pMesh)
for(unsigned int i=0;i<pMesh->mNumBones;i++) {
for(unsigned int j=0;j<pMesh->mBones[i]->mNumWeights;j++) {
float w = pMesh->mBones[i]->mWeights[j].mWeight;
-
- if(w==0.0f) {
+ if (w == 0.0f) {
continue;
}
unsigned int vid = pMesh->mBones[i]->mWeights[j].mVertexId;
- if(w>=mThreshold) {
-
- if(vertexBones[vid]!=cUnowned) {
- if(vertexBones[vid]==i) //double entry
- {
+ if (w >= mThreshold) {
+ if (vertexBones[vid] != cUnowned) {
+ //double entry
+ if(vertexBones[vid]==i) {
ASSIMP_LOG_WARN("Encountered double entry in bone weights");
- }
- else //TODO: track attraction in order to break tie
- {
+ } else {
+ //TODO: track attraction in order to break tie
vertexBones[vid] = cCoowned;
}
- }
- else vertexBones[vid] = i;
+ } else {
+ vertexBones[vid] = i;
+ }
}
if(!isBoneNecessary[i]) {
@@ -227,13 +204,16 @@ bool DeboneProcess::ConsiderMesh(const aiMesh* pMesh)
if(isInterstitialRequired) {
for(unsigned int i=0;i<pMesh->mNumFaces;i++) {
unsigned int v = vertexBones[pMesh->mFaces[i].mIndices[0]];
-
- for(unsigned int j=1;j<pMesh->mFaces[i].mNumIndices;j++) {
+ for (unsigned int j=1;j<pMesh->mFaces[i].mNumIndices;j++) {
unsigned int w = vertexBones[pMesh->mFaces[i].mIndices[j]];
- if(v!=w) {
- if(v<pMesh->mNumBones) isBoneNecessary[v] = true;
- if(w<pMesh->mNumBones) isBoneNecessary[w] = true;
+ if (v != w) {
+ if(v<pMesh->mNumBones) {
+ isBoneNecessary[v] = true;
+ }
+ if (w<pMesh->mNumBones) {
+ isBoneNecessary[w] = true;
+ }
}
}
}
@@ -252,8 +232,7 @@ bool DeboneProcess::ConsiderMesh(const aiMesh* pMesh)
// ------------------------------------------------------------------------------------------------
// Splits the given mesh by bone count.
-void DeboneProcess::SplitMesh( const aiMesh* pMesh, std::vector< std::pair< aiMesh*,const aiBone* > >& poNewMeshes) const
-{
+void DeboneProcess::SplitMesh( const aiMesh* pMesh, std::vector< std::pair< aiMesh*,const aiBone* > >& poNewMeshes) const {
// same deal here as ConsiderMesh basically
std::vector<bool> isBoneNecessary(pMesh->mNumBones,false);
@@ -371,8 +350,7 @@ void DeboneProcess::SplitMesh( const aiMesh* pMesh, std::vector< std::pair< aiMe
// ------------------------------------------------------------------------------------------------
// Recursively updates the node's mesh list to account for the changed mesh list
-void DeboneProcess::UpdateNode(aiNode* pNode) const
-{
+void DeboneProcess::UpdateNode(aiNode* pNode) const {
// rebuild the node's mesh index list
std::vector<unsigned int> newMeshList;
@@ -430,8 +408,7 @@ void DeboneProcess::UpdateNode(aiNode* pNode) const
// ------------------------------------------------------------------------------------------------
// Apply the node transformation to a mesh
-void DeboneProcess::ApplyTransform(aiMesh* mesh, const aiMatrix4x4& mat)const
-{
+void DeboneProcess::ApplyTransform(aiMesh* mesh, const aiMatrix4x4& mat)const {
// Check whether we need to transform the coordinates at all
if (!mat.IsIdentity()) {
diff --git a/code/PostProcessing/DeboneProcess.h b/code/PostProcessing/DeboneProcess.h
index cb072b7eb..ae4448e0e 100644
--- a/code/PostProcessing/DeboneProcess.h
+++ b/code/PostProcessing/DeboneProcess.h
@@ -70,7 +70,7 @@ namespace Assimp {
class DeboneProcess : public BaseProcess {
public:
DeboneProcess();
- ~DeboneProcess();
+ ~DeboneProcess() override = default;
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag.
@@ -79,14 +79,14 @@ public:
* @return true if the process is present in this flag fields,
* false if not.
*/
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Called prior to ExecuteOnScene().
* The function is a request to the process to update its configuration
* basing on the Importer's configuration property list.
*/
- void SetupProperties(const Importer* pImp);
+ void SetupProperties(const Importer* pImp) override;
protected:
// -------------------------------------------------------------------
@@ -94,7 +94,7 @@ protected:
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
/** Counts bones total/removable in a given mesh.
diff --git a/code/PostProcessing/DropFaceNormalsProcess.cpp b/code/PostProcessing/DropFaceNormalsProcess.cpp
index f85daa588..223482374 100644
--- a/code/PostProcessing/DropFaceNormalsProcess.cpp
+++ b/code/PostProcessing/DropFaceNormalsProcess.cpp
@@ -54,14 +54,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
using namespace Assimp;
-// ------------------------------------------------------------------------------------------------
-// Constructor to be privately used by Importer
-DropFaceNormalsProcess::DropFaceNormalsProcess() = default;
-
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-DropFaceNormalsProcess::~DropFaceNormalsProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool DropFaceNormalsProcess::IsActive( unsigned int pFlags) const {
diff --git a/code/PostProcessing/DropFaceNormalsProcess.h b/code/PostProcessing/DropFaceNormalsProcess.h
index 50abdc727..df542f2ba 100644
--- a/code/PostProcessing/DropFaceNormalsProcess.h
+++ b/code/PostProcessing/DropFaceNormalsProcess.h
@@ -55,8 +55,8 @@ namespace Assimp {
*/
class ASSIMP_API_WINONLY DropFaceNormalsProcess : public BaseProcess {
public:
- DropFaceNormalsProcess();
- ~DropFaceNormalsProcess();
+ DropFaceNormalsProcess() = default;
+ ~DropFaceNormalsProcess() override = default;
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag field.
@@ -64,15 +64,14 @@ public:
* combination of #aiPostProcessSteps.
* @return true if the process is present in this flag fields, false if not.
*/
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Executes the post processing step on the given imported data.
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
-
+ void Execute( aiScene* pScene) override;
private:
bool DropMeshFaceNormals(aiMesh* pcMesh);
diff --git a/code/PostProcessing/EmbedTexturesProcess.cpp b/code/PostProcessing/EmbedTexturesProcess.cpp
index dc7e54ac1..d5d2ef872 100644
--- a/code/PostProcessing/EmbedTexturesProcess.cpp
+++ b/code/PostProcessing/EmbedTexturesProcess.cpp
@@ -49,10 +49,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
using namespace Assimp;
-EmbedTexturesProcess::EmbedTexturesProcess() = default;
-
-EmbedTexturesProcess::~EmbedTexturesProcess() = default;
-
bool EmbedTexturesProcess::IsActive(unsigned int pFlags) const {
return (pFlags & aiProcess_EmbedTextures) != 0;
}
diff --git a/code/PostProcessing/EmbedTexturesProcess.h b/code/PostProcessing/EmbedTexturesProcess.h
index c3e63612c..77d4d9c72 100644
--- a/code/PostProcessing/EmbedTexturesProcess.h
+++ b/code/PostProcessing/EmbedTexturesProcess.h
@@ -62,19 +62,19 @@ namespace Assimp {
class ASSIMP_API EmbedTexturesProcess : public BaseProcess {
public:
/// The default class constructor.
- EmbedTexturesProcess();
+ EmbedTexturesProcess() = default;
/// The class destructor.
- virtual ~EmbedTexturesProcess();
+ ~EmbedTexturesProcess() override = default;
/// Overwritten, @see BaseProcess
- virtual bool IsActive(unsigned int pFlags) const;
+ bool IsActive(unsigned int pFlags) const override;
/// Overwritten, @see BaseProcess
- virtual void SetupProperties(const Importer* pImp);
+ void SetupProperties(const Importer* pImp) override;
/// Overwritten, @see BaseProcess
- virtual void Execute(aiScene* pScene);
+ void Execute(aiScene* pScene) override;
private:
// Resolve the path and add the file content to the scene as a texture.
diff --git a/code/PostProcessing/FindDegenerates.cpp b/code/PostProcessing/FindDegenerates.cpp
index 344979949..5874c17d2 100644
--- a/code/PostProcessing/FindDegenerates.cpp
+++ b/code/PostProcessing/FindDegenerates.cpp
@@ -45,6 +45,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "ProcessHelper.h"
#include "FindDegenerates.h"
+#include "Geometry/GeometryUtils.h"
#include
@@ -63,10 +64,6 @@ FindDegeneratesProcess::FindDegeneratesProcess() :
// empty
}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-FindDegeneratesProcess::~FindDegeneratesProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool FindDegeneratesProcess::IsActive( unsigned int pFlags) const {
@@ -132,37 +129,6 @@ static void updateSceneGraph(aiNode* pNode, const std::unordered_map
-static ai_real heron( ai_real a, ai_real b, ai_real c ) {
- ai_real s = (a + b + c) / 2;
- ai_real area = pow((s * ( s - a ) * ( s - b ) * ( s - c ) ), (ai_real)0.5 );
- return area;
-}
-
-static ai_real distance3D( const aiVector3D &vA, aiVector3D &vB ) {
- const ai_real lx = ( vB.x - vA.x );
- const ai_real ly = ( vB.y - vA.y );
- const ai_real lz = ( vB.z - vA.z );
- ai_real a = lx*lx + ly*ly + lz*lz;
- ai_real d = pow( a, (ai_real)0.5 );
-
- return d;
-}
-
-static ai_real calculateAreaOfTriangle( const aiFace& face, aiMesh* mesh ) {
- ai_real area = 0;
-
- aiVector3D vA( mesh->mVertices[ face.mIndices[ 0 ] ] );
- aiVector3D vB( mesh->mVertices[ face.mIndices[ 1 ] ] );
- aiVector3D vC( mesh->mVertices[ face.mIndices[ 2 ] ] );
-
- ai_real a( distance3D( vA, vB ) );
- ai_real b( distance3D( vB, vC ) );
- ai_real c( distance3D( vC, vA ) );
- area = heron( a, b, c );
-
- return area;
-}
-
// ------------------------------------------------------------------------------------------------
// Executes the post processing step on the given imported mesh
bool FindDegeneratesProcess::ExecuteOnMesh( aiMesh* mesh) {
@@ -218,7 +184,7 @@ bool FindDegeneratesProcess::ExecuteOnMesh( aiMesh* mesh) {
if ( mConfigCheckAreaOfTriangle ) {
if ( face.mNumIndices == 3 ) {
- ai_real area = calculateAreaOfTriangle( face, mesh );
+ ai_real area = GeometryUtils::calculateAreaOfTriangle( face, mesh );
if (area < ai_epsilon) {
if ( mConfigRemoveDegenerates ) {
remove_me[ a ] = true;
diff --git a/code/PostProcessing/FindDegenerates.h b/code/PostProcessing/FindDegenerates.h
index 6fe1e929b..6b37a47cf 100644
--- a/code/PostProcessing/FindDegenerates.h
+++ b/code/PostProcessing/FindDegenerates.h
@@ -59,19 +59,19 @@ namespace Assimp {
class ASSIMP_API FindDegeneratesProcess : public BaseProcess {
public:
FindDegeneratesProcess();
- ~FindDegeneratesProcess();
+ ~FindDegeneratesProcess() override = default;
// -------------------------------------------------------------------
// Check whether step is active
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
// Execute step on a given scene
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
// Setup import settings
- void SetupProperties(const Importer* pImp);
+ void SetupProperties(const Importer* pImp) override;
// -------------------------------------------------------------------
// Execute step on a given mesh
@@ -105,23 +105,19 @@ private:
bool mConfigCheckAreaOfTriangle;
};
-inline
-void FindDegeneratesProcess::EnableInstantRemoval(bool enabled) {
+inline void FindDegeneratesProcess::EnableInstantRemoval(bool enabled) {
mConfigRemoveDegenerates = enabled;
}
-inline
-bool FindDegeneratesProcess::IsInstantRemoval() const {
+inline bool FindDegeneratesProcess::IsInstantRemoval() const {
return mConfigRemoveDegenerates;
}
-inline
-void FindDegeneratesProcess::EnableAreaCheck( bool enabled ) {
+inline void FindDegeneratesProcess::EnableAreaCheck( bool enabled ) {
mConfigCheckAreaOfTriangle = enabled;
}
-inline
-bool FindDegeneratesProcess::isAreaCheckEnabled() const {
+inline bool FindDegeneratesProcess::isAreaCheckEnabled() const {
return mConfigCheckAreaOfTriangle;
}
diff --git a/code/PostProcessing/FindInstancesProcess.cpp b/code/PostProcessing/FindInstancesProcess.cpp
index 07a0f66db..55974b1c3 100644
--- a/code/PostProcessing/FindInstancesProcess.cpp
+++ b/code/PostProcessing/FindInstancesProcess.cpp
@@ -58,10 +58,6 @@ FindInstancesProcess::FindInstancesProcess()
: configSpeedFlag (false)
{}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-FindInstancesProcess::~FindInstancesProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool FindInstancesProcess::IsActive( unsigned int pFlags) const
diff --git a/code/PostProcessing/FindInstancesProcess.h b/code/PostProcessing/FindInstancesProcess.h
index b501d88d5..6927301ca 100644
--- a/code/PostProcessing/FindInstancesProcess.h
+++ b/code/PostProcessing/FindInstancesProcess.h
@@ -50,7 +50,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "PostProcessing/ProcessHelper.h"
class FindInstancesProcessTest;
-namespace Assimp {
+
+namespace Assimp {
// -------------------------------------------------------------------------------
/** @brief Get a pseudo(!)-hash representing a mesh.
@@ -60,8 +61,7 @@ namespace Assimp {
* @param in Input mesh
* @return Hash.
*/
-inline
-uint64_t GetMeshHash(aiMesh* in) {
+inline uint64_t GetMeshHash(aiMesh* in) {
ai_assert(nullptr != in);
// ... get an unique value representing the vertex format of the mesh
@@ -83,8 +83,7 @@ uint64_t GetMeshHash(aiMesh* in) {
* @param e Epsilon
* @return true if the arrays are identical
*/
-inline
-bool CompareArrays(const aiVector3D* first, const aiVector3D* second,
+inline bool CompareArrays(const aiVector3D* first, const aiVector3D* second,
unsigned int size, float e) {
for (const aiVector3D* end = first+size; first != end; ++first,++second) {
if ( (*first - *second).SquareLength() >= e)
@@ -107,31 +106,27 @@ inline bool CompareArrays(const aiColor4D* first, const aiColor4D* second,
// ---------------------------------------------------------------------------
/** @brief A post-processing steps to search for instanced meshes
*/
-class FindInstancesProcess : public BaseProcess
-{
+class FindInstancesProcess : public BaseProcess {
public:
-
FindInstancesProcess();
- ~FindInstancesProcess();
+ ~FindInstancesProcess() override = default;
-public:
// -------------------------------------------------------------------
// Check whether step is active in given flags combination
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
// Execute step on a given scene
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
// Setup properties prior to executing the process
- void SetupProperties(const Importer* pImp);
+ void SetupProperties(const Importer* pImp) override;
private:
-
bool configSpeedFlag;
-
}; // ! end class FindInstancesProcess
+
} // ! end namespace Assimp
#endif // !! AI_FINDINSTANCES_H_INC
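
For illustration only (not part of the patch): a self-contained sketch of the epsilon test that CompareArrays performs above, where two vertex arrays count as identical when every pair of corresponding points stays within a squared-distance tolerance. Vec3 and nearlyEqual are hypothetical stand-ins, not assimp API.

// Illustrative sketch: epsilon comparison of two position arrays, in the
// spirit of CompareArrays above. Vec3 stands in for aiVector3D.
#include <cstdio>
#include <vector>

struct Vec3 { float x, y, z; };

static bool nearlyEqual(const std::vector<Vec3> &a, const std::vector<Vec3> &b, float epsilon) {
    if (a.size() != b.size()) {
        return false;
    }
    for (size_t i = 0; i < a.size(); ++i) {
        const float dx = a[i].x - b[i].x;
        const float dy = a[i].y - b[i].y;
        const float dz = a[i].z - b[i].z;
        if (dx * dx + dy * dy + dz * dz >= epsilon) {
            return false; // squared distance is compared against the tolerance
        }
    }
    return true;
}

int main() {
    const std::vector<Vec3> a{{0.0f, 0.0f, 0.0f}, {1.0f, 1.0f, 1.0f}};
    const std::vector<Vec3> b{{0.0f, 0.0f, 1e-4f}, {1.0f, 1.0f, 1.0f}};
    std::printf("%s\n", nearlyEqual(a, b, 1e-6f) ? "identical" : "different");
    return 0;
}
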
diff --git a/code/PostProcessing/FindInvalidDataProcess.cpp b/code/PostProcessing/FindInvalidDataProcess.cpp
index c65208cbd..bb8e365a1 100644
--- a/code/PostProcessing/FindInvalidDataProcess.cpp
+++ b/code/PostProcessing/FindInvalidDataProcess.cpp
@@ -60,10 +60,6 @@ FindInvalidDataProcess::FindInvalidDataProcess() :
// nothing to do here
}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-FindInvalidDataProcess::~FindInvalidDataProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool FindInvalidDataProcess::IsActive(unsigned int pFlags) const {
diff --git a/code/PostProcessing/FindInvalidDataProcess.h b/code/PostProcessing/FindInvalidDataProcess.h
index 5ea895c59..024eb9b1e 100644
--- a/code/PostProcessing/FindInvalidDataProcess.h
+++ b/code/PostProcessing/FindInvalidDataProcess.h
@@ -64,35 +64,37 @@ namespace Assimp {
* which have zero normal vectors. */
class ASSIMP_API FindInvalidDataProcess : public BaseProcess {
public:
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
FindInvalidDataProcess();
- ~FindInvalidDataProcess();
+ ~FindInvalidDataProcess() override = default;
// -------------------------------------------------------------------
- //
- bool IsActive(unsigned int pFlags) const;
+ /// Returns active state.
+ bool IsActive(unsigned int pFlags) const override;
// -------------------------------------------------------------------
- // Setup import settings
- void SetupProperties(const Importer *pImp);
+ /// Setup import settings
+ void SetupProperties(const Importer *pImp) override;
// -------------------------------------------------------------------
- // Run the step
- void Execute(aiScene *pScene);
+ /// Run the step
+ void Execute(aiScene *pScene) override;
// -------------------------------------------------------------------
- /** Executes the post-processing step on the given mesh
- * @param pMesh The mesh to process.
- * @return 0 - nothing, 1 - removed sth, 2 - please delete me */
+ /// Executes the post-processing step on the given mesh
+ /// @param pMesh The mesh to process.
+ /// @return 0 - nothing, 1 - removed sth, 2 - please delete me
int ProcessMesh(aiMesh *pMesh);
// -------------------------------------------------------------------
- /** Executes the post-processing step on the given animation
- * @param anim The animation to process. */
+ /// Executes the post-processing step on the given animation
+ /// @param anim The animation to process.
void ProcessAnimation(aiAnimation *anim);
// -------------------------------------------------------------------
- /** Executes the post-processing step on the given anim channel
- * @param anim The animation channel to process.*/
+ /// Executes the post-processing step on the given anim channel
+ /// @param anim The animation channel to process.
void ProcessAnimationChannel(aiNodeAnim *anim);
private:
diff --git a/code/PostProcessing/FixNormalsStep.cpp b/code/PostProcessing/FixNormalsStep.cpp
index 3791bd35a..54ac05cc8 100644
--- a/code/PostProcessing/FixNormalsStep.cpp
+++ b/code/PostProcessing/FixNormalsStep.cpp
@@ -56,26 +56,15 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
using namespace Assimp;
-
-// ------------------------------------------------------------------------------------------------
-// Constructor to be privately used by Importer
-FixInfacingNormalsProcess::FixInfacingNormalsProcess() = default;
-
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-FixInfacingNormalsProcess::~FixInfacingNormalsProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
-bool FixInfacingNormalsProcess::IsActive( unsigned int pFlags) const
-{
+bool FixInfacingNormalsProcess::IsActive( unsigned int pFlags) const {
return (pFlags & aiProcess_FixInfacingNormals) != 0;
}
// ------------------------------------------------------------------------------------------------
// Executes the post processing step on the given imported data.
-void FixInfacingNormalsProcess::Execute( aiScene* pScene)
-{
+void FixInfacingNormalsProcess::Execute( aiScene* pScene) {
ASSIMP_LOG_DEBUG("FixInfacingNormalsProcess begin");
bool bHas( false );
diff --git a/code/PostProcessing/FixNormalsStep.h b/code/PostProcessing/FixNormalsStep.h
index b7d3ba386..20be1958b 100644
--- a/code/PostProcessing/FixNormalsStep.h
+++ b/code/PostProcessing/FixNormalsStep.h
@@ -49,8 +49,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
struct aiMesh;
-namespace Assimp
-{
+namespace Assimp {
// ---------------------------------------------------------------------------
/** The FixInfacingNormalsProcess tries to determine whether the normal
@@ -59,8 +58,10 @@ namespace Assimp
*/
class FixInfacingNormalsProcess : public BaseProcess {
public:
- FixInfacingNormalsProcess();
- ~FixInfacingNormalsProcess();
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
+ FixInfacingNormalsProcess() = default;
+ ~FixInfacingNormalsProcess() override = default;
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag field.
@@ -68,14 +69,14 @@ public:
* combination of #aiPostProcessSteps.
* @return true if the process is present in this flag fields, false if not.
*/
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Executes the post processing step on the given imported data.
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
protected:
diff --git a/code/PostProcessing/GenBoundingBoxesProcess.cpp b/code/PostProcessing/GenBoundingBoxesProcess.cpp
index 52a0861e5..ca8e4d6d0 100644
--- a/code/PostProcessing/GenBoundingBoxesProcess.cpp
+++ b/code/PostProcessing/GenBoundingBoxesProcess.cpp
@@ -48,10 +48,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
namespace Assimp {
-GenBoundingBoxesProcess::GenBoundingBoxesProcess() = default;
-
-GenBoundingBoxesProcess::~GenBoundingBoxesProcess() = default;
-
bool GenBoundingBoxesProcess::IsActive(unsigned int pFlags) const {
return 0 != ( pFlags & aiProcess_GenBoundingBoxes );
}
diff --git a/code/PostProcessing/GenBoundingBoxesProcess.h b/code/PostProcessing/GenBoundingBoxesProcess.h
index 0b7591b6d..0cf8514f4 100644
--- a/code/PostProcessing/GenBoundingBoxesProcess.h
+++ b/code/PostProcessing/GenBoundingBoxesProcess.h
@@ -19,7 +19,7 @@ conditions are met:
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
-
+
* Neither the name of the assimp team, nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior
@@ -54,18 +54,23 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
namespace Assimp {
-/** Post-processing process to find axis-aligned bounding volumes for amm meshes
- * used in a scene
+/**
+ * @brief Post-processing process to find axis-aligned bounding volumes for all meshes
+ * used in a scene.
*/
class ASSIMP_API GenBoundingBoxesProcess : public BaseProcess {
public:
- /// The class constructor.
- GenBoundingBoxesProcess();
- /// The class destructor.
- ~GenBoundingBoxesProcess();
- /// Will return true, if aiProcess_GenBoundingBoxes is defined.
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
+ GenBoundingBoxesProcess() = default;
+ ~GenBoundingBoxesProcess() override = default;
+
+ // -------------------------------------------------------------------
+ /// @brief Will return true, if aiProcess_GenBoundingBoxes is defined.
bool IsActive(unsigned int pFlags) const override;
- /// The execution callback.
+
+ // -------------------------------------------------------------------
+ /// @brief The execution callback.
void Execute(aiScene* pScene) override;
};
diff --git a/code/PostProcessing/GenFaceNormalsProcess.cpp b/code/PostProcessing/GenFaceNormalsProcess.cpp
index d3520d4b2..1d259ce22 100644
--- a/code/PostProcessing/GenFaceNormalsProcess.cpp
+++ b/code/PostProcessing/GenFaceNormalsProcess.cpp
@@ -54,14 +54,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
using namespace Assimp;
-// ------------------------------------------------------------------------------------------------
-// Constructor to be privately used by Importer
-GenFaceNormalsProcess::GenFaceNormalsProcess() = default;
-
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-GenFaceNormalsProcess::~GenFaceNormalsProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool GenFaceNormalsProcess::IsActive(unsigned int pFlags) const {
diff --git a/code/PostProcessing/GenFaceNormalsProcess.h b/code/PostProcessing/GenFaceNormalsProcess.h
index c2f157e20..94794631e 100644
--- a/code/PostProcessing/GenFaceNormalsProcess.h
+++ b/code/PostProcessing/GenFaceNormalsProcess.h
@@ -47,35 +47,33 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "Common/BaseProcess.h"
#include
-namespace Assimp
-{
+namespace Assimp {
// ---------------------------------------------------------------------------
-/** The GenFaceNormalsProcess computes face normals for all faces of all meshes
-*/
-class ASSIMP_API_WINONLY GenFaceNormalsProcess : public BaseProcess
-{
+/**
+ * @brief The GenFaceNormalsProcess computes face normals for all faces of all meshes
+ */
+class ASSIMP_API_WINONLY GenFaceNormalsProcess : public BaseProcess {
public:
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
+ GenFaceNormalsProcess() = default;
+ ~GenFaceNormalsProcess() override = default;
- GenFaceNormalsProcess();
- ~GenFaceNormalsProcess();
-
-public:
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag field.
* @param pFlags The processing flags the importer was called with. A bitwise
* combination of #aiPostProcessSteps.
* @return true if the process is present in this flag fields, false if not.
*/
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Executes the post processing step on the given imported data.
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
-
+ void Execute( aiScene* pScene) override;
private:
bool GenMeshFaceNormals(aiMesh* pcMesh);
diff --git a/code/PostProcessing/GenVertexNormalsProcess.cpp b/code/PostProcessing/GenVertexNormalsProcess.cpp
index 5b9033383..c8afac297 100644
--- a/code/PostProcessing/GenVertexNormalsProcess.cpp
+++ b/code/PostProcessing/GenVertexNormalsProcess.cpp
@@ -60,10 +60,6 @@ GenVertexNormalsProcess::GenVertexNormalsProcess() :
// empty
}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-GenVertexNormalsProcess::~GenVertexNormalsProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool GenVertexNormalsProcess::IsActive(unsigned int pFlags) const {
@@ -109,10 +105,10 @@ void GenVertexNormalsProcess::Execute(aiScene *pScene) {
// Executes the post processing step on the given imported data.
bool GenVertexNormalsProcess::GenMeshVertexNormals(aiMesh *pMesh, unsigned int meshIndex) {
if (nullptr != pMesh->mNormals) {
- if (force_)
- delete[] pMesh->mNormals;
- else
+ if (!force_) {
return false;
+ }
+ delete[] pMesh->mNormals;
}
// If the mesh consists of lines and/or points but not of
@@ -144,8 +140,9 @@ bool GenVertexNormalsProcess::GenMeshVertexNormals(aiMesh *pMesh, unsigned int m
const aiVector3D *pV3 = &pMesh->mVertices[face.mIndices[face.mNumIndices - 1]];
// Boolean XOR - if either but not both of these flags is set, then the winding order has
// changed and the cross product to calculate the normal needs to be reversed
- if (flippedWindingOrder_ != leftHanded_)
+ if (flippedWindingOrder_ != leftHanded_) {
std::swap(pV2, pV3);
+ }
const aiVector3D vNor = ((*pV2 - *pV1) ^ (*pV3 - *pV1)).NormalizeSafe();
for (unsigned int i = 0; i < face.mNumIndices; ++i) {
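
For illustration only (not part of the patch): a minimal sketch of the face-normal convention the hunk above relies on. The normal is the normalized cross product of two edge vectors, so swapping the second and third vertices (which is what the pointer swap does when exactly one of flippedWindingOrder_ / leftHanded_ is set) flips its direction. Vec3 and faceNormal are hypothetical stand-ins, not assimp API.

// Illustrative sketch: mirrors the cross-product convention used in
// GenVertexNormalsProcess::GenMeshVertexNormals above. Vec3 stands in
// for aiVector3D; none of these names come from the assimp API.
#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

static Vec3 sub(const Vec3 &a, const Vec3 &b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static Vec3 cross(const Vec3 &a, const Vec3 &b) {
    return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x};
}
static Vec3 normalize(const Vec3 &v) {
    const float len = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
    return len > 0.0f ? Vec3{v.x / len, v.y / len, v.z / len} : Vec3{0.0f, 0.0f, 0.0f};
}

// Face normal of triangle (p1, p2, p3); swapping p2 and p3 reverses it,
// which is why the process swaps the pointers when the winding changed.
static Vec3 faceNormal(const Vec3 &p1, const Vec3 &p2, const Vec3 &p3) {
    return normalize(cross(sub(p2, p1), sub(p3, p1)));
}

int main() {
    const Vec3 a{0, 0, 0}, b{1, 0, 0}, c{0, 1, 0};
    const Vec3 n1 = faceNormal(a, b, c); // (0, 0, 1)
    const Vec3 n2 = faceNormal(a, c, b); // (0, 0, -1): reversed winding flips the normal
    std::printf("%g %g %g vs %g %g %g\n", n1.x, n1.y, n1.z, n2.x, n2.y, n2.z);
    return 0;
}
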
diff --git a/code/PostProcessing/GenVertexNormalsProcess.h b/code/PostProcessing/GenVertexNormalsProcess.h
index 370bf42b1..b7db9c4f2 100644
--- a/code/PostProcessing/GenVertexNormalsProcess.h
+++ b/code/PostProcessing/GenVertexNormalsProcess.h
@@ -60,8 +60,10 @@ namespace Assimp {
*/
class ASSIMP_API GenVertexNormalsProcess : public BaseProcess {
public:
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
GenVertexNormalsProcess();
- ~GenVertexNormalsProcess();
+ ~GenVertexNormalsProcess() override = default;
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag.
@@ -70,22 +72,21 @@ public:
* @return true if the process is present in this flag fields,
* false if not.
*/
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Called prior to ExecuteOnScene().
* The function is a request to the process to update its configuration
* basing on the Importer's configuration property list.
*/
- void SetupProperties(const Importer* pImp);
+ void SetupProperties(const Importer* pImp) override;
// -------------------------------------------------------------------
/** Executes the post processing step on the given imported data.
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
-
+ void Execute( aiScene* pScene) override;
// setter for configMaxAngle
inline void SetMaxSmoothAngle(ai_real f) {
diff --git a/code/PostProcessing/ImproveCacheLocality.cpp b/code/PostProcessing/ImproveCacheLocality.cpp
index 197856171..9336d6b17 100644
--- a/code/PostProcessing/ImproveCacheLocality.cpp
+++ b/code/PostProcessing/ImproveCacheLocality.cpp
@@ -68,10 +68,6 @@ ImproveCacheLocalityProcess::ImproveCacheLocalityProcess()
// empty
}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-ImproveCacheLocalityProcess::~ImproveCacheLocalityProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool ImproveCacheLocalityProcess::IsActive( unsigned int pFlags) const {
diff --git a/code/PostProcessing/ImproveCacheLocality.h b/code/PostProcessing/ImproveCacheLocality.h
index b2074a17c..6f4d55719 100644
--- a/code/PostProcessing/ImproveCacheLocality.h
+++ b/code/PostProcessing/ImproveCacheLocality.h
@@ -51,8 +51,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
struct aiMesh;
-namespace Assimp
-{
+namespace Assimp {
// ---------------------------------------------------------------------------
/** The ImproveCacheLocalityProcess reorders all faces for improved vertex
@@ -61,26 +60,24 @@ namespace Assimp
*
* @note This step expects triagulated input data.
*/
-class ImproveCacheLocalityProcess : public BaseProcess
-{
+class ImproveCacheLocalityProcess : public BaseProcess {
public:
-
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
ImproveCacheLocalityProcess();
- ~ImproveCacheLocalityProcess();
-
-public:
+ ~ImproveCacheLocalityProcess() override = default;
// -------------------------------------------------------------------
// Check whether the pp step is active
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
// Executes the pp step on a given scene
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
// Configures the pp step
- void SetupProperties(const Importer* pImp);
+ void SetupProperties(const Importer* pImp) override;
protected:
// -------------------------------------------------------------------
diff --git a/code/PostProcessing/JoinVerticesProcess.h b/code/PostProcessing/JoinVerticesProcess.h
index b05d74ef5..aa8dc5794 100644
--- a/code/PostProcessing/JoinVerticesProcess.h
+++ b/code/PostProcessing/JoinVerticesProcess.h
@@ -51,8 +51,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
struct aiMesh;
-namespace Assimp
-{
+namespace Assimp {
// ---------------------------------------------------------------------------
/** The JoinVerticesProcess unites identical vertices in all imported meshes.
@@ -65,12 +64,9 @@ namespace Assimp
class ASSIMP_API JoinVerticesProcess : public BaseProcess {
public:
// -------------------------------------------------------------------
- /// @brief The default class constructor.
- JoinVerticesProcess() = default;
-
- // -------------------------------------------------------------------
- /// @brief The default class destructor.
- ~JoinVerticesProcess() = default;
+ /// The default class constructor / destructor.
+ JoinVerticesProcess() = default;
+ ~JoinVerticesProcess() override = default;
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag field.
@@ -78,14 +74,14 @@ public:
* combination of #aiPostProcessSteps.
* @return true if the process is present in this flag fields, false if not.
*/
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Executes the post processing step on the given imported data.
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
/** Unites identical vertices in the given mesh.
diff --git a/code/PostProcessing/LimitBoneWeightsProcess.cpp b/code/PostProcessing/LimitBoneWeightsProcess.cpp
index 51fb43dfc..7047ec0f1 100644
--- a/code/PostProcessing/LimitBoneWeightsProcess.cpp
+++ b/code/PostProcessing/LimitBoneWeightsProcess.cpp
@@ -53,11 +53,9 @@ namespace Assimp {
// ------------------------------------------------------------------------------------------------
// Constructor to be privately used by Importer
-LimitBoneWeightsProcess::LimitBoneWeightsProcess() : mMaxWeights(AI_LMW_MAX_WEIGHTS) {}
-
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-LimitBoneWeightsProcess::~LimitBoneWeightsProcess() = default;
+LimitBoneWeightsProcess::LimitBoneWeightsProcess() : mMaxWeights(AI_LMW_MAX_WEIGHTS) {
+ // empty
+}
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
diff --git a/code/PostProcessing/LimitBoneWeightsProcess.h b/code/PostProcessing/LimitBoneWeightsProcess.h
index 22d286b68..b19d536cf 100644
--- a/code/PostProcessing/LimitBoneWeightsProcess.h
+++ b/code/PostProcessing/LimitBoneWeightsProcess.h
@@ -74,8 +74,10 @@ namespace Assimp {
*/
class ASSIMP_API LimitBoneWeightsProcess : public BaseProcess {
public:
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
LimitBoneWeightsProcess();
- ~LimitBoneWeightsProcess();
+ ~LimitBoneWeightsProcess() override = default;
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag.
@@ -84,27 +86,27 @@ public:
* @return true if the process is present in this flag fields,
* false if not.
*/
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Called prior to ExecuteOnScene().
* The function is a request to the process to update its configuration
* basing on the Importer's configuration property list.
*/
- void SetupProperties(const Importer* pImp);
-
- // -------------------------------------------------------------------
- /** Limits the bone weight count for all vertices in the given mesh.
- * @param pMesh The mesh to process.
- */
- void ProcessMesh( aiMesh* pMesh);
+ void SetupProperties(const Importer* pImp) override;
// -------------------------------------------------------------------
/** Executes the post processing step on the given imported data.
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
+
+ // -------------------------------------------------------------------
+ /** Limits the bone weight count for all vertices in the given mesh.
+ * @param pMesh The mesh to process.
+ */
+ void ProcessMesh( aiMesh* pMesh);
// -------------------------------------------------------------------
/** Describes a bone weight on a vertex */
diff --git a/code/PostProcessing/MakeVerboseFormat.cpp b/code/PostProcessing/MakeVerboseFormat.cpp
index 0f5276cf3..1cc2fdc02 100644
--- a/code/PostProcessing/MakeVerboseFormat.cpp
+++ b/code/PostProcessing/MakeVerboseFormat.cpp
@@ -49,10 +49,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
using namespace Assimp;
-// ------------------------------------------------------------------------------------------------
-MakeVerboseFormatProcess::MakeVerboseFormatProcess() = default;
-// ------------------------------------------------------------------------------------------------
-MakeVerboseFormatProcess::~MakeVerboseFormatProcess() = default;
// ------------------------------------------------------------------------------------------------
// Executes the post processing step on the given imported data.
void MakeVerboseFormatProcess::Execute(aiScene *pScene) {
diff --git a/code/PostProcessing/MakeVerboseFormat.h b/code/PostProcessing/MakeVerboseFormat.h
index 6b81da622..f21f5919e 100644
--- a/code/PostProcessing/MakeVerboseFormat.h
+++ b/code/PostProcessing/MakeVerboseFormat.h
@@ -66,22 +66,19 @@ namespace Assimp {
* The step has been added because it was required by the viewer, however
* it has been moved to the main library since others might find it
* useful, too. */
-class ASSIMP_API_WINONLY MakeVerboseFormatProcess : public BaseProcess
-{
-public:
-
-
- MakeVerboseFormatProcess();
- ~MakeVerboseFormatProcess();
-
+class ASSIMP_API_WINONLY MakeVerboseFormatProcess : public BaseProcess {
public:
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
+ MakeVerboseFormatProcess() = default;
+ ~MakeVerboseFormatProcess() override = default;
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag field.
* @param pFlags The processing flags the importer was called with. A bitwise
* combination of #aiPostProcessSteps.
* @return true if the process is present in this flag fields, false if not */
- bool IsActive( unsigned int /*pFlags*/ ) const
+ bool IsActive( unsigned int /*pFlags*/ ) const override
{
// NOTE: There is no direct flag that corresponds to
// this postprocess step.
@@ -92,7 +89,7 @@ public:
/** Executes the post processing step on the given imported data.
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at. */
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
public:
diff --git a/code/PostProcessing/OptimizeGraph.cpp b/code/PostProcessing/OptimizeGraph.cpp
index 26b06e9b6..bcd654634 100644
--- a/code/PostProcessing/OptimizeGraph.cpp
+++ b/code/PostProcessing/OptimizeGraph.cpp
@@ -78,10 +78,6 @@ OptimizeGraphProcess::OptimizeGraphProcess() :
// empty
}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-OptimizeGraphProcess::~OptimizeGraphProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool OptimizeGraphProcess::IsActive(unsigned int pFlags) const {
diff --git a/code/PostProcessing/OptimizeGraph.h b/code/PostProcessing/OptimizeGraph.h
index f5caa139c..23e59e67d 100644
--- a/code/PostProcessing/OptimizeGraph.h
+++ b/code/PostProcessing/OptimizeGraph.h
@@ -71,8 +71,10 @@ namespace Assimp {
*/
class OptimizeGraphProcess : public BaseProcess {
public:
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
OptimizeGraphProcess();
- ~OptimizeGraphProcess();
+ ~OptimizeGraphProcess() override = default;
// -------------------------------------------------------------------
bool IsActive( unsigned int pFlags) const override;
diff --git a/code/PostProcessing/OptimizeMeshes.cpp b/code/PostProcessing/OptimizeMeshes.cpp
index a8c01e2d7..0fd597808 100644
--- a/code/PostProcessing/OptimizeMeshes.cpp
+++ b/code/PostProcessing/OptimizeMeshes.cpp
@@ -69,10 +69,6 @@ OptimizeMeshesProcess::OptimizeMeshesProcess()
// empty
}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-OptimizeMeshesProcess::~OptimizeMeshesProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool OptimizeMeshesProcess::IsActive( unsigned int pFlags) const
diff --git a/code/PostProcessing/OptimizeMeshes.h b/code/PostProcessing/OptimizeMeshes.h
index b80f98d5d..0b062959a 100644
--- a/code/PostProcessing/OptimizeMeshes.h
+++ b/code/PostProcessing/OptimizeMeshes.h
@@ -68,11 +68,10 @@ namespace Assimp {
*/
class OptimizeMeshesProcess : public BaseProcess {
public:
- /// @brief The class constructor.
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
OptimizeMeshesProcess();
-
- /// @brief The class destructor.
- ~OptimizeMeshesProcess();
+ ~OptimizeMeshesProcess() override = default;
/** @brief Internal utility to store additional mesh info
*/
@@ -94,16 +93,14 @@ public:
unsigned int output_id;
};
-public:
// -------------------------------------------------------------------
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
- void SetupProperties(const Importer* pImp);
-
+ void SetupProperties(const Importer* pImp) override;
// -------------------------------------------------------------------
/** @brief Specify whether you want meshes with different
diff --git a/code/PostProcessing/PretransformVertices.cpp b/code/PostProcessing/PretransformVertices.cpp
index 9ac90d277..b6bb6155e 100644
--- a/code/PostProcessing/PretransformVertices.cpp
+++ b/code/PostProcessing/PretransformVertices.cpp
@@ -68,10 +68,6 @@ PretransformVertices::PretransformVertices() :
// empty
}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-PretransformVertices::~PretransformVertices() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool PretransformVertices::IsActive(unsigned int pFlags) const {
diff --git a/code/PostProcessing/PretransformVertices.h b/code/PostProcessing/PretransformVertices.h
index 14e5139ec..7c2b5e99e 100644
--- a/code/PostProcessing/PretransformVertices.h
+++ b/code/PostProcessing/PretransformVertices.h
@@ -68,8 +68,10 @@ namespace Assimp {
*/
class ASSIMP_API PretransformVertices : public BaseProcess {
public:
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
PretransformVertices();
- ~PretransformVertices();
+ ~PretransformVertices() override = default;
// -------------------------------------------------------------------
// Check whether step is active
diff --git a/code/PostProcessing/RemoveRedundantMaterials.cpp b/code/PostProcessing/RemoveRedundantMaterials.cpp
index 3c3cd59e0..dbc3c8822 100644
--- a/code/PostProcessing/RemoveRedundantMaterials.cpp
+++ b/code/PostProcessing/RemoveRedundantMaterials.cpp
@@ -62,10 +62,6 @@ RemoveRedundantMatsProcess::RemoveRedundantMatsProcess()
// nothing to do here
}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-RemoveRedundantMatsProcess::~RemoveRedundantMatsProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool RemoveRedundantMatsProcess::IsActive( unsigned int pFlags) const
diff --git a/code/PostProcessing/RemoveRedundantMaterials.h b/code/PostProcessing/RemoveRedundantMaterials.h
index e8c1478fd..1b42bea55 100644
--- a/code/PostProcessing/RemoveRedundantMaterials.h
+++ b/code/PostProcessing/RemoveRedundantMaterials.h
@@ -59,23 +59,22 @@ namespace Assimp {
*/
class ASSIMP_API RemoveRedundantMatsProcess : public BaseProcess {
public:
- /// The default class constructor.
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
RemoveRedundantMatsProcess();
-
- /// The class destructor.
- ~RemoveRedundantMatsProcess();
+ ~RemoveRedundantMatsProcess() override = default;
// -------------------------------------------------------------------
// Check whether step is active
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
// Execute step on a given scene
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
// Setup import settings
- void SetupProperties(const Importer* pImp);
+ void SetupProperties(const Importer* pImp) override;
// -------------------------------------------------------------------
/** @brief Set list of fixed (inmutable) materials
diff --git a/code/PostProcessing/RemoveVCProcess.cpp b/code/PostProcessing/RemoveVCProcess.cpp
index 8bbe791f6..35047dc0a 100644
--- a/code/PostProcessing/RemoveVCProcess.cpp
+++ b/code/PostProcessing/RemoveVCProcess.cpp
@@ -56,10 +56,6 @@ using namespace Assimp;
RemoveVCProcess::RemoveVCProcess() :
configDeleteFlags(), mScene() {}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-RemoveVCProcess::~RemoveVCProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool RemoveVCProcess::IsActive(unsigned int pFlags) const {
diff --git a/code/PostProcessing/RemoveVCProcess.h b/code/PostProcessing/RemoveVCProcess.h
index cf1086882..45c0b3a71 100644
--- a/code/PostProcessing/RemoveVCProcess.h
+++ b/code/PostProcessing/RemoveVCProcess.h
@@ -58,11 +58,10 @@ namespace Assimp {
*/
class ASSIMP_API RemoveVCProcess : public BaseProcess {
public:
- /// The default class constructor.
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
RemoveVCProcess();
-
- /// The class destructor.
- ~RemoveVCProcess();
+ ~RemoveVCProcess() override = default;
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag field.
@@ -70,37 +69,35 @@ public:
* combination of #aiPostProcessSteps.
* @return true if the process is present in this flag fields, false if not.
*/
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Executes the post processing step on the given imported data.
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
/** Called prior to ExecuteOnScene().
* The function is a request to the process to update its configuration
* basing on the Importer's configuration property list.
*/
- virtual void SetupProperties(const Importer* pImp);
+ void SetupProperties(const Importer* pImp) override;
// -------------------------------------------------------------------
/** Manually setup the configuration flags for the step
*
* @param Bitwise combination of the #aiComponent enumerated values.
*/
- void SetDeleteFlags(unsigned int f)
- {
+ void SetDeleteFlags(unsigned int f) {
configDeleteFlags = f;
}
// -------------------------------------------------------------------
/** Query the current configuration.
*/
- unsigned int GetDeleteFlags() const
- {
+ unsigned int GetDeleteFlags() const {
return configDeleteFlags;
}
diff --git a/code/PostProcessing/ScaleProcess.cpp b/code/PostProcessing/ScaleProcess.cpp
index 34f68539a..665f28a7e 100644
--- a/code/PostProcessing/ScaleProcess.cpp
+++ b/code/PostProcessing/ScaleProcess.cpp
@@ -47,25 +47,27 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
namespace Assimp {
-ScaleProcess::ScaleProcess()
-: BaseProcess()
-, mScale( AI_CONFIG_GLOBAL_SCALE_FACTOR_DEFAULT ) {
+// ------------------------------------------------------------------------------------------------
+ScaleProcess::ScaleProcess() : BaseProcess(), mScale( AI_CONFIG_GLOBAL_SCALE_FACTOR_DEFAULT ) {
+ // empty
}
-ScaleProcess::~ScaleProcess() = default;
-
+// ------------------------------------------------------------------------------------------------
void ScaleProcess::setScale( ai_real scale ) {
mScale = scale;
}
+// ------------------------------------------------------------------------------------------------
ai_real ScaleProcess::getScale() const {
return mScale;
}
+// ------------------------------------------------------------------------------------------------
bool ScaleProcess::IsActive( unsigned int pFlags ) const {
return ( pFlags & aiProcess_GlobalScale ) != 0;
}
+// ------------------------------------------------------------------------------------------------
void ScaleProcess::SetupProperties( const Importer* pImp ) {
// User scaling
mScale = pImp->GetPropertyFloat( AI_CONFIG_GLOBAL_SCALE_FACTOR_KEY, 1.0f );
@@ -78,6 +80,7 @@ void ScaleProcess::SetupProperties( const Importer* pImp ) {
mScale *= importerScale;
}
+// ------------------------------------------------------------------------------------------------
void ScaleProcess::Execute( aiScene* pScene ) {
if(mScale == 1.0f) {
return; // nothing to scale
@@ -96,37 +99,30 @@ void ScaleProcess::Execute( aiScene* pScene ) {
}
// Process animations and update position transform to new unit system
- for( unsigned int animationID = 0; animationID < pScene->mNumAnimations; animationID++ )
- {
+ for( unsigned int animationID = 0; animationID < pScene->mNumAnimations; animationID++ ) {
aiAnimation* animation = pScene->mAnimations[animationID];
- for( unsigned int animationChannel = 0; animationChannel < animation->mNumChannels; animationChannel++)
- {
+ for( unsigned int animationChannel = 0; animationChannel < animation->mNumChannels; animationChannel++) {
aiNodeAnim* anim = animation->mChannels[animationChannel];
- for( unsigned int posKey = 0; posKey < anim->mNumPositionKeys; posKey++)
- {
+ for( unsigned int posKey = 0; posKey < anim->mNumPositionKeys; posKey++) {
aiVectorKey& vectorKey = anim->mPositionKeys[posKey];
vectorKey.mValue *= mScale;
}
}
}
- for( unsigned int meshID = 0; meshID < pScene->mNumMeshes; meshID++)
- {
+ for( unsigned int meshID = 0; meshID < pScene->mNumMeshes; meshID++) {
aiMesh *mesh = pScene->mMeshes[meshID];
// Reconstruct mesh vertices to the new unit system
- for( unsigned int vertexID = 0; vertexID < mesh->mNumVertices; vertexID++)
- {
+ for( unsigned int vertexID = 0; vertexID < mesh->mNumVertices; vertexID++) {
aiVector3D& vertex = mesh->mVertices[vertexID];
vertex *= mScale;
}
-
// bone placement / scaling
- for( unsigned int boneID = 0; boneID < mesh->mNumBones; boneID++)
- {
+ for( unsigned int boneID = 0; boneID < mesh->mNumBones; boneID++) {
// Reconstruct matrix by transform rather than by scale
// This prevent scale values being changed which can
// be meaningful in some cases
@@ -152,12 +148,10 @@ void ScaleProcess::Execute( aiScene* pScene ) {
// animation mesh processing
// convert by position rather than scale.
- for( unsigned int animMeshID = 0; animMeshID < mesh->mNumAnimMeshes; animMeshID++)
- {
+ for( unsigned int animMeshID = 0; animMeshID < mesh->mNumAnimMeshes; animMeshID++) {
aiAnimMesh * animMesh = mesh->mAnimMeshes[animMeshID];
- for( unsigned int vertexID = 0; vertexID < animMesh->mNumVertices; vertexID++)
- {
+ for( unsigned int vertexID = 0; vertexID < animMesh->mNumVertices; vertexID++) {
aiVector3D& vertex = animMesh->mVertices[vertexID];
vertex *= mScale;
}
@@ -167,16 +161,17 @@ void ScaleProcess::Execute( aiScene* pScene ) {
traverseNodes( pScene->mRootNode );
}
+// ------------------------------------------------------------------------------------------------
void ScaleProcess::traverseNodes( aiNode *node, unsigned int nested_node_id ) {
applyScaling( node );
- for( size_t i = 0; i < node->mNumChildren; i++)
- {
+ for( size_t i = 0; i < node->mNumChildren; i++) {
// recurse into the tree until we are done!
traverseNodes( node->mChildren[i], nested_node_id+1 );
}
}
+// ------------------------------------------------------------------------------------------------
void ScaleProcess::applyScaling( aiNode *currentNode ) {
if ( nullptr != currentNode ) {
// Reconstruct matrix by transform rather than by scale
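
For illustration only (not part of the patch): a minimal sketch of the uniform global scaling that ScaleProcess::Execute performs above, where position-like data (mesh vertices, animation position keys) is multiplied by the scale factor and a factor of 1.0 is an early-out. The Vec3 type and applyGlobalScale are hypothetical stand-ins, not assimp API.

// Illustrative sketch: applies a uniform scale factor to position-like data,
// mirroring the vertex / position-key loops in ScaleProcess::Execute above.
#include <cstdio>
#include <vector>

struct Vec3 { float x, y, z; };

static void applyGlobalScale(std::vector<Vec3> &positions, float scale) {
    if (scale == 1.0f) {
        return; // nothing to scale, same early-out as the process
    }
    for (Vec3 &p : positions) {
        p.x *= scale;
        p.y *= scale;
        p.z *= scale;
    }
}

int main() {
    std::vector<Vec3> verts{{1, 2, 3}, {-0.5f, 0, 4}};
    applyGlobalScale(verts, 0.01f); // e.g. centimetres to metres
    std::printf("%g %g %g\n", verts[0].x, verts[0].y, verts[0].z);
    return 0;
}
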
diff --git a/code/PostProcessing/ScaleProcess.h b/code/PostProcessing/ScaleProcess.h
index b6eb75de7..ae1c3ed00 100644
--- a/code/PostProcessing/ScaleProcess.h
+++ b/code/PostProcessing/ScaleProcess.h
@@ -62,11 +62,10 @@ namespace Assimp {
*/
class ASSIMP_API ScaleProcess : public BaseProcess {
public:
- /// The default class constructor.
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
ScaleProcess();
-
- /// The class destructor.
- virtual ~ScaleProcess();
+ ~ScaleProcess() override = default;
/// Will set the scale manually.
void setScale( ai_real scale );
@@ -75,13 +74,13 @@ public:
ai_real getScale() const;
/// Overwritten, @see BaseProcess
- virtual bool IsActive( unsigned int pFlags ) const;
+ virtual bool IsActive( unsigned int pFlags ) const override;
/// Overwritten, @see BaseProcess
- virtual void SetupProperties( const Importer* pImp );
+ virtual void SetupProperties( const Importer* pImp ) override;
/// Overwritten, @see BaseProcess
- virtual void Execute( aiScene* pScene );
+ virtual void Execute( aiScene* pScene ) override;
private:
void traverseNodes( aiNode *currentNode, unsigned int nested_node_id = 0 );
diff --git a/code/PostProcessing/SortByPTypeProcess.cpp b/code/PostProcessing/SortByPTypeProcess.cpp
index 6312fa173..1be75fc48 100644
--- a/code/PostProcessing/SortByPTypeProcess.cpp
+++ b/code/PostProcessing/SortByPTypeProcess.cpp
@@ -59,10 +59,6 @@ SortByPTypeProcess::SortByPTypeProcess() :
// empty
}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-SortByPTypeProcess::~SortByPTypeProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool SortByPTypeProcess::IsActive(unsigned int pFlags) const {
diff --git a/code/PostProcessing/SortByPTypeProcess.h b/code/PostProcessing/SortByPTypeProcess.h
index e30342a86..ce4f7da62 100644
--- a/code/PostProcessing/SortByPTypeProcess.h
+++ b/code/PostProcessing/SortByPTypeProcess.h
@@ -60,17 +60,19 @@ namespace Assimp {
*/
class ASSIMP_API SortByPTypeProcess : public BaseProcess {
public:
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
SortByPTypeProcess();
- ~SortByPTypeProcess();
+ ~SortByPTypeProcess() override = default;
// -------------------------------------------------------------------
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
- void SetupProperties(const Importer* pImp);
+ void SetupProperties(const Importer* pImp) override;
private:
int mConfigRemoveMeshes;
diff --git a/code/PostProcessing/SplitByBoneCountProcess.cpp b/code/PostProcessing/SplitByBoneCountProcess.cpp
index a501d3bd6..5324160d4 100644
--- a/code/PostProcessing/SplitByBoneCountProcess.cpp
+++ b/code/PostProcessing/SplitByBoneCountProcess.cpp
@@ -40,7 +40,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------------
*/
-
/// @file SplitByBoneCountProcess.cpp
/// Implementation of the SplitByBoneCount postprocessing step
@@ -59,47 +58,36 @@ using namespace Assimp::Formatter;
// ------------------------------------------------------------------------------------------------
// Constructor
-SplitByBoneCountProcess::SplitByBoneCountProcess()
-{
- // set default, might be overridden by importer config
- mMaxBoneCount = AI_SBBC_DEFAULT_MAX_BONES;
+SplitByBoneCountProcess::SplitByBoneCountProcess() : mMaxBoneCount(AI_SBBC_DEFAULT_MAX_BONES) {
+ // empty
}
-// ------------------------------------------------------------------------------------------------
-// Destructor
-SplitByBoneCountProcess::~SplitByBoneCountProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag.
-bool SplitByBoneCountProcess::IsActive( unsigned int pFlags) const
-{
+bool SplitByBoneCountProcess::IsActive( unsigned int pFlags) const {
return !!(pFlags & aiProcess_SplitByBoneCount);
}
// ------------------------------------------------------------------------------------------------
// Updates internal properties
-void SplitByBoneCountProcess::SetupProperties(const Importer* pImp)
-{
+void SplitByBoneCountProcess::SetupProperties(const Importer* pImp) {
mMaxBoneCount = pImp->GetPropertyInteger(AI_CONFIG_PP_SBBC_MAX_BONES,AI_SBBC_DEFAULT_MAX_BONES);
}
// ------------------------------------------------------------------------------------------------
// Executes the post processing step on the given imported data.
-void SplitByBoneCountProcess::Execute( aiScene* pScene)
-{
+void SplitByBoneCountProcess::Execute( aiScene* pScene) {
ASSIMP_LOG_DEBUG("SplitByBoneCountProcess begin");
// early out
bool isNecessary = false;
for( unsigned int a = 0; a < pScene->mNumMeshes; ++a)
- if( pScene->mMeshes[a]->mNumBones > mMaxBoneCount )
- {
+ if( pScene->mMeshes[a]->mNumBones > mMaxBoneCount ) {
isNecessary = true;
break;
}
- if( !isNecessary )
- {
+ if( !isNecessary ) {
ASSIMP_LOG_DEBUG("SplitByBoneCountProcess early-out: no meshes with more than ", mMaxBoneCount, " bones." );
return;
}
@@ -111,28 +99,23 @@ void SplitByBoneCountProcess::Execute( aiScene* pScene)
// build a new array of meshes for the scene
std::vector<aiMesh*> meshes;
- for( unsigned int a = 0; a < pScene->mNumMeshes; ++a)
- {
+ for( unsigned int a = 0; a < pScene->mNumMeshes; ++a) {
aiMesh* srcMesh = pScene->mMeshes[a];
std::vector<aiMesh*> newMeshes;
SplitMesh( pScene->mMeshes[a], newMeshes);
// mesh was split
- if( !newMeshes.empty() )
- {
+ if( !newMeshes.empty() ) {
// store new meshes and indices of the new meshes
- for( unsigned int b = 0; b < newMeshes.size(); ++b)
- {
+ for( unsigned int b = 0; b < newMeshes.size(); ++b) {
mSubMeshIndices[a].push_back( static_cast<unsigned int>(meshes.size()));
meshes.push_back( newMeshes[b]);
}
// and destroy the source mesh. It should be completely contained inside the new submeshes
delete srcMesh;
- }
- else
- {
+ } else {
// Mesh is kept unchanged - store it's new place in the mesh array
mSubMeshIndices[a].push_back( static_cast<unsigned int>(meshes.size()));
meshes.push_back( srcMesh);
@@ -153,11 +136,9 @@ void SplitByBoneCountProcess::Execute( aiScene* pScene)
// ------------------------------------------------------------------------------------------------
// Splits the given mesh by bone count.
-void SplitByBoneCountProcess::SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const
-{
+void SplitByBoneCountProcess::SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const {
// skip if not necessary
- if( pMesh->mNumBones <= mMaxBoneCount )
- {
+ if( pMesh->mNumBones <= mMaxBoneCount ) {
return;
}
@@ -165,27 +146,22 @@ void SplitByBoneCountProcess::SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const
typedef std::pair<unsigned int, float> BoneWeight;
std::vector< std::vector<BoneWeight> > vertexBones( pMesh->mNumVertices);
- for( unsigned int a = 0; a < pMesh->mNumBones; ++a)
- {
+ for( unsigned int a = 0; a < pMesh->mNumBones; ++a) {
const aiBone* bone = pMesh->mBones[a];
- for( unsigned int b = 0; b < bone->mNumWeights; ++b)
- {
- if (bone->mWeights[b].mWeight > 0.0f)
- {
- int vertexId = bone->mWeights[b].mVertexId;
- vertexBones[vertexId].emplace_back(a, bone->mWeights[b].mWeight);
- if (vertexBones[vertexId].size() > mMaxBoneCount)
- {
- throw DeadlyImportError("SplitByBoneCountProcess: Single face requires more bones than specified max bone count!");
+ for( unsigned int b = 0; b < bone->mNumWeights; ++b) {
+ if (bone->mWeights[b].mWeight > 0.0f) {
+ int vertexId = bone->mWeights[b].mVertexId;
+ vertexBones[vertexId].emplace_back(a, bone->mWeights[b].mWeight);
+ if (vertexBones[vertexId].size() > mMaxBoneCount) {
+ throw DeadlyImportError("SplitByBoneCountProcess: Single face requires more bones than specified max bone count!");
+ }
}
- }
}
}
unsigned int numFacesHandled = 0;
std::vector<bool> isFaceHandled( pMesh->mNumFaces, false);
- while( numFacesHandled < pMesh->mNumFaces )
- {
+ while( numFacesHandled < pMesh->mNumFaces ) {
// which bones are used in the current submesh
unsigned int numBones = 0;
std::vector<bool> isBoneUsed( pMesh->mNumBones, false);
@@ -196,11 +172,9 @@ void SplitByBoneCountProcess::SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const
- for( unsigned int a = 0; a < pMesh->mNumFaces; ++a)
- {
+ for( unsigned int a = 0; a < pMesh->mNumFaces; ++a) {
// skip if the face is already stored in a submesh
- if( isFaceHandled[a] )
- {
+ if( isFaceHandled[a] ) {
continue;
}
// a small local set of new bones for the current face. State of all used bones for that face
@@ -209,33 +183,27 @@ void SplitByBoneCountProcess::SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const
const aiFace& face = pMesh->mFaces[a];
// check every vertex if its bones would still fit into the current submesh
- for( unsigned int b = 0; b < face.mNumIndices; ++b )
- {
- const std::vector<BoneWeight>& vb = vertexBones[face.mIndices[b]];
- for( unsigned int c = 0; c < vb.size(); ++c)
- {
- unsigned int boneIndex = vb[c].first;
- if( !isBoneUsed[boneIndex] )
- {
- newBonesAtCurrentFace.insert(boneIndex);
+ for( unsigned int b = 0; b < face.mNumIndices; ++b ) {
+ const std::vector<BoneWeight>& vb = vertexBones[face.mIndices[b]];
+ for( unsigned int c = 0; c < vb.size(); ++c) {
+ unsigned int boneIndex = vb[c].first;
+ if( !isBoneUsed[boneIndex] ) {
+ newBonesAtCurrentFace.insert(boneIndex);
+ }
}
- }
}
// leave out the face if the new bones required for this face don't fit the bone count limit anymore
- if( numBones + newBonesAtCurrentFace.size() > mMaxBoneCount )
- {
+ if( numBones + newBonesAtCurrentFace.size() > mMaxBoneCount ) {
continue;
}
// mark all new bones as necessary
- for (std::set<unsigned int>::iterator it = newBonesAtCurrentFace.begin(); it != newBonesAtCurrentFace.end(); ++it)
- {
- if (!isBoneUsed[*it])
- {
- isBoneUsed[*it] = true;
- numBones++;
- }
+ for (std::set<unsigned int>::iterator it = newBonesAtCurrentFace.begin(); it != newBonesAtCurrentFace.end(); ++it) {
+ if (!isBoneUsed[*it]) {
+ isBoneUsed[*it] = true;
+ numBones++;
+ }
}
// store the face index and the vertex count
@@ -261,27 +229,21 @@ void SplitByBoneCountProcess::SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const
newMesh->mNumVertices = numSubMeshVertices;
newMesh->mNumFaces = static_cast<unsigned int>(subMeshFaces.size());
newMesh->mVertices = new aiVector3D[newMesh->mNumVertices];
- if( pMesh->HasNormals() )
- {
+ if( pMesh->HasNormals() ) {
newMesh->mNormals = new aiVector3D[newMesh->mNumVertices];
}
- if( pMesh->HasTangentsAndBitangents() )
- {
+ if( pMesh->HasTangentsAndBitangents() ) {
newMesh->mTangents = new aiVector3D[newMesh->mNumVertices];
newMesh->mBitangents = new aiVector3D[newMesh->mNumVertices];
}
- for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++a )
- {
- if( pMesh->HasTextureCoords( a) )
- {
+ for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++a ) {
+ if( pMesh->HasTextureCoords( a) ) {
newMesh->mTextureCoords[a] = new aiVector3D[newMesh->mNumVertices];
}
newMesh->mNumUVComponents[a] = pMesh->mNumUVComponents[a];
}
- for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_COLOR_SETS; ++a )
- {
- if( pMesh->HasVertexColors( a) )
- {
+ for( unsigned int a = 0; a < AI_MAX_NUMBER_OF_COLOR_SETS; ++a ) {
+ if( pMesh->HasVertexColors( a) ) {
newMesh->mColors[a] = new aiColor4D[newMesh->mNumVertices];
}
}
@@ -290,41 +252,33 @@ void SplitByBoneCountProcess::SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const
newMesh->mFaces = new aiFace[subMeshFaces.size()];
unsigned int nvi = 0; // next vertex index
std::vector<unsigned int> previousVertexIndices( numSubMeshVertices, std::numeric_limits<unsigned int>::max()); // per new vertex: its index in the source mesh
- for( unsigned int a = 0; a < subMeshFaces.size(); ++a )
- {
+ for( unsigned int a = 0; a < subMeshFaces.size(); ++a ) {
const aiFace& srcFace = pMesh->mFaces[subMeshFaces[a]];
aiFace& dstFace = newMesh->mFaces[a];
dstFace.mNumIndices = srcFace.mNumIndices;
dstFace.mIndices = new unsigned int[dstFace.mNumIndices];
// accumulate linearly all the vertices of the source face
- for( unsigned int b = 0; b < dstFace.mNumIndices; ++b )
- {
+ for( unsigned int b = 0; b < dstFace.mNumIndices; ++b ) {
unsigned int srcIndex = srcFace.mIndices[b];
dstFace.mIndices[b] = nvi;
previousVertexIndices[nvi] = srcIndex;
newMesh->mVertices[nvi] = pMesh->mVertices[srcIndex];
- if( pMesh->HasNormals() )
- {
+ if( pMesh->HasNormals() ) {
newMesh->mNormals[nvi] = pMesh->mNormals[srcIndex];
}
- if( pMesh->HasTangentsAndBitangents() )
- {
+ if( pMesh->HasTangentsAndBitangents() ) {
newMesh->mTangents[nvi] = pMesh->mTangents[srcIndex];
newMesh->mBitangents[nvi] = pMesh->mBitangents[srcIndex];
}
- for( unsigned int c = 0; c < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++c )
- {
- if( pMesh->HasTextureCoords( c) )
- {
+ for( unsigned int c = 0; c < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++c ) {
+ if( pMesh->HasTextureCoords( c) ) {
newMesh->mTextureCoords[c][nvi] = pMesh->mTextureCoords[c][srcIndex];
}
}
- for( unsigned int c = 0; c < AI_MAX_NUMBER_OF_COLOR_SETS; ++c )
- {
- if( pMesh->HasVertexColors( c) )
- {
+ for( unsigned int c = 0; c < AI_MAX_NUMBER_OF_COLOR_SETS; ++c ) {
+ if( pMesh->HasVertexColors( c) ) {
newMesh->mColors[c][nvi] = pMesh->mColors[c][srcIndex];
}
}
@@ -340,10 +294,8 @@ void SplitByBoneCountProcess::SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const
newMesh->mBones = new aiBone*[numBones];
std::vector<unsigned int> mappedBoneIndex( pMesh->mNumBones, std::numeric_limits<unsigned int>::max());
- for( unsigned int a = 0; a < pMesh->mNumBones; ++a )
- {
- if( !isBoneUsed[a] )
- {
+ for( unsigned int a = 0; a < pMesh->mNumBones; ++a ) {
+ if( !isBoneUsed[a] ) {
continue;
}
@@ -360,24 +312,20 @@ void SplitByBoneCountProcess::SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const
ai_assert( newMesh->mNumBones == numBones );
// iterate over all new vertices and count which bones affected its old vertex in the source mesh
- for( unsigned int a = 0; a < numSubMeshVertices; ++a )
- {
+ for( unsigned int a = 0; a < numSubMeshVertices; ++a ) {
unsigned int oldIndex = previousVertexIndices[a];
const std::vector<BoneWeight>& bonesOnThisVertex = vertexBones[oldIndex];
- for( unsigned int b = 0; b < bonesOnThisVertex.size(); ++b )
- {
+ for( unsigned int b = 0; b < bonesOnThisVertex.size(); ++b ) {
unsigned int newBoneIndex = mappedBoneIndex[ bonesOnThisVertex[b].first ];
- if( newBoneIndex != std::numeric_limits<unsigned int>::max() )
- {
+ if( newBoneIndex != std::numeric_limits<unsigned int>::max() ) {
newMesh->mBones[newBoneIndex]->mNumWeights++;
}
}
}
// allocate all bone weight arrays accordingly
- for( unsigned int a = 0; a < newMesh->mNumBones; ++a )
- {
+ for( unsigned int a = 0; a < newMesh->mNumBones; ++a ) {
aiBone* bone = newMesh->mBones[a];
ai_assert( bone->mNumWeights > 0 );
bone->mWeights = new aiVertexWeight[bone->mNumWeights];
@@ -385,16 +333,14 @@ void SplitByBoneCountProcess::SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const
const std::vector<BoneWeight>& bonesOnThisVertex = vertexBones[previousIndex];
// all of the bones affecting it should be present in the new submesh, or else
// the face it comprises shouldn't be present
- for( unsigned int b = 0; b < bonesOnThisVertex.size(); ++b)
- {
+ for( unsigned int b = 0; b < bonesOnThisVertex.size(); ++b) {
unsigned int newBoneIndex = mappedBoneIndex[ bonesOnThisVertex[b].first ];
ai_assert( newBoneIndex != std::numeric_limits<unsigned int>::max() );
aiVertexWeight* dstWeight = newMesh->mBones[newBoneIndex]->mWeights + newMesh->mBones[newBoneIndex]->mNumWeights;
@@ -450,14 +396,11 @@ void SplitByBoneCountProcess::SplitMesh( const aiMesh* pMesh, std::vector<aiMesh*>& poNewMeshes) const
- if( pNode->mNumMeshes > 0 )
- {
+ if( pNode->mNumMeshes > 0 ) {
std::vector<unsigned int> newMeshList;
- for( unsigned int a = 0; a < pNode->mNumMeshes; ++a)
- {
+ for( unsigned int a = 0; a < pNode->mNumMeshes; ++a) {
unsigned int srcIndex = pNode->mMeshes[a];
const std::vector<unsigned int>& replaceMeshes = mSubMeshIndices[srcIndex];
newMeshList.insert( newMeshList.end(), replaceMeshes.begin(), replaceMeshes.end());
@@ -470,8 +413,7 @@ void SplitByBoneCountProcess::UpdateNode( aiNode* pNode) const
}
// do that also recursively for all children
- for( unsigned int a = 0; a < pNode->mNumChildren; ++a )
- {
+ for( unsigned int a = 0; a < pNode->mNumChildren; ++a ) {
UpdateNode( pNode->mChildren[a]);
}
}
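
The SplitMesh hunks above carry the core of this step: source bone indices are remapped to submesh-local indices, the number of weights per new bone is counted first, and only then are the weight arrays allocated and filled. Below is a minimal, self-contained sketch of that count-then-allocate idea; `BoneWeight`, `vertexBones`, `previousVertexIndices` and `mappedBoneIndex` are simplified stand-ins, not the actual assimp data structures.

~~~~~ cpp
// Minimal sketch of the bone-index remapping used when splitting a mesh.
// All containers here are simplified stand-ins for the aiMesh/aiBone layout.
#include <cstdio>
#include <limits>
#include <utility>
#include <vector>

using BoneWeight = std::pair<unsigned int, float>; // (source bone index, weight)

int main() {
    // Which source bones influence each (old) vertex.
    std::vector<std::vector<BoneWeight>> vertexBones = {
        {{0, 0.7f}, {2, 0.3f}}, // vertex 0
        {{2, 1.0f}},            // vertex 1
    };
    // Old vertex index for every vertex of the submesh.
    std::vector<unsigned int> previousVertexIndices = {0, 1, 0};

    // Source bones 0 and 2 are used by this submesh; bone 1 is not.
    const unsigned int kUnused = std::numeric_limits<unsigned int>::max();
    std::vector<unsigned int> mappedBoneIndex = {0, kUnused, 1}; // old -> new

    // Count how many weights each new bone will receive.
    std::vector<unsigned int> weightCount(2, 0);
    for (unsigned int newVertex = 0; newVertex < previousVertexIndices.size(); ++newVertex) {
        for (const BoneWeight &bw : vertexBones[previousVertexIndices[newVertex]]) {
            unsigned int newBone = mappedBoneIndex[bw.first];
            if (newBone != kUnused) {
                ++weightCount[newBone];
            }
        }
    }
    for (unsigned int b = 0; b < weightCount.size(); ++b) {
        std::printf("new bone %u gets %u weights\n", b, weightCount[b]);
    }
    return 0;
}
~~~~~

Counting before allocating keeps each bone's weight array exactly the right size, which is what the two loops in the hunk above do as well.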
diff --git a/code/PostProcessing/SplitByBoneCountProcess.h b/code/PostProcessing/SplitByBoneCountProcess.h
index 938b00c7f..625019e0c 100644
--- a/code/PostProcessing/SplitByBoneCountProcess.h
+++ b/code/PostProcessing/SplitByBoneCountProcess.h
@@ -4,7 +4,6 @@ Open Asset Import Library (assimp)
Copyright (c) 2006-2022, assimp team
-
All rights reserved.
Redistribution and use of this software in source and binary forms,
@@ -51,9 +50,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include
#include
-namespace Assimp
-{
-
+namespace Assimp {
/** Postprocessing filter to split meshes with many bones into submeshes
* so that each submesh has a certain max bone count.
@@ -61,34 +58,29 @@ namespace Assimp
* Applied BEFORE the JoinVertices-Step occurs.
* Returns NON-UNIQUE vertices, splits by bone count.
*/
-class SplitByBoneCountProcess : public BaseProcess
-{
+class SplitByBoneCountProcess : public BaseProcess {
public:
-
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
SplitByBoneCountProcess();
- ~SplitByBoneCountProcess();
+ ~SplitByBoneCountProcess() override = default;
-public:
- /** Returns whether the processing step is present in the given flag.
- * @param pFlags The processing flags the importer was called with. A
- * bitwise combination of #aiPostProcessSteps.
- * @return true if the process is present in this flag fields,
- * false if not.
- */
- bool IsActive( unsigned int pFlags) const;
+ /// @brief Returns whether the processing step is present in the given flag.
+ /// @param pFlags The processing flags the importer was called with. A
+ /// bitwise combination of #aiPostProcessSteps.
+ /// @return true if the process is present in this flag field, false if not.
+ bool IsActive( unsigned int pFlags) const override;
- /** Called prior to ExecuteOnScene().
- * The function is a request to the process to update its configuration
- * basing on the Importer's configuration property list.
- */
- virtual void SetupProperties(const Importer* pImp);
+ /// @brief Called prior to ExecuteOnScene().
+ /// The function is a request to the process to update its configuration
+ /// based on the Importer's configuration property list.
+ virtual void SetupProperties(const Importer* pImp) override;
protected:
- /** Executes the post processing step on the given imported data.
- * At the moment a process is not supposed to fail.
- * @param pScene The imported data to work at.
- */
- void Execute( aiScene* pScene);
+ /// Executes the post processing step on the given imported data.
+ /// At the moment a process is not supposed to fail.
+ /// @param pScene The imported data to work at.
+ void Execute( aiScene* pScene) override;
/// Splits the given mesh by bone count.
/// @param pMesh the Mesh to split. Is not changed at all, but might be superfluous in case it was split.
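
Most headers in this patch follow the same modernization: the hand-written empty destructor becomes `~X() override = default;` in the header, duplicate `public:` sections are removed, and virtual overrides are marked `override`. A tiny illustration of the pattern with placeholder class names (not assimp types):

~~~~~ cpp
// Sketch of the destructor modernization applied throughout this patch.
// Base and Derived are placeholders, not assimp classes.
struct Base {
    virtual ~Base() = default;          // virtual, so deleting via Base* is safe
    virtual bool IsActive(unsigned int flags) const = 0;
};

struct Derived : Base {
    ~Derived() override = default;      // defaulted: no out-of-line definition needed
    bool IsActive(unsigned int flags) const override { return (flags & 0x1u) != 0; }
};

int main() {
    Derived d;
    return d.IsActive(1u) ? 0 : 1;
}
~~~~~

Defaulting the destructor in the header is why the corresponding `~...() = default;` definitions disappear from the .cpp files in the hunks below.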
diff --git a/code/PostProcessing/SplitLargeMeshes.cpp b/code/PostProcessing/SplitLargeMeshes.cpp
index 151ac4991..73e0cc5d8 100644
--- a/code/PostProcessing/SplitLargeMeshes.cpp
+++ b/code/PostProcessing/SplitLargeMeshes.cpp
@@ -55,9 +55,6 @@ SplitLargeMeshesProcess_Triangle::SplitLargeMeshesProcess_Triangle() {
LIMIT = AI_SLM_DEFAULT_MAX_TRIANGLES;
}
-// ------------------------------------------------------------------------------------------------
-SplitLargeMeshesProcess_Triangle::~SplitLargeMeshesProcess_Triangle() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool SplitLargeMeshesProcess_Triangle::IsActive( unsigned int pFlags) const {
@@ -329,9 +326,6 @@ SplitLargeMeshesProcess_Vertex::SplitLargeMeshesProcess_Vertex() {
LIMIT = AI_SLM_DEFAULT_MAX_VERTICES;
}
-// ------------------------------------------------------------------------------------------------
-SplitLargeMeshesProcess_Vertex::~SplitLargeMeshesProcess_Vertex() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool SplitLargeMeshesProcess_Vertex::IsActive( unsigned int pFlags) const {
diff --git a/code/PostProcessing/SplitLargeMeshes.h b/code/PostProcessing/SplitLargeMeshes.h
index e5a8d4c1b..4e0d764c1 100644
--- a/code/PostProcessing/SplitLargeMeshes.h
+++ b/code/PostProcessing/SplitLargeMeshes.h
@@ -83,16 +83,15 @@ class SplitLargeMeshesProcess_Vertex;
* Applied BEFORE the JoinVertices-Step occurs.
* Returns NON-UNIQUE vertices, splits by triangle number.
*/
-class ASSIMP_API SplitLargeMeshesProcess_Triangle : public BaseProcess
-{
+class ASSIMP_API SplitLargeMeshesProcess_Triangle : public BaseProcess {
friend class SplitLargeMeshesProcess_Vertex;
public:
-
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
SplitLargeMeshesProcess_Triangle();
- ~SplitLargeMeshesProcess_Triangle();
+ ~SplitLargeMeshesProcess_Triangle() override = default;
-public:
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag.
* @param pFlags The processing flags the importer was called with. A
@@ -100,16 +99,14 @@ public:
* @return true if the process is present in this flag fields,
* false if not.
*/
- bool IsActive( unsigned int pFlags) const;
-
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Called prior to ExecuteOnScene().
* The function is a request to the process to update its configuration
* basing on the Importer's configuration property list.
*/
- virtual void SetupProperties(const Importer* pImp);
-
+ void SetupProperties(const Importer* pImp) override;
//! Set the split limit - needed for unit testing
inline void SetLimit(unsigned int l)
@@ -119,14 +116,12 @@ public:
inline unsigned int GetLimit() const
{return LIMIT;}
-public:
-
// -------------------------------------------------------------------
/** Executes the post processing step on the given imported data.
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
//! Apply the algorithm to a given mesh
@@ -144,36 +139,31 @@ public:
unsigned int LIMIT;
};
-
// ---------------------------------------------------------------------------
/** Post-processing filter to split large meshes into sub-meshes
*
* Applied AFTER the JoinVertices-Step occurs.
* Returns UNIQUE vertices, splits by vertex number.
*/
-class ASSIMP_API SplitLargeMeshesProcess_Vertex : public BaseProcess
-{
+class ASSIMP_API SplitLargeMeshesProcess_Vertex : public BaseProcess {
public:
-
SplitLargeMeshesProcess_Vertex();
- ~SplitLargeMeshesProcess_Vertex();
+ ~SplitLargeMeshesProcess_Vertex() override = default;
-public:
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag field.
* @param pFlags The processing flags the importer was called with. A bitwise
* combination of #aiPostProcessSteps.
* @return true if the process is present in this flag fields, false if not.
*/
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Called prior to ExecuteOnScene().
* The function is a request to the process to update its configuration
* basing on the Importer's configuration property list.
*/
- virtual void SetupProperties(const Importer* pImp);
-
+ void SetupProperties(const Importer* pImp) override;
//! Set the split limit - needed for unit testing
inline void SetLimit(unsigned int l)
@@ -183,14 +173,12 @@ public:
inline unsigned int GetLimit() const
{return LIMIT;}
-public:
-
// -------------------------------------------------------------------
/** Executes the post processing step on the given imported data.
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
//! Apply the algorithm to a given mesh
diff --git a/code/PostProcessing/TextureTransform.cpp b/code/PostProcessing/TextureTransform.cpp
index efbf4d2c6..2ed17f390 100644
--- a/code/PostProcessing/TextureTransform.cpp
+++ b/code/PostProcessing/TextureTransform.cpp
@@ -56,33 +56,24 @@ using namespace Assimp;
// ------------------------------------------------------------------------------------------------
// Constructor to be privately used by Importer
-TextureTransformStep::TextureTransformStep() :
- configFlags()
-{
+TextureTransformStep::TextureTransformStep() : configFlags() {
// nothing to do here
}
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-TextureTransformStep::~TextureTransformStep() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
-bool TextureTransformStep::IsActive( unsigned int pFlags) const
-{
+bool TextureTransformStep::IsActive( unsigned int pFlags) const {
return (pFlags & aiProcess_TransformUVCoords) != 0;
}
// ------------------------------------------------------------------------------------------------
// Setup properties
-void TextureTransformStep::SetupProperties(const Importer* pImp)
-{
+void TextureTransformStep::SetupProperties(const Importer* pImp) {
configFlags = pImp->GetPropertyInteger(AI_CONFIG_PP_TUV_EVALUATE,AI_UVTRAFO_ALL);
}
// ------------------------------------------------------------------------------------------------
-void TextureTransformStep::PreProcessUVTransform(STransformVecInfo& info)
-{
+void TextureTransformStep::PreProcessUVTransform(STransformVecInfo& info) {
/* This function tries to simplify the input UV transformation.
* That's very important as it allows us to reduce the number
* of output UV channels. The order in which the transformations
@@ -90,7 +81,7 @@ void TextureTransformStep::PreProcessUVTransform(STransformVecInfo& info)
*/
int rounded;
- char szTemp[512];
+ char szTemp[512] = {};
/* Optimize the rotation angle. That's slightly difficult as
* we have an inprecise floating-point number (when comparing
@@ -98,12 +89,10 @@ void TextureTransformStep::PreProcessUVTransform(STransformVecInfo& info)
* an epsilon of 5 degrees). If there is a rotation value, we can't
* perform any further optimizations.
*/
- if (info.mRotation)
- {
+ if (info.mRotation) {
float out = info.mRotation;
rounded = static_cast<int>((info.mRotation / static_cast<float>(AI_MATH_TWO_PI)));
- if (rounded)
- {
+ if (rounded) {
out -= rounded * static_cast<float>(AI_MATH_PI);
ASSIMP_LOG_INFO("Texture coordinate rotation ", info.mRotation, " can be simplified to ", out);
}
@@ -187,8 +176,7 @@ void TextureTransformStep::PreProcessUVTransform(STransformVecInfo& info)
}
// ------------------------------------------------------------------------------------------------
-void UpdateUVIndex(const std::list<TTUpdateInfo>& l, unsigned int n)
-{
+void UpdateUVIndex(const std::list<TTUpdateInfo>& l, unsigned int n) {
// Don't set if == 0 && wasn't set before
for (std::list<TTUpdateInfo>::const_iterator it = l.begin();it != l.end(); ++it) {
const TTUpdateInfo& info = *it;
@@ -203,8 +191,7 @@ void UpdateUVIndex(const std::list<TTUpdateInfo>& l, unsigned int n)
}
// ------------------------------------------------------------------------------------------------
-inline const char* MappingModeToChar(aiTextureMapMode map)
-{
+inline static const char* MappingModeToChar(aiTextureMapMode map) {
if (aiTextureMapMode_Wrap == map)
return "-w";
@@ -215,8 +202,7 @@ inline const char* MappingModeToChar(aiTextureMapMode map)
}
// ------------------------------------------------------------------------------------------------
-void TextureTransformStep::Execute( aiScene* pScene)
-{
+void TextureTransformStep::Execute( aiScene* pScene) {
ASSIMP_LOG_DEBUG("TransformUVCoordsProcess begin");
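
The PreProcessUVTransform hunk above reformats the code that folds a UV rotation back toward a small canonical value before deciding how many output UV channels are really needed. As a rough, standalone illustration of that idea only (not the exact simplification this step performs), wrapping an angle into [0, 2*pi) can look like this:

~~~~~ cpp
// Rough sketch of wrapping a rotation angle into [0, 2*pi).
// This illustrates the general idea only; the actual step applies its own
// epsilon-based simplification on top of this.
#include <cmath>
#include <cstdio>

static float WrapAngle(float radians) {
    const float twoPi = 6.283185307179586f;
    float wrapped = std::fmod(radians, twoPi);
    if (wrapped < 0.0f) {
        wrapped += twoPi;
    }
    return wrapped;
}

int main() {
    std::printf("%f\n", WrapAngle(7.0f));   // ~0.716815
    std::printf("%f\n", WrapAngle(-1.0f));  // ~5.283185
    return 0;
}
~~~~~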
diff --git a/code/PostProcessing/TextureTransform.h b/code/PostProcessing/TextureTransform.h
index c1cccf8ef..c9f0480ba 100644
--- a/code/PostProcessing/TextureTransform.h
+++ b/code/PostProcessing/TextureTransform.h
@@ -193,28 +193,23 @@ struct STransformVecInfo : public aiUVTransform {
/** Helper step to compute final UV coordinate sets if there are scalings
* or rotations in the original data read from the file.
*/
-class TextureTransformStep : public BaseProcess
-{
+class TextureTransformStep : public BaseProcess {
public:
-
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
TextureTransformStep();
- ~TextureTransformStep();
-
-public:
+ ~TextureTransformStep() override = default;
// -------------------------------------------------------------------
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
- void SetupProperties(const Importer* pImp);
-
+ void SetupProperties(const Importer* pImp) override;
protected:
-
-
// -------------------------------------------------------------------
/** Preprocess a specific UV transformation setup
*
@@ -223,10 +218,9 @@ protected:
void PreProcessUVTransform(STransformVecInfo& info);
private:
-
unsigned int configFlags;
};
-
-}
+
+} // namespace Assimp
#endif //! AI_TEXTURE_TRANSFORM_H_INCLUDED
diff --git a/code/PostProcessing/TriangulateProcess.cpp b/code/PostProcessing/TriangulateProcess.cpp
index 52e760361..52cfa66bf 100644
--- a/code/PostProcessing/TriangulateProcess.cpp
+++ b/code/PostProcessing/TriangulateProcess.cpp
@@ -156,15 +156,6 @@ namespace {
}
-
-// ------------------------------------------------------------------------------------------------
-// Constructor to be privately used by Importer
-TriangulateProcess::TriangulateProcess() = default;
-
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-TriangulateProcess::~TriangulateProcess() = default;
-
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
bool TriangulateProcess::IsActive( unsigned int pFlags) const
@@ -468,6 +459,21 @@ bool TriangulateProcess::TriangulateMesh( aiMesh* pMesh)
continue;
}
+ // Skip when the three points are collinear
+ aiVector2D left = *pnt0 - *pnt1;
+ aiVector2D right = *pnt2 - *pnt1;
+
+ left.Normalize();
+ right.Normalize();
+ auto mul = left * right;
+
+ // if the angle is near 0 or 180 degrees
+ if (std::abs(mul - 1.f) < ai_epsilon || std::abs(mul + 1.f) < ai_epsilon) {
+ // skip this ear
+ ASSIMP_LOG_WARN("Skipping an ear whose angle is near 0 or 180 degrees.");
+ continue;
+ }
+
// and no other point may be contained in this triangle
for ( tmp = 0; tmp < max; ++tmp) {
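
The new block in TriangulateMesh rejects an ear whose three points are almost collinear: the two edge vectors around the middle point are normalized and the ear is skipped when their dot product is close to +1 or -1. The sketch below reproduces that test on a plain 2D struct; `Vec2` and `kEpsilon` stand in for `aiVector2D` and `ai_epsilon`.

~~~~~ cpp
// Sketch of the degenerate-ear test: three points are treated as collinear
// when the normalized edge vectors point (almost) the same or opposite way.
#include <cmath>
#include <cstdio>

struct Vec2 { float x, y; };

static Vec2 Normalize(Vec2 v) {
    float len = std::sqrt(v.x * v.x + v.y * v.y);
    return (len > 0.0f) ? Vec2{v.x / len, v.y / len} : v;
}

static bool IsDegenerateEar(Vec2 p0, Vec2 p1, Vec2 p2, float eps) {
    Vec2 left = Normalize(Vec2{p0.x - p1.x, p0.y - p1.y});
    Vec2 right = Normalize(Vec2{p2.x - p1.x, p2.y - p1.y});
    float dot = left.x * right.x + left.y * right.y;
    // angle near 0 (dot ~ +1) or near 180 degrees (dot ~ -1)
    return std::fabs(dot - 1.0f) < eps || std::fabs(dot + 1.0f) < eps;
}

int main() {
    const float kEpsilon = 1e-5f;
    std::printf("%d\n", IsDegenerateEar({0, 0}, {1, 0}, {2, 0}, kEpsilon)); // 1: collinear
    std::printf("%d\n", IsDegenerateEar({0, 0}, {1, 0}, {1, 1}, kEpsilon)); // 0: proper corner
    return 0;
}
~~~~~

Skipping such ears avoids emitting zero-area triangles that later steps would otherwise have to clean up.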
diff --git a/code/PostProcessing/TriangulateProcess.h b/code/PostProcessing/TriangulateProcess.h
index ed5f4a587..ac31e4377 100644
--- a/code/PostProcessing/TriangulateProcess.h
+++ b/code/PostProcessing/TriangulateProcess.h
@@ -61,8 +61,10 @@ namespace Assimp {
*/
class ASSIMP_API TriangulateProcess : public BaseProcess {
public:
- TriangulateProcess();
- ~TriangulateProcess();
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
+ TriangulateProcess() = default;
+ ~TriangulateProcess() override = default;
// -------------------------------------------------------------------
/** Returns whether the processing step is present in the given flag field.
@@ -70,14 +72,14 @@ public:
* combination of #aiPostProcessSteps.
* @return true if the process is present in this flag fields, false if not.
*/
- bool IsActive( unsigned int pFlags) const;
+ bool IsActive( unsigned int pFlags) const override;
// -------------------------------------------------------------------
/** Executes the post processing step on the given imported data.
* At the moment a process is not supposed to fail.
* @param pScene The imported data to work at.
*/
- void Execute( aiScene* pScene);
+ void Execute( aiScene* pScene) override;
// -------------------------------------------------------------------
/** Triangulates the given mesh.
diff --git a/code/PostProcessing/ValidateDataStructure.cpp b/code/PostProcessing/ValidateDataStructure.cpp
index d234e220b..e31054972 100644
--- a/code/PostProcessing/ValidateDataStructure.cpp
+++ b/code/PostProcessing/ValidateDataStructure.cpp
@@ -60,12 +60,7 @@ using namespace Assimp;
// ------------------------------------------------------------------------------------------------
// Constructor to be privately used by Importer
-ValidateDSProcess::ValidateDSProcess() :
- mScene() {}
-
-// ------------------------------------------------------------------------------------------------
-// Destructor, private as well
-ValidateDSProcess::~ValidateDSProcess() = default;
+ValidateDSProcess::ValidateDSProcess() : mScene(nullptr) {}
// ------------------------------------------------------------------------------------------------
// Returns whether the processing step is present in the given flag field.
@@ -916,7 +911,12 @@ void ValidateDSProcess::Validate(const aiNode *pNode) {
nodeName, pNode->mNumChildren);
}
for (unsigned int i = 0; i < pNode->mNumChildren; ++i) {
- Validate(pNode->mChildren[i]);
+ const aiNode *pChild = pNode->mChildren[i];
+ Validate(pChild);
+ if (pChild->mParent != pNode) {
+ const char *parentName = (pChild->mParent != nullptr) ? pChild->mParent->mName.C_Str() : "null";
+ ReportError("aiNode \"%s\" child %i \"%s\" parent is someone else: \"%s\"", pNode->mName.C_Str(), i, pChild->mName.C_Str(), parentName);
+ }
}
}
}
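
The added loop in Validate(const aiNode*) now also verifies that every child's mParent points back at the node being validated. Below is a standalone sketch of that invariant check on a minimal node type; `Node` and `ValidateParents` are stand-ins for `aiNode` and the real validator.

~~~~~ cpp
// Sketch of the parent-link validation added above: every child must point
// back at its parent, recursively through the whole hierarchy.
#include <cstdio>
#include <vector>

struct Node {
    const char *name;
    Node *parent;
    std::vector<Node *> children;
};

static bool ValidateParents(const Node *node) {
    for (const Node *child : node->children) {
        if (child->parent != node) {
            std::printf("node \"%s\": child \"%s\" has the wrong parent\n",
                        node->name, child->name);
            return false;
        }
        if (!ValidateParents(child)) {
            return false;
        }
    }
    return true;
}

int main() {
    Node root{"root", nullptr, {}};
    Node a{"a", &root, {}};
    Node b{"b", &a, {}}; // wrong on purpose: should point at &root
    root.children = {&a, &b};
    return ValidateParents(&root) ? 0 : 1;
}
~~~~~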
diff --git a/code/PostProcessing/ValidateDataStructure.h b/code/PostProcessing/ValidateDataStructure.h
index 077a47b70..9cfd4ced1 100644
--- a/code/PostProcessing/ValidateDataStructure.h
+++ b/code/PostProcessing/ValidateDataStructure.h
@@ -69,22 +69,20 @@ namespace Assimp {
/** Validates the whole ASSIMP scene data structure for correctness.
* ImportErrorException is thrown of the scene is corrupt.*/
// --------------------------------------------------------------------------------------
-class ValidateDSProcess : public BaseProcess
-{
+class ValidateDSProcess : public BaseProcess {
public:
-
+ // -------------------------------------------------------------------
+ /// The default class constructor / destructor.
ValidateDSProcess();
- ~ValidateDSProcess();
-
-public:
- // -------------------------------------------------------------------
- bool IsActive( unsigned int pFlags) const;
+ ~ValidateDSProcess() override = default;
// -------------------------------------------------------------------
- void Execute( aiScene* pScene);
+ bool IsActive( unsigned int pFlags) const override;
+
+ // -------------------------------------------------------------------
+ void Execute( aiScene* pScene) override;
protected:
-
// -------------------------------------------------------------------
/** Report a validation error. This will throw an exception,
* control won't return.
diff --git a/contrib/draco/.cmake-format.py b/contrib/draco/.cmake-format.py
index 64f2495b4..5b36f67aa 100644
--- a/contrib/draco/.cmake-format.py
+++ b/contrib/draco/.cmake-format.py
@@ -1,102 +1,137 @@
-# Generated with cmake-format 0.5.1
-# How wide to allow formatted cmake files
-line_width = 80
-
-# How many spaces to tab for indent
-tab_size = 2
-
-# If arglists are longer than this, break them always
-max_subargs_per_line = 10
-
-# If true, separate flow control names from their parentheses with a space
-separate_ctrl_name_with_space = False
-
-# If true, separate function names from parentheses with a space
-separate_fn_name_with_space = False
-
-# If a statement is wrapped to more than one line, than dangle the closing
-# parenthesis on its own line
-dangle_parens = False
-
-# What character to use for bulleted lists
-bullet_char = '*'
-
-# What character to use as punctuation after numerals in an enumerated list
-enum_char = '.'
-
-# What style line endings to use in the output.
-line_ending = u'unix'
-
-# Format command names consistently as 'lower' or 'upper' case
-command_case = u'lower'
-
-# Format keywords consistently as 'lower' or 'upper' case
-keyword_case = u'unchanged'
-
-# Specify structure for custom cmake functions
-additional_commands = {
- "foo": {
- "flags": [
- "BAR",
- "BAZ"
- ],
- "kwargs": {
- "HEADERS": "*",
- "DEPENDS": "*",
- "SOURCES": "*"
- }
+with section('parse'):
+ # Specify structure for custom cmake functions
+ additional_commands = {
+ 'draco_add_emscripten_executable': {
+ 'kwargs': {
+ 'NAME': '*',
+ 'SOURCES': '*',
+ 'OUTPUT_NAME': '*',
+ 'DEFINES': '*',
+ 'INCLUDES': '*',
+ 'COMPILE_FLAGS': '*',
+ 'LINK_FLAGS': '*',
+ 'OBJLIB_DEPS': '*',
+ 'LIB_DEPS': '*',
+ 'GLUE_PATH': '*',
+ 'PRE_LINK_JS_SOURCES': '*',
+ 'POST_LINK_JS_SOURCES': '*',
+ 'FEATURES': '*',
+ },
+ 'pargs': 0,
+ },
+ 'draco_add_executable': {
+ 'kwargs': {
+ 'NAME': '*',
+ 'SOURCES': '*',
+ 'OUTPUT_NAME': '*',
+ 'TEST': 0,
+ 'DEFINES': '*',
+ 'INCLUDES': '*',
+ 'COMPILE_FLAGS': '*',
+ 'LINK_FLAGS': '*',
+ 'OBJLIB_DEPS': '*',
+ 'LIB_DEPS': '*',
+ },
+ 'pargs': 0,
+ },
+ 'draco_add_library': {
+ 'kwargs': {
+ 'NAME': '*',
+ 'TYPE': '*',
+ 'SOURCES': '*',
+ 'TEST': 0,
+ 'OUTPUT_NAME': '*',
+ 'DEFINES': '*',
+ 'INCLUDES': '*',
+ 'COMPILE_FLAGS': '*',
+ 'LINK_FLAGS': '*',
+ 'OBJLIB_DEPS': '*',
+ 'LIB_DEPS': '*',
+ 'PUBLIC_INCLUDES': '*',
+ },
+ 'pargs': 0,
+ },
+ 'draco_generate_emscripten_glue': {
+ 'kwargs': {
+ 'INPUT_IDL': '*',
+ 'OUTPUT_PATH': '*',
+ },
+ 'pargs': 0,
+ },
+ 'draco_get_required_emscripten_flags': {
+ 'kwargs': {
+ 'FLAG_LIST_VAR_COMPILER': '*',
+ 'FLAG_LIST_VAR_LINKER': '*',
+ },
+ 'pargs': 0,
+ },
+ 'draco_option': {
+ 'kwargs': {
+ 'NAME': '*',
+ 'HELPSTRING': '*',
+ 'VALUE': '*',
+ },
+ 'pargs': 0,
+ },
+ # Rules for built in CMake commands and those from dependencies.
+ 'list': {
+ 'kwargs': {
+ 'APPEND': '*',
+ 'FILTER': '*',
+ 'FIND': '*',
+ 'GET': '*',
+ 'INSERT': '*',
+ 'JOIN': '*',
+ 'LENGTH': '*',
+ 'POP_BACK': '*',
+ 'POP_FRONT': '*',
+ 'PREPEND': '*',
+ 'REMOVE_DUPLICATES': '*',
+ 'REMOVE_ITEM': '*',
+ 'REVERSE': '*',
+ 'SORT': '*',
+ 'SUBLIST': '*',
+ 'TRANSFORM': '*',
+ },
+ },
+ 'protobuf_generate': {
+ 'kwargs': {
+ 'IMPORT_DIRS': '*',
+ 'LANGUAGE': '*',
+ 'OUT_VAR': '*',
+ 'PROTOC_OUT_DIR': '*',
+ 'PROTOS': '*',
+ },
+ },
}
-}
-# A list of command names which should always be wrapped
-always_wrap = []
+with section('format'):
+ # Formatting options.
-# Specify the order of wrapping algorithms during successive reflow attempts
-algorithm_order = [0, 1, 2, 3, 4]
+ # How wide to allow formatted cmake files
+ line_width = 80
-# If true, the argument lists which are known to be sortable will be sorted
-# lexicographicall
-autosort = False
+ # How many spaces to tab for indent
+ tab_size = 2
-# enable comment markup parsing and reflow
-enable_markup = True
+ # If true, separate flow control names from their parentheses with a space
+ separate_ctrl_name_with_space = False
-# If comment markup is enabled, don't reflow the first comment block in
-# eachlistfile. Use this to preserve formatting of your
-# copyright/licensestatements.
-first_comment_is_literal = False
+ # If true, separate function names from parentheses with a space
+ separate_fn_name_with_space = False
-# If comment markup is enabled, don't reflow any comment block which matchesthis
-# (regex) pattern. Default is `None` (disabled).
-literal_comment_pattern = None
+ # If a statement is wrapped to more than one line, then dangle the closing
+ # parenthesis on its own line.
+ dangle_parens = False
-# Regular expression to match preformat fences in comments
-# default=r'^\s*([`~]{3}[`~]*)(.*)$'
-fence_pattern = u'^\\s*([`~]{3}[`~]*)(.*)$'
+ # Do not sort argument lists.
+ enable_sort = False
-# Regular expression to match rulers in comments
-# default=r'^\s*[^\w\s]{3}.*[^\w\s]{3}$'
-ruler_pattern = u'^\\s*[^\\w\\s]{3}.*[^\\w\\s]{3}$'
+ # What style line endings to use in the output.
+ line_ending = 'unix'
-# If true, emit the unicode byte-order mark (BOM) at the start of the file
-emit_byteorder_mark = False
+ # Format command names consistently as 'lower' or 'upper' case
+ command_case = 'canonical'
-# If a comment line starts with at least this many consecutive hash characters,
-# then don't lstrip() them off. This allows for lazy hash rulers where the first
-# hash char is not separated by space
-hashruler_min_length = 10
-
-# If true, then insert a space between the first hash char and remaining hash
-# chars in a hash ruler, and normalize its length to fill the column
-canonicalize_hashrulers = True
-
-# Specify the encoding of the input file. Defaults to utf-8.
-input_encoding = u'utf-8'
-
-# Specify the encoding of the output file. Defaults to utf-8. Note that cmake
-# only claims to support utf-8 so be careful when using anything else
-output_encoding = u'utf-8'
-
-# A dictionary containing any per-command configuration overrides. Currently
-# only `command_case` is supported.
-per_command = {}
+ # Format keywords consistently as 'lower' or 'upper' case
+ keyword_case = 'upper'
diff --git a/contrib/draco/.gitattributes b/contrib/draco/.gitattributes
new file mode 100644
index 000000000..96acfc612
--- /dev/null
+++ b/contrib/draco/.gitattributes
@@ -0,0 +1 @@
+*.obj eol=lf
\ No newline at end of file
diff --git a/contrib/draco/.gitmodules b/contrib/draco/.gitmodules
new file mode 100644
index 000000000..25f0a1c03
--- /dev/null
+++ b/contrib/draco/.gitmodules
@@ -0,0 +1,12 @@
+[submodule "third_party/googletest"]
+ path = third_party/googletest
+ url = https://github.com/google/googletest.git
+[submodule "third_party/eigen"]
+ path = third_party/eigen
+ url = https://gitlab.com/libeigen/eigen.git
+[submodule "third_party/tinygltf"]
+ path = third_party/tinygltf
+ url = https://github.com/syoyo/tinygltf.git
+[submodule "third_party/filesystem"]
+ path = third_party/filesystem
+ url = https://github.com/gulrak/filesystem
diff --git a/contrib/draco/BUILDING.md b/contrib/draco/BUILDING.md
index d33917b88..340b2b83b 100644
--- a/contrib/draco/BUILDING.md
+++ b/contrib/draco/BUILDING.md
@@ -4,8 +4,10 @@ _**Contents**_
* [Mac OS X](#mac-os-x)
* [Windows](#windows)
* [CMake Build Configuration](#cmake-build-configuration)
+ * [Transcoder](#transcoder)
* [Debugging and Optimization](#debugging-and-optimization)
* [Googletest Integration](#googletest-integration)
+ * [Third Party Libraries](#third-party-libraries)
* [Javascript Encoder/Decoder](#javascript-encoderdecoder)
* [WebAssembly Decoder](#webassembly-decoder)
* [WebAssembly Mesh Only Decoder](#webassembly-mesh-only-decoder)
@@ -72,6 +74,43 @@ C:\Users\nobody> cmake ../ -G "Visual Studio 16 2019" -A x64
CMake Build Configuration
-------------------------
+Transcoder
+----------
+
+Before attempting to build Draco with transcoding support you must run an
+additional Git command to obtain the submodules:
+
+~~~~~ bash
+# Run this command from within your Draco clone.
+$ git submodule update --init
+# See below if you prefer to use existing versions of Draco dependencies.
+~~~~~
+
+In order to build the `draco_transcoder` target, the transcoding support needs
+to be explicitly enabled when you run `cmake`, for example:
+
+~~~~~ bash
+$ cmake ../ -DDRACO_TRANSCODER_SUPPORTED=ON
+~~~~~
+
+The above option is currently not compatible with our Javascript or WebAssembly
+builds, but all other use cases are supported. Note that enabling transcoder
+support may increase the size of the produced libraries and executables
+compared to the default CMake settings.
+
+The following CMake variables can be used to configure Draco to use local
+copies of third party dependencies instead of git submodules.
+
+- `DRACO_EIGEN_PATH`: this path must contain an Eigen directory that includes
+ the Eigen sources.
+- `DRACO_FILESYSTEM_PATH`: this path must contain the ghc directory where the
+ filesystem includes are located.
+- `DRACO_TINYGLTF_PATH`: this path must contain tiny_gltf.h and its
+ dependencies.
+
+When not specified the Draco build requires the presence of the submodules that
+are stored within `draco/third_party`.
+
Debugging and Optimization
--------------------------
@@ -114,17 +153,52 @@ $ cmake ../ -DDRACO_SANITIZE=address
Googletest Integration
----------------------
-Draco includes testing support built using Googletest. To enable Googletest unit
-test support the DRACO_TESTS cmake variable must be turned on at cmake
-generation time:
+Draco includes testing support built using Googletest. The Googletest repository
+is included as a submodule of the Draco git repository. Run the following
+command to clone the Googletest repository:
+
+~~~~~ bash
+$ git submodule update --init
+~~~~~
+
+To enable Googletest unit test support the DRACO_TESTS cmake variable must be
+turned on at cmake generation time:
~~~~~ bash
$ cmake ../ -DDRACO_TESTS=ON
~~~~~
-When cmake is used as shown in the above example the googletest directory must
-be a sibling of the Draco repository root directory. To run the tests execute
-`draco_tests` from your build output directory.
+To run the tests execute `draco_tests` from your build output directory:
+
+~~~~~ bash
+$ ./draco_tests
+~~~~~
+
+Draco can be configured to use a local Googletest installation. The
+`DRACO_GOOGLETEST_PATH` variable overrides the behavior described above and
+configures Draco to use the Googletest at the specified path.
+
+Third Party Libraries
+---------------------
+
+When Draco is built with transcoding and/or testing support enabled the project
+has dependencies on third party libraries:
+
+- [Eigen](https://eigen.tuxfamily.org/)
+ - Provides various math utilities.
+- [Googletest](https://github.com/google/googletest)
+ - Provides testing support.
+- [Gulrak/filesystem](https://github.com/gulrak/filesystem)
+ - Provides C++17 std::filesystem emulation for pre-C++17 environments.
+- [TinyGLTF](https://github.com/syoyo/tinygltf)
+ - Provides GLTF I/O support.
+
+These dependencies are managed as Git submodules. To obtain the dependencies
+run the following command in your Draco repository:
+
+~~~~~ bash
+$ git submodule update --init
+~~~~~
WebAssembly Decoder
-------------------
diff --git a/contrib/draco/CMakeLists.txt b/contrib/draco/CMakeLists.txt
index 6ea9b21fd..a93267d25 100644
--- a/contrib/draco/CMakeLists.txt
+++ b/contrib/draco/CMakeLists.txt
@@ -1,7 +1,18 @@
-cmake_minimum_required(VERSION 3.12 FATAL_ERROR)
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
-# Draco requires C++11.
-set(CMAKE_CXX_STANDARD 11)
+cmake_minimum_required(VERSION 3.12 FATAL_ERROR)
project(draco C CXX)
if(NOT CMAKE_BUILD_TYPE)
@@ -10,21 +21,23 @@ endif()
set(draco_root "${CMAKE_CURRENT_SOURCE_DIR}")
set(draco_src_root "${draco_root}/src/draco")
-set(draco_build "${Assimp_BINARY_DIR}")
+set(draco_build "${CMAKE_BINARY_DIR}")
if("${draco_root}" STREQUAL "${draco_build}")
message(
- FATAL_ERROR "Building from within the Draco source tree is not supported.\n"
- "Hint: Run these commands\n"
- "$ rm -rf CMakeCache.txt CMakeFiles\n"
- "$ mkdir -p ../draco_build\n" "$ cd ../draco_build\n"
- "And re-run CMake from the draco_build directory.")
+ FATAL_ERROR
+ "Building from within the Draco source tree is not supported.\n"
+ "Hint: Run these commands\n"
+ "$ rm -rf CMakeCache.txt CMakeFiles\n"
+ "$ mkdir -p ../draco_build\n"
+ "$ cd ../draco_build\n"
+ "And re-run CMake from the draco_build directory.")
endif()
-include(CMakePackageConfigHelpers)
include(FindPythonInterp)
include("${draco_root}/cmake/draco_build_definitions.cmake")
include("${draco_root}/cmake/draco_cpu_detection.cmake")
+include("${draco_root}/cmake/draco_dependencies.cmake")
include("${draco_root}/cmake/draco_emscripten.cmake")
include("${draco_root}/cmake/draco_flags.cmake")
include("${draco_root}/cmake/draco_helpers.cmake")
@@ -49,6 +62,7 @@ draco_track_configuration_variable(DRACO_GENERATED_SOURCES_DIRECTORY)
# Controls use of std::mutex and absl::Mutex in ThreadPool.
draco_track_configuration_variable(DRACO_THREADPOOL_USE_STD_MUTEX)
+
if(DRACO_VERBOSE)
draco_dump_cmake_flag_variables()
draco_dump_tracked_configuration_variables()
@@ -68,29 +82,32 @@ draco_reset_target_lists()
draco_setup_options()
draco_set_build_definitions()
draco_set_cxx_flags()
+draco_set_exe_linker_flags()
draco_generate_features_h()
# Draco source file listing variables.
-list(APPEND draco_attributes_sources
- "${draco_src_root}/attributes/attribute_octahedron_transform.cc"
- "${draco_src_root}/attributes/attribute_octahedron_transform.h"
- "${draco_src_root}/attributes/attribute_quantization_transform.cc"
- "${draco_src_root}/attributes/attribute_quantization_transform.h"
- "${draco_src_root}/attributes/attribute_transform.cc"
- "${draco_src_root}/attributes/attribute_transform.h"
- "${draco_src_root}/attributes/attribute_transform_data.h"
- "${draco_src_root}/attributes/attribute_transform_type.h"
- "${draco_src_root}/attributes/geometry_attribute.cc"
- "${draco_src_root}/attributes/geometry_attribute.h"
- "${draco_src_root}/attributes/geometry_indices.h"
- "${draco_src_root}/attributes/point_attribute.cc"
- "${draco_src_root}/attributes/point_attribute.h")
+list(
+ APPEND draco_attributes_sources
+ "${draco_src_root}/attributes/attribute_octahedron_transform.cc"
+ "${draco_src_root}/attributes/attribute_octahedron_transform.h"
+ "${draco_src_root}/attributes/attribute_quantization_transform.cc"
+ "${draco_src_root}/attributes/attribute_quantization_transform.h"
+ "${draco_src_root}/attributes/attribute_transform.cc"
+ "${draco_src_root}/attributes/attribute_transform.h"
+ "${draco_src_root}/attributes/attribute_transform_data.h"
+ "${draco_src_root}/attributes/attribute_transform_type.h"
+ "${draco_src_root}/attributes/geometry_attribute.cc"
+ "${draco_src_root}/attributes/geometry_attribute.h"
+ "${draco_src_root}/attributes/geometry_indices.h"
+ "${draco_src_root}/attributes/point_attribute.cc"
+ "${draco_src_root}/attributes/point_attribute.h")
list(
APPEND
draco_compression_attributes_dec_sources
"${draco_src_root}/compression/attributes/attributes_decoder.cc"
"${draco_src_root}/compression/attributes/attributes_decoder.h"
+ "${draco_src_root}/compression/attributes/attributes_decoder_interface.h"
"${draco_src_root}/compression/attributes/kd_tree_attributes_decoder.cc"
"${draco_src_root}/compression/attributes/kd_tree_attributes_decoder.h"
"${draco_src_root}/compression/attributes/kd_tree_attributes_shared.h"
@@ -107,7 +124,7 @@ list(
"${draco_src_root}/compression/attributes/sequential_normal_attribute_decoder.h"
"${draco_src_root}/compression/attributes/sequential_quantization_attribute_decoder.cc"
"${draco_src_root}/compression/attributes/sequential_quantization_attribute_decoder.h"
- )
+)
list(
APPEND
@@ -128,7 +145,7 @@ list(
"${draco_src_root}/compression/attributes/sequential_normal_attribute_encoder.h"
"${draco_src_root}/compression/attributes/sequential_quantization_attribute_encoder.cc"
"${draco_src_root}/compression/attributes/sequential_quantization_attribute_encoder.h"
- )
+)
list(
@@ -160,7 +177,7 @@ list(
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h"
- )
+)
list(
APPEND
@@ -192,7 +209,7 @@ list(
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h"
- )
+)
list(
APPEND
@@ -217,27 +234,34 @@ list(
"${draco_src_root}/compression/bit_coders/symbol_bit_encoder.cc"
"${draco_src_root}/compression/bit_coders/symbol_bit_encoder.h")
-list(APPEND draco_enc_config_sources
- "${draco_src_root}/compression/config/compression_shared.h"
- "${draco_src_root}/compression/config/draco_options.h"
- "${draco_src_root}/compression/config/encoder_options.h"
- "${draco_src_root}/compression/config/encoding_features.h")
+list(
+ APPEND draco_enc_config_sources
+ "${draco_src_root}/compression/config/compression_shared.h"
+ "${draco_src_root}/compression/config/draco_options.h"
+ "${draco_src_root}/compression/config/encoder_options.h"
+ "${draco_src_root}/compression/config/encoding_features.h")
-list(APPEND draco_dec_config_sources
- "${draco_src_root}/compression/config/compression_shared.h"
- "${draco_src_root}/compression/config/decoder_options.h"
- "${draco_src_root}/compression/config/draco_options.h")
+list(
+ APPEND draco_dec_config_sources
+ "${draco_src_root}/compression/config/compression_shared.h"
+ "${draco_src_root}/compression/config/decoder_options.h"
+ "${draco_src_root}/compression/config/draco_options.h")
+
+list(APPEND draco_compression_options_sources
+ "${draco_src_root}/compression/draco_compression_options.cc"
+ "${draco_src_root}/compression/draco_compression_options.h")
list(APPEND draco_compression_decode_sources
"${draco_src_root}/compression/decode.cc"
"${draco_src_root}/compression/decode.h")
-list(APPEND draco_compression_encode_sources
- "${draco_src_root}/compression/encode.cc"
- "${draco_src_root}/compression/encode.h"
- "${draco_src_root}/compression/encode_base.h"
- "${draco_src_root}/compression/expert_encode.cc"
- "${draco_src_root}/compression/expert_encode.h")
+list(
+ APPEND draco_compression_encode_sources
+ "${draco_src_root}/compression/encode.cc"
+ "${draco_src_root}/compression/encode.h"
+ "${draco_src_root}/compression/encode_base.h"
+ "${draco_src_root}/compression/expert_encode.cc"
+ "${draco_src_root}/compression/expert_encode.h")
list(
APPEND
@@ -291,7 +315,7 @@ list(
"${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_decoder.h"
"${draco_src_root}/compression/point_cloud/point_cloud_sequential_decoder.cc"
"${draco_src_root}/compression/point_cloud/point_cloud_sequential_decoder.h"
- )
+)
list(
APPEND
@@ -302,112 +326,126 @@ list(
"${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_encoder.h"
"${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoder.cc"
"${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoder.h"
- )
+)
-list(APPEND draco_compression_entropy_sources
- "${draco_src_root}/compression/entropy/ans.h"
- "${draco_src_root}/compression/entropy/rans_symbol_coding.h"
- "${draco_src_root}/compression/entropy/rans_symbol_decoder.h"
- "${draco_src_root}/compression/entropy/rans_symbol_encoder.h"
- "${draco_src_root}/compression/entropy/shannon_entropy.cc"
- "${draco_src_root}/compression/entropy/shannon_entropy.h"
- "${draco_src_root}/compression/entropy/symbol_decoding.cc"
- "${draco_src_root}/compression/entropy/symbol_decoding.h"
- "${draco_src_root}/compression/entropy/symbol_encoding.cc"
- "${draco_src_root}/compression/entropy/symbol_encoding.h")
+list(
+ APPEND draco_compression_entropy_sources
+ "${draco_src_root}/compression/entropy/ans.h"
+ "${draco_src_root}/compression/entropy/rans_symbol_coding.h"
+ "${draco_src_root}/compression/entropy/rans_symbol_decoder.h"
+ "${draco_src_root}/compression/entropy/rans_symbol_encoder.h"
+ "${draco_src_root}/compression/entropy/shannon_entropy.cc"
+ "${draco_src_root}/compression/entropy/shannon_entropy.h"
+ "${draco_src_root}/compression/entropy/symbol_decoding.cc"
+ "${draco_src_root}/compression/entropy/symbol_decoding.h"
+ "${draco_src_root}/compression/entropy/symbol_encoding.cc"
+ "${draco_src_root}/compression/entropy/symbol_encoding.h")
-list(APPEND draco_core_sources
- "${draco_src_root}/core/bit_utils.cc"
- "${draco_src_root}/core/bit_utils.h"
- "${draco_src_root}/core/bounding_box.cc"
- "${draco_src_root}/core/bounding_box.h"
- "${draco_src_root}/core/cycle_timer.cc"
- "${draco_src_root}/core/cycle_timer.h"
- "${draco_src_root}/core/data_buffer.cc"
- "${draco_src_root}/core/data_buffer.h"
- "${draco_src_root}/core/decoder_buffer.cc"
- "${draco_src_root}/core/decoder_buffer.h"
- "${draco_src_root}/core/divide.cc"
- "${draco_src_root}/core/divide.h"
- "${draco_src_root}/core/draco_index_type.h"
- "${draco_src_root}/core/draco_index_type_vector.h"
- "${draco_src_root}/core/draco_types.cc"
- "${draco_src_root}/core/draco_types.h"
- "${draco_src_root}/core/encoder_buffer.cc"
- "${draco_src_root}/core/encoder_buffer.h"
- "${draco_src_root}/core/hash_utils.cc"
- "${draco_src_root}/core/hash_utils.h"
- "${draco_src_root}/core/macros.h"
- "${draco_src_root}/core/math_utils.h"
- "${draco_src_root}/core/options.cc"
- "${draco_src_root}/core/options.h"
- "${draco_src_root}/core/quantization_utils.cc"
- "${draco_src_root}/core/quantization_utils.h"
- "${draco_src_root}/core/status.h"
- "${draco_src_root}/core/status_or.h"
- "${draco_src_root}/core/varint_decoding.h"
- "${draco_src_root}/core/varint_encoding.h"
- "${draco_src_root}/core/vector_d.h")
+list(
+ APPEND draco_core_sources
+ "${draco_src_root}/core/bit_utils.cc"
+ "${draco_src_root}/core/bit_utils.h"
+ "${draco_src_root}/core/bounding_box.cc"
+ "${draco_src_root}/core/bounding_box.h"
+ "${draco_src_root}/core/constants.h"
+ "${draco_src_root}/core/cycle_timer.cc"
+ "${draco_src_root}/core/cycle_timer.h"
+ "${draco_src_root}/core/data_buffer.cc"
+ "${draco_src_root}/core/data_buffer.h"
+ "${draco_src_root}/core/decoder_buffer.cc"
+ "${draco_src_root}/core/decoder_buffer.h"
+ "${draco_src_root}/core/divide.cc"
+ "${draco_src_root}/core/divide.h"
+ "${draco_src_root}/core/draco_index_type.h"
+ "${draco_src_root}/core/draco_index_type_vector.h"
+ "${draco_src_root}/core/draco_types.cc"
+ "${draco_src_root}/core/draco_types.h"
+ "${draco_src_root}/core/draco_version.h"
+ "${draco_src_root}/core/encoder_buffer.cc"
+ "${draco_src_root}/core/encoder_buffer.h"
+ "${draco_src_root}/core/hash_utils.cc"
+ "${draco_src_root}/core/hash_utils.h"
+ "${draco_src_root}/core/macros.h"
+ "${draco_src_root}/core/math_utils.h"
+ "${draco_src_root}/core/options.cc"
+ "${draco_src_root}/core/options.h"
+ "${draco_src_root}/core/quantization_utils.cc"
+ "${draco_src_root}/core/quantization_utils.h"
+ "${draco_src_root}/core/status.h"
+ "${draco_src_root}/core/status_or.h"
+ "${draco_src_root}/core/varint_decoding.h"
+ "${draco_src_root}/core/varint_encoding.h"
+ "${draco_src_root}/core/vector_d.h")
-list(APPEND draco_io_sources
- "${draco_src_root}/io/file_reader_factory.cc"
- "${draco_src_root}/io/file_reader_factory.h"
- "${draco_src_root}/io/file_reader_interface.h"
- "${draco_src_root}/io/file_utils.cc"
- "${draco_src_root}/io/file_utils.h"
- "${draco_src_root}/io/file_writer_factory.cc"
- "${draco_src_root}/io/file_writer_factory.h"
- "${draco_src_root}/io/file_writer_interface.h"
- "${draco_src_root}/io/file_writer_utils.h"
- "${draco_src_root}/io/file_writer_utils.cc"
- "${draco_src_root}/io/mesh_io.cc"
- "${draco_src_root}/io/mesh_io.h"
- "${draco_src_root}/io/obj_decoder.cc"
- "${draco_src_root}/io/obj_decoder.h"
- "${draco_src_root}/io/obj_encoder.cc"
- "${draco_src_root}/io/obj_encoder.h"
- "${draco_src_root}/io/parser_utils.cc"
- "${draco_src_root}/io/parser_utils.h"
- "${draco_src_root}/io/ply_decoder.cc"
- "${draco_src_root}/io/ply_decoder.h"
- "${draco_src_root}/io/ply_encoder.cc"
- "${draco_src_root}/io/ply_encoder.h"
- "${draco_src_root}/io/ply_property_reader.h"
- "${draco_src_root}/io/ply_property_writer.h"
- "${draco_src_root}/io/ply_reader.cc"
- "${draco_src_root}/io/ply_reader.h"
- "${draco_src_root}/io/point_cloud_io.cc"
- "${draco_src_root}/io/point_cloud_io.h"
- "${draco_src_root}/io/stdio_file_reader.cc"
- "${draco_src_root}/io/stdio_file_reader.h"
- "${draco_src_root}/io/stdio_file_writer.cc"
- "${draco_src_root}/io/stdio_file_writer.h")
+list(
+ APPEND draco_io_sources
+ "${draco_src_root}/io/file_reader_factory.cc"
+ "${draco_src_root}/io/file_reader_factory.h"
+ "${draco_src_root}/io/file_reader_interface.h"
+ "${draco_src_root}/io/file_utils.cc"
+ "${draco_src_root}/io/file_utils.h"
+ "${draco_src_root}/io/file_writer_factory.cc"
+ "${draco_src_root}/io/file_writer_factory.h"
+ "${draco_src_root}/io/file_writer_interface.h"
+ "${draco_src_root}/io/file_writer_utils.h"
+ "${draco_src_root}/io/file_writer_utils.cc"
+ "${draco_src_root}/io/mesh_io.cc"
+ "${draco_src_root}/io/mesh_io.h"
+ "${draco_src_root}/io/obj_decoder.cc"
+ "${draco_src_root}/io/obj_decoder.h"
+ "${draco_src_root}/io/obj_encoder.cc"
+ "${draco_src_root}/io/obj_encoder.h"
+ "${draco_src_root}/io/parser_utils.cc"
+ "${draco_src_root}/io/parser_utils.h"
+ "${draco_src_root}/io/ply_decoder.cc"
+ "${draco_src_root}/io/ply_decoder.h"
+ "${draco_src_root}/io/ply_encoder.cc"
+ "${draco_src_root}/io/ply_encoder.h"
+ "${draco_src_root}/io/ply_property_reader.h"
+ "${draco_src_root}/io/ply_property_writer.h"
+ "${draco_src_root}/io/ply_reader.cc"
+ "${draco_src_root}/io/ply_reader.h"
+ "${draco_src_root}/io/stl_decoder.cc"
+ "${draco_src_root}/io/stl_decoder.h"
+ "${draco_src_root}/io/stl_encoder.cc"
+ "${draco_src_root}/io/stl_encoder.h"
+ "${draco_src_root}/io/point_cloud_io.cc"
+ "${draco_src_root}/io/point_cloud_io.h"
+ "${draco_src_root}/io/stdio_file_reader.cc"
+ "${draco_src_root}/io/stdio_file_reader.h"
+ "${draco_src_root}/io/stdio_file_writer.cc"
+ "${draco_src_root}/io/stdio_file_writer.h")
-list(APPEND draco_mesh_sources
- "${draco_src_root}/mesh/corner_table.cc"
- "${draco_src_root}/mesh/corner_table.h"
- "${draco_src_root}/mesh/corner_table_iterators.h"
- "${draco_src_root}/mesh/mesh.cc"
- "${draco_src_root}/mesh/mesh.h"
- "${draco_src_root}/mesh/mesh_are_equivalent.cc"
- "${draco_src_root}/mesh/mesh_are_equivalent.h"
- "${draco_src_root}/mesh/mesh_attribute_corner_table.cc"
- "${draco_src_root}/mesh/mesh_attribute_corner_table.h"
- "${draco_src_root}/mesh/mesh_cleanup.cc"
- "${draco_src_root}/mesh/mesh_cleanup.h"
- "${draco_src_root}/mesh/mesh_misc_functions.cc"
- "${draco_src_root}/mesh/mesh_misc_functions.h"
- "${draco_src_root}/mesh/mesh_stripifier.cc"
- "${draco_src_root}/mesh/mesh_stripifier.h"
- "${draco_src_root}/mesh/triangle_soup_mesh_builder.cc"
- "${draco_src_root}/mesh/triangle_soup_mesh_builder.h"
- "${draco_src_root}/mesh/valence_cache.h")
+list(
+ APPEND draco_mesh_sources
+ "${draco_src_root}/mesh/corner_table.cc"
+ "${draco_src_root}/mesh/corner_table.h"
+ "${draco_src_root}/mesh/corner_table_iterators.h"
+ "${draco_src_root}/mesh/mesh.cc"
+ "${draco_src_root}/mesh/mesh.h"
+ "${draco_src_root}/mesh/mesh_are_equivalent.cc"
+ "${draco_src_root}/mesh/mesh_are_equivalent.h"
+ "${draco_src_root}/mesh/mesh_attribute_corner_table.cc"
+ "${draco_src_root}/mesh/mesh_attribute_corner_table.h"
+ "${draco_src_root}/mesh/mesh_cleanup.cc"
+ "${draco_src_root}/mesh/mesh_cleanup.h"
+ "${draco_src_root}/mesh/mesh_features.cc"
+ "${draco_src_root}/mesh/mesh_features.h"
+ "${draco_src_root}/mesh/mesh_indices.h"
+ "${draco_src_root}/mesh/mesh_misc_functions.cc"
+ "${draco_src_root}/mesh/mesh_misc_functions.h"
+ "${draco_src_root}/mesh/mesh_stripifier.cc"
+ "${draco_src_root}/mesh/mesh_stripifier.h"
+ "${draco_src_root}/mesh/triangle_soup_mesh_builder.cc"
+ "${draco_src_root}/mesh/triangle_soup_mesh_builder.h"
+ "${draco_src_root}/mesh/valence_cache.h")
-list(APPEND draco_point_cloud_sources
- "${draco_src_root}/point_cloud/point_cloud.cc"
- "${draco_src_root}/point_cloud/point_cloud.h"
- "${draco_src_root}/point_cloud/point_cloud_builder.cc"
- "${draco_src_root}/point_cloud/point_cloud_builder.h")
+list(
+ APPEND draco_point_cloud_sources
+ "${draco_src_root}/point_cloud/point_cloud.cc"
+ "${draco_src_root}/point_cloud/point_cloud.h"
+ "${draco_src_root}/point_cloud/point_cloud_builder.cc"
+ "${draco_src_root}/point_cloud/point_cloud_builder.h")
list(
APPEND
@@ -424,7 +462,7 @@ list(
"${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h"
"${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_decoder.cc"
"${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_decoder.h"
- )
+)
list(
APPEND
@@ -433,13 +471,18 @@ list(
"${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h"
"${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_encoder.cc"
"${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_encoder.h"
- )
+)
-list(APPEND draco_metadata_sources
- "${draco_src_root}/metadata/geometry_metadata.cc"
- "${draco_src_root}/metadata/geometry_metadata.h"
- "${draco_src_root}/metadata/metadata.cc"
- "${draco_src_root}/metadata/metadata.h")
+list(
+ APPEND draco_metadata_sources
+ "${draco_src_root}/metadata/geometry_metadata.cc"
+ "${draco_src_root}/metadata/geometry_metadata.h"
+ "${draco_src_root}/metadata/metadata.cc"
+ "${draco_src_root}/metadata/metadata.h"
+ "${draco_src_root}/metadata/property_table.cc"
+ "${draco_src_root}/metadata/property_table.h"
+ "${draco_src_root}/metadata/structural_metadata.cc"
+ "${draco_src_root}/metadata/structural_metadata.h")
list(APPEND draco_metadata_enc_sources
"${draco_src_root}/metadata/metadata_encoder.cc"
@@ -465,7 +508,7 @@ list(
APPEND draco_js_dec_sources
"${draco_src_root}/javascript/emscripten/decoder_webidl_wrapper.cc"
"${draco_src_root}/javascript/emscripten/draco_decoder_glue_wrapper.cc"
- )
+)
list(
APPEND draco_js_enc_sources
@@ -477,14 +520,14 @@ list(
draco_animation_js_dec_sources
"${draco_src_root}/javascript/emscripten/animation_decoder_webidl_wrapper.cc"
"${draco_src_root}/javascript/emscripten/draco_animation_decoder_glue_wrapper.cc"
- )
+)
list(
APPEND
draco_animation_js_enc_sources
"${draco_src_root}/javascript/emscripten/animation_encoder_webidl_wrapper.cc"
"${draco_src_root}/javascript/emscripten/draco_animation_encoder_glue_wrapper.cc"
- )
+)
list(APPEND draco_unity_plug_sources
"${draco_src_root}/unity/draco_unity_plugin.cc"
@@ -494,49 +537,133 @@ list(APPEND draco_maya_plug_sources
"${draco_src_root}/maya/draco_maya_plugin.cc"
"${draco_src_root}/maya/draco_maya_plugin.h")
+if(DRACO_TRANSCODER_SUPPORTED)
+ list(
+ APPEND draco_animation_sources
+ "${draco_src_root}/animation/animation.cc"
+ "${draco_src_root}/animation/animation.h"
+ "${draco_src_root}/animation/node_animation_data.h"
+ "${draco_src_root}/animation/skin.cc"
+ "${draco_src_root}/animation/skin.h")
+
+ list(
+ APPEND draco_io_sources
+ "${draco_src_root}/io/gltf_decoder.cc"
+ "${draco_src_root}/io/gltf_decoder.h"
+ "${draco_src_root}/io/gltf_encoder.cc"
+ "${draco_src_root}/io/gltf_encoder.h"
+ "${draco_src_root}/io/gltf_utils.cc"
+ "${draco_src_root}/io/gltf_utils.h"
+ "${draco_src_root}/io/image_compression_options.h"
+ "${draco_src_root}/io/scene_io.cc"
+ "${draco_src_root}/io/scene_io.h"
+ "${draco_src_root}/io/texture_io.cc"
+ "${draco_src_root}/io/texture_io.h"
+ "${draco_src_root}/io/tiny_gltf_utils.cc"
+ "${draco_src_root}/io/tiny_gltf_utils.h")
+
+ list(
+ APPEND draco_material_sources
+ "${draco_src_root}/material/material.cc"
+ "${draco_src_root}/material/material.h"
+ "${draco_src_root}/material/material_library.cc"
+ "${draco_src_root}/material/material_library.h")
+
+ list(
+ APPEND draco_mesh_sources
+ "${draco_src_root}/mesh/mesh_connected_components.h"
+ "${draco_src_root}/mesh/mesh_splitter.cc"
+ "${draco_src_root}/mesh/mesh_splitter.h"
+ "${draco_src_root}/mesh/mesh_utils.cc"
+ "${draco_src_root}/mesh/mesh_utils.h")
+
+ list(
+ APPEND draco_scene_sources
+ "${draco_src_root}/scene/instance_array.cc"
+ "${draco_src_root}/scene/instance_array.h"
+ "${draco_src_root}/scene/light.cc"
+ "${draco_src_root}/scene/light.h"
+ "${draco_src_root}/scene/mesh_group.h"
+ "${draco_src_root}/scene/scene.cc"
+ "${draco_src_root}/scene/scene.h"
+ "${draco_src_root}/scene/scene_are_equivalent.cc"
+ "${draco_src_root}/scene/scene_are_equivalent.h"
+ "${draco_src_root}/scene/scene_indices.h"
+ "${draco_src_root}/scene/scene_node.h"
+ "${draco_src_root}/scene/scene_utils.cc"
+ "${draco_src_root}/scene/scene_utils.h"
+ "${draco_src_root}/scene/trs_matrix.cc"
+ "${draco_src_root}/scene/trs_matrix.h")
+
+ list(
+ APPEND draco_texture_sources
+ "${draco_src_root}/texture/source_image.cc"
+ "${draco_src_root}/texture/source_image.h"
+ "${draco_src_root}/texture/texture.h"
+ "${draco_src_root}/texture/texture_library.cc"
+ "${draco_src_root}/texture/texture_library.h"
+ "${draco_src_root}/texture/texture_map.cc"
+ "${draco_src_root}/texture/texture_map.h"
+ "${draco_src_root}/texture/texture_transform.cc"
+ "${draco_src_root}/texture/texture_transform.h"
+ "${draco_src_root}/texture/texture_utils.cc"
+ "${draco_src_root}/texture/texture_utils.h")
+
+
+endif()
+
#
# Draco targets.
#
if(EMSCRIPTEN AND DRACO_JS_GLUE)
# Draco decoder and encoder "executable" targets in various flavors for
- # Emsscripten.
- list(APPEND draco_decoder_src
- ${draco_attributes_sources}
- ${draco_compression_attributes_dec_sources}
- ${draco_compression_attributes_pred_schemes_dec_sources}
- ${draco_compression_bit_coders_sources}
- ${draco_compression_decode_sources}
- ${draco_compression_entropy_sources}
- ${draco_compression_mesh_traverser_sources}
- ${draco_compression_mesh_dec_sources}
- ${draco_compression_point_cloud_dec_sources}
- ${draco_core_sources}
- ${draco_dec_config_sources}
- ${draco_js_dec_sources}
- ${draco_mesh_sources}
- ${draco_metadata_dec_sources}
- ${draco_metadata_sources}
- ${draco_point_cloud_sources}
- ${draco_points_dec_sources})
+ # Emscripten.
- list(APPEND draco_encoder_src
- ${draco_attributes_sources}
- ${draco_compression_attributes_enc_sources}
- ${draco_compression_attributes_pred_schemes_enc_sources}
- ${draco_compression_bit_coders_sources}
- ${draco_compression_encode_sources}
- ${draco_compression_entropy_sources}
- ${draco_compression_mesh_traverser_sources}
- ${draco_compression_mesh_enc_sources}
- ${draco_compression_point_cloud_enc_sources}
- ${draco_core_sources}
- ${draco_enc_config_sources}
- ${draco_js_enc_sources}
- ${draco_mesh_sources}
- ${draco_metadata_enc_sources}
- ${draco_metadata_sources}
- ${draco_point_cloud_sources}
- ${draco_points_enc_sources})
+ if(DRACO_TRANSCODER_SUPPORTED)
+ message(FATAL_ERROR "The transcoder is not supported in Emscripten.")
+ endif()
+
+ list(
+ APPEND draco_decoder_src
+ ${draco_attributes_sources}
+ ${draco_compression_attributes_dec_sources}
+ ${draco_compression_attributes_pred_schemes_dec_sources}
+ ${draco_compression_bit_coders_sources}
+ ${draco_compression_decode_sources}
+ ${draco_compression_entropy_sources}
+ ${draco_compression_mesh_traverser_sources}
+ ${draco_compression_mesh_dec_sources}
+ ${draco_compression_options_sources}
+ ${draco_compression_point_cloud_dec_sources}
+ ${draco_core_sources}
+ ${draco_dec_config_sources}
+ ${draco_js_dec_sources}
+ ${draco_mesh_sources}
+ ${draco_metadata_dec_sources}
+ ${draco_metadata_sources}
+ ${draco_point_cloud_sources}
+ ${draco_points_dec_sources})
+
+ list(
+ APPEND draco_encoder_src
+ ${draco_attributes_sources}
+ ${draco_compression_attributes_enc_sources}
+ ${draco_compression_attributes_pred_schemes_enc_sources}
+ ${draco_compression_bit_coders_sources}
+ ${draco_compression_encode_sources}
+ ${draco_compression_entropy_sources}
+ ${draco_compression_mesh_traverser_sources}
+ ${draco_compression_mesh_enc_sources}
+ ${draco_compression_options_sources}
+ ${draco_compression_point_cloud_enc_sources}
+ ${draco_core_sources}
+ ${draco_enc_config_sources}
+ ${draco_js_enc_sources}
+ ${draco_mesh_sources}
+ ${draco_metadata_enc_sources}
+ ${draco_metadata_sources}
+ ${draco_point_cloud_sources}
+ ${draco_points_enc_sources})
list(APPEND draco_js_dec_idl
"${draco_src_root}/javascript/emscripten/draco_web_decoder.idl")
@@ -561,10 +688,10 @@ if(EMSCRIPTEN AND DRACO_JS_GLUE)
set(draco_decoder_glue_path "${draco_build}/glue_decoder")
set(draco_encoder_glue_path "${draco_build}/glue_encoder")
- draco_generate_emscripten_glue(INPUT_IDL ${draco_js_dec_idl} OUTPUT_PATH
- ${draco_decoder_glue_path})
- draco_generate_emscripten_glue(INPUT_IDL ${draco_js_enc_idl} OUTPUT_PATH
- ${draco_encoder_glue_path})
+ draco_generate_emscripten_glue(INPUT_IDL ${draco_js_dec_idl}
+ OUTPUT_PATH ${draco_decoder_glue_path})
+ draco_generate_emscripten_glue(INPUT_IDL ${draco_js_enc_idl}
+ OUTPUT_PATH ${draco_encoder_glue_path})
if(DRACO_DECODER_ATTRIBUTE_DEDUPLICATION)
list(APPEND draco_decoder_features
@@ -572,45 +699,28 @@ if(EMSCRIPTEN AND DRACO_JS_GLUE)
"DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED")
endif()
- draco_add_emscripten_executable(NAME
- draco_decoder
- SOURCES
- ${draco_decoder_src}
- DEFINES
- ${draco_defines}
- FEATURES
- ${draco_decoder_features}
- INCLUDES
- ${draco_include_paths}
- LINK_FLAGS
- "-sEXPORT_NAME=\"DracoDecoderModule\""
- GLUE_PATH
- ${draco_decoder_glue_path}
- PRE_LINK_JS_SOURCES
- ${draco_pre_link_js_sources}
- POST_LINK_JS_SOURCES
- ${draco_post_link_js_decoder_sources})
+ draco_add_emscripten_executable(
+ NAME draco_decoder
+ SOURCES ${draco_decoder_src}
+ DEFINES ${draco_defines}
+ FEATURES ${draco_decoder_features}
+ INCLUDES ${draco_include_paths}
+ LINK_FLAGS "-sEXPORT_NAME=\"DracoDecoderModule\""
+ GLUE_PATH ${draco_decoder_glue_path}
+ PRE_LINK_JS_SOURCES ${draco_pre_link_js_sources}
+ POST_LINK_JS_SOURCES ${draco_post_link_js_decoder_sources})
draco_add_emscripten_executable(
- NAME
- draco_encoder
- SOURCES
- ${draco_encoder_src}
- DEFINES
- ${draco_defines}
- FEATURES
- DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
- DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
- INCLUDES
- ${draco_include_paths}
- LINK_FLAGS
- "-sEXPORT_NAME=\"DracoEncoderModule\""
- GLUE_PATH
- ${draco_encoder_glue_path}
- PRE_LINK_JS_SOURCES
- ${draco_pre_link_js_sources}
- POST_LINK_JS_SOURCES
- ${draco_post_link_js_sources})
+ NAME draco_encoder
+ SOURCES ${draco_encoder_src}
+ DEFINES ${draco_defines}
+ FEATURES DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
+ DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
+ INCLUDES ${draco_include_paths}
+ LINK_FLAGS "-sEXPORT_NAME=\"DracoEncoderModule\""
+ GLUE_PATH ${draco_encoder_glue_path}
+ PRE_LINK_JS_SOURCES ${draco_pre_link_js_sources}
+ POST_LINK_JS_SOURCES ${draco_post_link_js_sources})
if(DRACO_ANIMATION_ENCODING)
set(draco_anim_decoder_glue_path "${draco_build}/glue_animation_decoder")
@@ -622,186 +732,270 @@ if(EMSCRIPTEN AND DRACO_JS_GLUE)
OUTPUT_PATH ${draco_anim_encoder_glue_path})
draco_add_emscripten_executable(
- NAME
- draco_animation_decoder
- SOURCES
- ${draco_animation_dec_sources}
- ${draco_animation_js_dec_sources}
- ${draco_animation_sources}
- ${draco_decoder_src}
- DEFINES
- ${draco_defines}
- INCLUDES
- ${draco_include_paths}
- LINK_FLAGS
- "-sEXPORT_NAME=\"DracoAnimationDecoderModule\""
- GLUE_PATH
- ${draco_anim_decoder_glue_path}
- PRE_LINK_JS_SOURCES
- ${draco_pre_link_js_sources}
- POST_LINK_JS_SOURCES
- ${draco_post_link_js_decoder_sources})
+ NAME draco_animation_decoder
+ SOURCES ${draco_animation_dec_sources} ${draco_animation_js_dec_sources}
+ ${draco_animation_sources} ${draco_decoder_src}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths}
+ LINK_FLAGS "-sEXPORT_NAME=\"DracoAnimationDecoderModule\""
+ GLUE_PATH ${draco_anim_decoder_glue_path}
+ PRE_LINK_JS_SOURCES ${draco_pre_link_js_sources}
+ POST_LINK_JS_SOURCES ${draco_post_link_js_decoder_sources})
draco_add_emscripten_executable(
- NAME
- draco_animation_encoder
- SOURCES
- ${draco_animation_enc_sources}
- ${draco_animation_js_enc_sources}
- ${draco_animation_sources}
- ${draco_encoder_src}
- DEFINES
- ${draco_defines}
- INCLUDES
- ${draco_include_paths}
- LINK_FLAGS
- "-sEXPORT_NAME=\"DracoAnimationEncoderModule\""
- GLUE_PATH
- ${draco_anim_encoder_glue_path}
- PRE_LINK_JS_SOURCES
- ${draco_pre_link_js_sources}
- POST_LINK_JS_SOURCES
- ${draco_post_link_js_sources})
+ NAME draco_animation_encoder
+ SOURCES ${draco_animation_enc_sources} ${draco_animation_js_enc_sources}
+ ${draco_animation_sources} ${draco_encoder_src}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths}
+ LINK_FLAGS "-sEXPORT_NAME=\"DracoAnimationEncoderModule\""
+ GLUE_PATH ${draco_anim_encoder_glue_path}
+ PRE_LINK_JS_SOURCES ${draco_pre_link_js_sources}
+ POST_LINK_JS_SOURCES ${draco_post_link_js_sources})
endif()
else()
# Standard Draco libs, encoder and decoder. Object collections that mirror the
# Draco directory structure.
- draco_add_library(NAME draco_attributes TYPE OBJECT SOURCES
- ${draco_attributes_sources} DEFINES ${draco_defines}
- INCLUDES ${draco_include_paths})
- draco_add_library(NAME
- draco_compression_attributes_dec
- OBJECT
- ${draco_compression_attributes_dec_sources}
- TYPE
- OBJECT
- SOURCES
- ${draco_compression_attributes_dec_sources}
- DEFINES
- ${draco_defines}
- INCLUDES
- ${draco_include_paths})
- draco_add_library(NAME draco_compression_attributes_enc TYPE OBJECT SOURCES
- ${draco_compression_attributes_enc_sources} DEFINES
- ${draco_defines} INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_compression_attributes_pred_schemes_dec TYPE
- OBJECT SOURCES
- ${draco_compression_attributes_pred_schemes_dec_sources})
- draco_add_library(NAME draco_compression_attributes_pred_schemes_enc TYPE
- OBJECT SOURCES
- ${draco_compression_attributes_pred_schemes_enc_sources}
- DEFINES ${draco_defines} INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_compression_bit_coders TYPE OBJECT SOURCES
- ${draco_compression_bit_coders_sources} DEFINES
- ${draco_defines} INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_enc_config TYPE OBJECT SOURCES
- ${draco_enc_config_sources} DEFINES ${draco_defines}
- INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_dec_config TYPE OBJECT SOURCES
- ${draco_dec_config_sources} DEFINES ${draco_defines}
- INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_compression_decode TYPE OBJECT SOURCES
- ${draco_compression_decode_sources} DEFINES ${draco_defines}
- INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_compression_encode TYPE OBJECT SOURCES
- ${draco_compression_encode_sources} DEFINES ${draco_defines}
- INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_compression_entropy TYPE OBJECT SOURCES
- ${draco_compression_entropy_sources} DEFINES
- ${draco_defines} INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_compression_mesh_traverser TYPE OBJECT SOURCES
- ${draco_compression_mesh_traverser_sources} DEFINES
- ${draco_defines} INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_compression_mesh_dec TYPE OBJECT SOURCES
- ${draco_compression_mesh_dec_sources} DEFINES
- ${draco_defines} INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_compression_mesh_enc TYPE OBJECT SOURCES
- ${draco_compression_mesh_enc_sources} DEFINES
- ${draco_defines} INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_compression_point_cloud_dec TYPE OBJECT SOURCES
- ${draco_compression_point_cloud_dec_sources} DEFINES
- ${draco_defines} INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_compression_point_cloud_enc TYPE OBJECT SOURCES
- ${draco_compression_point_cloud_enc_sources} DEFINES
- ${draco_defines} INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_core TYPE OBJECT SOURCES ${draco_core_sources}
- DEFINES ${draco_defines} INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_io TYPE OBJECT SOURCES ${draco_io_sources}
- DEFINES ${draco_defines} INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_mesh TYPE OBJECT SOURCES ${draco_mesh_sources}
- DEFINES ${draco_defines} INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_metadata_dec TYPE OBJECT SOURCES
- ${draco_metadata_dec_sources} DEFINES ${draco_defines}
- INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_metadata_enc TYPE OBJECT SOURCES
- ${draco_metadata_enc_sources} DEFINES ${draco_defines}
- INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_metadata TYPE OBJECT SOURCES
- ${draco_metadata_sources} DEFINES ${draco_defines} INCLUDES
- ${draco_include_paths})
- draco_add_library(NAME draco_animation_dec TYPE OBJECT SOURCES
- ${draco_animation_dec_sources} DEFINES ${draco_defines}
- INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_animation_enc TYPE OBJECT SOURCES
- ${draco_animation_enc_sources} DEFINES ${draco_defines}
- INCLUDES ${draco_include_paths})
- draco_add_library(NAME draco_animation TYPE OBJECT SOURCES
- ${draco_animation_sources} DEFINES ${draco_defines} INCLUDES
- ${draco_include_paths})
- draco_add_library(NAME draco_point_cloud TYPE OBJECT SOURCES
- ${draco_point_cloud_sources} DEFINES ${draco_defines}
- INCLUDES ${draco_include_paths})
- draco_add_library(NAME
- draco_points_dec
- TYPE
- OBJECT
- SOURCES
- ${draco_points_common_sources}
- ${draco_points_dec_sources}
- DEFINES
- ${draco_defines}
- INCLUDES
- ${draco_include_paths})
- draco_add_library(NAME
- draco_points_enc
- TYPE
- OBJECT
- SOURCES
- ${draco_points_common_sources}
- ${draco_points_enc_sources}
- DEFINES
- ${draco_defines}
- INCLUDES
- ${draco_include_paths})
+ draco_add_library(
+ NAME draco_attributes
+ TYPE OBJECT
+ SOURCES ${draco_attributes_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_attributes_dec OBJECT
+ ${draco_compression_attributes_dec_sources}
+ TYPE OBJECT
+ SOURCES ${draco_compression_attributes_dec_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_attributes_enc
+ TYPE OBJECT
+ SOURCES ${draco_compression_attributes_enc_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_attributes_pred_schemes_dec
+ TYPE OBJECT
+ SOURCES ${draco_compression_attributes_pred_schemes_dec_sources})
+ draco_add_library(
+ NAME draco_compression_attributes_pred_schemes_enc
+ TYPE OBJECT
+ SOURCES ${draco_compression_attributes_pred_schemes_enc_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_bit_coders
+ TYPE OBJECT
+ SOURCES ${draco_compression_bit_coders_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_enc_config
+ TYPE OBJECT
+ SOURCES ${draco_enc_config_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_dec_config
+ TYPE OBJECT
+ SOURCES ${draco_dec_config_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_decode
+ TYPE OBJECT
+ SOURCES ${draco_compression_decode_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_encode
+ TYPE OBJECT
+ SOURCES ${draco_compression_encode_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_entropy
+ TYPE OBJECT
+ SOURCES ${draco_compression_entropy_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_mesh_traverser
+ TYPE OBJECT
+ SOURCES ${draco_compression_mesh_traverser_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_mesh_dec
+ TYPE OBJECT
+ SOURCES ${draco_compression_mesh_dec_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_mesh_enc
+ TYPE OBJECT
+ SOURCES ${draco_compression_mesh_enc_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_options
+ TYPE OBJECT
+ SOURCES ${draco_compression_options_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_point_cloud_dec
+ TYPE OBJECT
+ SOURCES ${draco_compression_point_cloud_dec_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_compression_point_cloud_enc
+ TYPE OBJECT
+ SOURCES ${draco_compression_point_cloud_enc_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_core
+ TYPE OBJECT
+ SOURCES ${draco_core_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_io
+ TYPE OBJECT
+ SOURCES ${draco_io_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_mesh
+ TYPE OBJECT
+ SOURCES ${draco_mesh_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_metadata_dec
+ TYPE OBJECT
+ SOURCES ${draco_metadata_dec_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_metadata_enc
+ TYPE OBJECT
+ SOURCES ${draco_metadata_enc_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_metadata
+ TYPE OBJECT
+ SOURCES ${draco_metadata_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_animation_dec
+ TYPE OBJECT
+ SOURCES ${draco_animation_dec_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_animation_enc
+ TYPE OBJECT
+ SOURCES ${draco_animation_enc_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_animation
+ TYPE OBJECT
+ SOURCES ${draco_animation_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_point_cloud
+ TYPE OBJECT
+ SOURCES ${draco_point_cloud_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_points_dec
+ TYPE OBJECT
+ SOURCES ${draco_points_common_sources} ${draco_points_dec_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_points_enc
+ TYPE OBJECT
+ SOURCES ${draco_points_common_sources} ${draco_points_enc_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
- set(draco_object_library_deps
- draco_attributes
- draco_compression_attributes_dec
- draco_compression_attributes_enc
- draco_compression_attributes_pred_schemes_dec
- draco_compression_attributes_pred_schemes_enc
- draco_compression_bit_coders
- draco_compression_decode
- draco_compression_encode
- draco_compression_entropy
- draco_compression_mesh_dec
- draco_compression_mesh_enc
- draco_compression_point_cloud_dec
- draco_compression_point_cloud_enc
- draco_core
- draco_dec_config
- draco_enc_config
- draco_io
- draco_mesh
- draco_metadata
- draco_metadata_dec
- draco_metadata_enc
- draco_animation
- draco_animation_dec
- draco_animation_enc
- draco_point_cloud
- draco_points_dec
- draco_points_enc)
+ if(DRACO_TRANSCODER_SUPPORTED)
+ if(MSVC)
+ # TODO(https://github.com/google/draco/issues/826)
+ set_source_files_properties("${draco_src_root}/io/gltf_decoder.cc"
+ PROPERTIES COMPILE_OPTIONS "/Od")
+ endif()
+
+ draco_add_library(
+ NAME draco_material
+ TYPE OBJECT
+ SOURCES ${draco_material_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+
+ draco_add_library(
+ NAME draco_scene
+ TYPE OBJECT
+ SOURCES ${draco_scene_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+
+ draco_add_library(
+ NAME draco_texture
+ TYPE OBJECT
+ SOURCES ${draco_texture_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
+
+ endif()
+
+ list(
+ APPEND draco_object_library_deps
+ draco_attributes
+ draco_compression_attributes_dec
+ draco_compression_attributes_enc
+ draco_compression_attributes_pred_schemes_dec
+ draco_compression_attributes_pred_schemes_enc
+ draco_compression_bit_coders
+ draco_compression_decode
+ draco_compression_encode
+ draco_compression_entropy
+ draco_compression_mesh_dec
+ draco_compression_mesh_enc
+ draco_compression_options
+ draco_compression_point_cloud_dec
+ draco_compression_point_cloud_enc
+ draco_core
+ draco_dec_config
+ draco_enc_config
+ draco_io
+ draco_mesh
+ draco_metadata
+ draco_metadata_dec
+ draco_metadata_enc
+ draco_animation
+ draco_animation_dec
+ draco_animation_enc
+ draco_point_cloud
+ draco_points_dec
+ draco_points_enc)
+
+ if(DRACO_TRANSCODER_SUPPORTED)
+ list(APPEND draco_object_library_deps draco_material draco_scene
+ draco_texture)
+
+ endif()
# Library targets that consume the object collections.
if(MSVC)
@@ -809,56 +1003,48 @@ else()
# that the exported symbols are part of the DLL target. The unfortunate side
# effect of this is that a single configuration cannot output both the
# static library and the DLL: This results in an either/or situation.
- # Windows users of the draco build can have a DLL and an import library,
- # or they can have a static library; they cannot have both from a single
+ # Windows users of the draco build can have a DLL and an import library, or
+ # they can have a static library; they cannot have both from a single
# configuration of the build.
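+ # For example, a single build tree is configured either with
+ # -DBUILD_SHARED_LIBS=ON (DLL plus import library) or with
+ # -DBUILD_SHARED_LIBS=OFF (static library), never both at once.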
if(BUILD_SHARED_LIBS)
set(draco_lib_type SHARED)
else()
set(draco_lib_type STATIC)
endif()
- draco_add_library(NAME
- draco
- OUTPUT_NAME
- draco
- TYPE
- ${draco_lib_type}
- DEFINES
- ${draco_defines}
- INCLUDES
- ${draco_include_paths}
- OBJLIB_DEPS
- ${draco_object_library_deps})
+ draco_add_library(
+ NAME draco
+ OUTPUT_NAME draco
+ TYPE ${draco_lib_type}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths}
+ OBJLIB_DEPS ${draco_object_library_deps}
+ LIB_DEPS ${draco_lib_deps})
+ add_library(draco::draco ALIAS draco)
else()
- draco_add_library(NAME
- draco_static
- OUTPUT_NAME
- draco
- TYPE
- STATIC
- DEFINES
- ${draco_defines}
- INCLUDES
- ${draco_include_paths}
- OBJLIB_DEPS
- ${draco_object_library_deps})
+ draco_add_library(
+ NAME draco_static
+ OUTPUT_NAME draco
+ TYPE STATIC
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths}
+ OBJLIB_DEPS ${draco_object_library_deps}
+ LIB_DEPS ${draco_lib_deps})
if(BUILD_SHARED_LIBS)
- draco_add_library(NAME
- draco_shared
- SOURCES
- "${draco_src_root}/core/draco_version.h"
- OUTPUT_NAME
- draco
- TYPE
- SHARED
- DEFINES
- ${draco_defines}
- INCLUDES
- ${draco_include_paths}
- LIB_DEPS
- draco_static)
+ draco_add_library(
+ NAME draco_shared
+ SOURCES "${draco_src_root}/core/draco_version.h"
+ OUTPUT_NAME draco
+ TYPE SHARED
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths}
+ LIB_DEPS draco_static)
+ add_library(draco::draco ALIAS draco_shared)
+ set_target_properties(draco_shared PROPERTIES EXPORT_NAME draco)
+ else()
+ add_library(draco::draco ALIAS draco_static)
+ set_target_properties(draco_static PROPERTIES EXPORT_NAME draco)
endif()
endif()
@@ -869,22 +1055,20 @@ else()
set(unity_decoder_lib_type MODULE)
endif()
- draco_add_library(NAME draco_unity_plugin TYPE OBJECT SOURCES
- ${draco_unity_plug_sources} DEFINES ${draco_defines}
- INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_unity_plugin
+ TYPE OBJECT
+ SOURCES ${draco_unity_plug_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
- draco_add_library(NAME
- dracodec_unity
- TYPE
- ${unity_decoder_lib_type}
- DEFINES
- ${draco_defines}
- INCLUDES
- ${draco_include_paths}
- OBJLIB_DEPS
- draco_unity_plugin
- LIB_DEPS
- ${draco_plugin_dependency})
+ draco_add_library(
+ NAME dracodec_unity
+ TYPE ${unity_decoder_lib_type}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths}
+ OBJLIB_DEPS draco_unity_plugin
+ LIB_DEPS ${draco_plugin_dependency})
# For Mac, we need to build a .bundle for the unity plugin.
if(APPLE)
@@ -893,22 +1077,20 @@ else()
endif()
if(DRACO_MAYA_PLUGIN)
- draco_add_library(NAME draco_maya_plugin TYPE OBJECT SOURCES
- ${draco_maya_plug_sources} DEFINES ${draco_defines}
- INCLUDES ${draco_include_paths})
+ draco_add_library(
+ NAME draco_maya_plugin
+ TYPE OBJECT
+ SOURCES ${draco_maya_plug_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths})
- draco_add_library(NAME
- draco_maya_wrapper
- TYPE
- MODULE
- DEFINES
- ${draco_defines}
- INCLUDES
- ${draco_include_paths}
- OBJLIB_DEPS
- draco_maya_plugin
- LIB_DEPS
- ${draco_plugin_dependency})
+ draco_add_library(
+ NAME draco_maya_wrapper
+ TYPE MODULE
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths}
+ OBJLIB_DEPS draco_maya_plugin
+ LIB_DEPS ${draco_plugin_dependency})
# For Mac, we need to build a .bundle for the plugin.
if(APPLE)
@@ -917,29 +1099,44 @@ else()
endif()
# Draco app targets.
- draco_add_executable(NAME
- draco_decoder
- SOURCES
- "${draco_src_root}/tools/draco_decoder.cc"
- ${draco_io_sources}
- DEFINES
- ${draco_defines}
- INCLUDES
- ${draco_include_paths}
- LIB_DEPS
- ${draco_dependency})
+ draco_add_executable(
+ NAME draco_decoder
+ SOURCES "${draco_src_root}/tools/draco_decoder.cc" ${draco_io_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths}
+ LIB_DEPS ${draco_dependency})
- draco_add_executable(NAME
- draco_encoder
- SOURCES
- "${draco_src_root}/tools/draco_encoder.cc"
- ${draco_io_sources}
- DEFINES
- ${draco_defines}
- INCLUDES
- ${draco_include_paths}
- LIB_DEPS
- ${draco_dependency})
+ draco_add_executable(
+ NAME draco_encoder
+ SOURCES "${draco_src_root}/tools/draco_encoder.cc" ${draco_io_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths}
+ LIB_DEPS ${draco_dependency})
+
+ if(DRACO_TRANSCODER_SUPPORTED)
+ draco_add_executable(
+ NAME draco_transcoder
+ SOURCES "${draco_src_root}/tools/draco_transcoder.cc"
+ "${draco_src_root}/tools/draco_transcoder_lib.cc"
+ "${draco_src_root}/tools/draco_transcoder_lib.h"
+ ${draco_io_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths}
+ LIB_DEPS ${draco_dependency})
+
+ if(DRACO_SIMPLIFIER_SUPPORTED)
+ draco_add_executable(
+ NAME draco_simplifier
+ SOURCES ${draco_pipeline_proto_header}
+ "${draco_src_root}/tools/draco_simplifier.cc"
+ "${draco_src_root}/tools/draco_simplifier_lib.cc"
+ "${draco_src_root}/tools/draco_simplifier_lib.h"
+ ${draco_io_sources}
+ DEFINES ${draco_defines}
+ INCLUDES ${draco_include_paths}
+ LIB_DEPS ${draco_dependency})
+ endif()
+ endif()
draco_setup_install_target()
draco_setup_test_targets()
diff --git a/contrib/draco/README.md b/contrib/draco/README.md
index 0d980b387..4cc717c8d 100644
--- a/contrib/draco/README.md
+++ b/contrib/draco/README.md
@@ -2,10 +2,93 @@
-[![Build Status](https://github.com/google/draco/workflows/Build/badge.svg)](https://github.com/google/draco/actions?query=workflow%3ABuild)
+[![draco-ci](https://github.com/google/draco/workflows/draco-ci/badge.svg?branch=master)](https://github.com/google/draco/actions/workflows/ci.yml)
News
=======
+
+Attention GStatic users: the Draco team strongly recommends using the versioned
+URLs for accessing Draco GStatic content. If you are using the URLs that include
+the `v1/decoders` substring within the URL, edge caching and GStatic propagation
+delays can result in transient errors that can be difficult to diagnose when
+new Draco releases are launched. To avoid the issue pin your sites to a
+versioned release.
+
+### Version 1.5.6 release:
+* Using the versioned www.gstatic.com WASM and Javascript decoders continues
+ to be recommended. To use v1.5.6, use this URL:
+ * https://www.gstatic.com/draco/versioned/decoders/1.5.6/*
+* The CMake flag DRACO_DEBUG_MSVC_WARNINGS has been replaced with
+ DRACO_DEBUG_COMPILER_WARNINGS, and the behavior has changed. It is now a
+ boolean flag defined in draco_options.cmake.
+* Bug fixes.
+* Security fixes.
+
+### Version 1.5.5 release:
+* Using the versioned www.gstatic.com WASM and Javascript decoders continues
+ to be recommended. To use v1.5.5, use this URL:
+ * https://www.gstatic.com/draco/versioned/decoders/1.5.5/*
+* Bug fix: https://github.com/google/draco/issues/935
+
+### Version 1.5.4 release:
+* Using the versioned www.gstatic.com WASM and Javascript decoders continues
+ to be recommended. To use v1.5.4, use this URL:
+ * https://www.gstatic.com/draco/versioned/decoders/1.5.4/*
+* Added partial support for glTF extensions EXT_mesh_features and
+ EXT_structural_metadata.
+* Bug fixes.
+* Security fixes.
+
+### Version 1.5.3 release:
+* Using the versioned www.gstatic.com WASM and Javascript decoders continues
+ to be recommended. To use v1.5.3, use this URL:
+ * https://www.gstatic.com/draco/versioned/decoders/1.5.3/*
+* Bug fixes.
+
+### Version 1.5.2 release
+* This is the same as v1.5.1 with the following two bug fixes:
+ * Fixes DRACO_TRANSCODER_SUPPORTED enabled builds.
+ * ABI version updated.
+
+### Version 1.5.1 release
+* Adds assertion enabled Emscripten builds to the release, and a subset of the
+ assertion enabled builds to GStatic. See the file listing below.
+* Custom paths to third party dependencies are now supported. See BUILDING.md
+ for more information.
+* The CMake configuration file draco-config.cmake is now tested and known to
+ work for using Draco in Linux, MacOS, and Windows CMake projects. See the
+ `install_test` subdirectory of `src/draco/tools` for more information.
+* Bug fixes.
+
+### Version 1.5.0 release
+* Adds the draco_transcoder tool. See the section below on the glTF transcoding
+ tool, and BUILDING.md for build and dependency information.
+* Some changes to configuration variables have been made for this release:
+ - The DRACO_GLTF flag has been renamed to DRACO_GLTF_BITSTREAM to help
+ increase understanding of its purpose, which is to limit Draco features to
+ those included in the Draco glTF specification.
+ - Variables exported in CMake via draco-config.cmake and find-draco.cmake
+ (formerly FindDraco.cmake) have been renamed. It's unlikely that this
+ impacts any existing projects as the aforementioned files were not formed
+ correctly. See [PR775](https://github.com/google/draco/pull/775) for full
+ details of the changes.
+* A CMake version file has been added.
+* The CMake install target now uses absolute paths direct from CMake instead
+ of building them using CMAKE_INSTALL_PREFIX. This was done to make Draco
+ easier to use for downstream packagers and should have little to no impact on
+ users picking up Draco from source.
+* Certain MSVC warnings have had their levels changed via compiler flag to
+ reduce the amount of noise output by the MSVC compilers. Set MSVC warning
+ level to 4, or define DRACO_DEBUG_MSVC_WARNINGS at CMake configuration time
+ to restore previous behavior.
+* Bug fixes.
+
+### Version 1.4.3 release
+* Using the versioned www.gstatic.com WASM and Javascript decoders continues
+ to be recommended. To use v1.4.3, use this URL:
+ * https://www.gstatic.com/draco/versioned/decoders/1.4.3/*
+* Bug fixes
+
### Version 1.4.1 release
* Using the versioned www.gstatic.com WASM and Javascript decoders is now
recommended. To use v1.4.1, use this URL:
@@ -129,6 +212,7 @@ _**Contents**_
* [Encoding Tool](#encoding-tool)
* [Encoding Point Clouds](#encoding-point-clouds)
* [Decoding Tool](#decoding-tool)
+ * [glTF Transcoding Tool](#gltf-transcoding-tool)
* [C++ Decoder API](#c-decoder-api)
* [Javascript Encoder API](#javascript-encoder-api)
* [Javascript Decoder API](#javascript-decoder-api)
@@ -136,6 +220,7 @@ _**Contents**_
* [Metadata API](#metadata-api)
* [NPM Package](#npm-package)
* [three.js Renderer Example](#threejs-renderer-example)
+ * [GStatic Javascript Builds](#gstatic-javascript-builds)
* [Support](#support)
* [License](#license)
* [References](#references)
@@ -170,16 +255,18 @@ Command Line Applications
------------------------
The default target created from the build files will be the `draco_encoder`
-and `draco_decoder` command line applications. For both applications, if you
-run them without any arguments or `-h`, the applications will output usage and
-options.
+and `draco_decoder` command line applications. Additionally, `draco_transcoder`
+is generated when CMake is run with the DRACO_TRANSCODER_SUPPORTED variable set
+to ON (see [BUILDING](BUILDING.md#transcoder) for more details). For all
+applications, if you run them without any arguments or `-h`, the applications
+will output usage and options.
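+
+As a minimal sketch (assuming an out-of-source build directory named `build`;
+adjust the paths to your setup), the transcoder target can be enabled and built
+with:
+
+~~~~~ bash
+# Configure with transcoder support, then build only the draco_transcoder target.
+cmake -S . -B build -DDRACO_TRANSCODER_SUPPORTED=ON
+cmake --build build --target draco_transcoder
+~~~~~
+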
Encoding Tool
-------------
-`draco_encoder` will read OBJ or PLY files as input, and output Draco-encoded
-files. We have included Stanford's [Bunny] mesh for testing. The basic command
-line looks like this:
+`draco_encoder` will read OBJ, STL or PLY files as input, and output
+Draco-encoded files. We have included Stanford's [Bunny] mesh for testing. The
+basic command line looks like this:
~~~~~ bash
./draco_encoder -i testdata/bun_zipper.ply -o out.drc
@@ -232,15 +319,34 @@ and denser point clouds.
Decoding Tool
-------------
-`draco_decoder` will read Draco files as input, and output OBJ or PLY files.
-The basic command line looks like this:
+`draco_decoder` will read Draco files as input, and output OBJ, STL or PLY
+files. The basic command line looks like this:
~~~~~ bash
./draco_decoder -i in.drc -o out.obj
~~~~~
+glTF Transcoding Tool
+---------------------
+
+`draco_transcoder` can be used to add Draco compression to glTF assets. The
+basic command line looks like this:
+
+~~~~~ bash
+./draco_transcoder -i in.glb -o out.glb
+~~~~~
+
+This command line will add geometry compression to all meshes in the `in.glb`
+file. Quantization values for different glTF attributes can be specified
+similarly to the `draco_encoder` tool. For example, `-qp` can be used to define
+quantization of the position attribute:
+
+~~~~~ bash
+./draco_transcoder -i in.glb -o out.glb -qp 12
+~~~~~
+
C++ Decoder API
--------------
+---------------
If you'd like to add decoding to your applications you will need to include
the `draco_dec` library. In order to use the Draco decoder you need to
@@ -442,6 +548,30 @@ Javascript decoder using the `three.js` renderer.
Please see the [javascript/example/README.md](javascript/example/README.md) file for more information.
+GStatic Javascript Builds
+=========================
+
+Prebuilt versions of the Emscripten-built Draco javascript decoders are hosted
+on www.gstatic.com in version-labeled directories:
+
+https://www.gstatic.com/draco/versioned/decoders/VERSION/*
+
+As of the v1.4.3 release the files available are:
+
+- [draco_decoder.js](https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_decoder.js)
+- [draco_decoder.wasm](https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_decoder.wasm)
+- [draco_decoder_gltf.js](https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_decoder_gltf.js)
+- [draco_decoder_gltf.wasm](https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_decoder_gltf.wasm)
+- [draco_wasm_wrapper.js](https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_wasm_wrapper.js)
+- [draco_wasm_wrapper_gltf.js](https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_wasm_wrapper_gltf.js)
+
+Beginning with the v1.5.1 release assertion enabled builds of the following
+files are available:
+
+- [draco_decoder.js](https://www.gstatic.com/draco/versioned/decoders/1.5.1/with_asserts/draco_decoder.js)
+- [draco_decoder.wasm](https://www.gstatic.com/draco/versioned/decoders/1.5.1/with_asserts/draco_decoder.wasm)
+- [draco_wasm_wrapper.js](https://www.gstatic.com/draco/versioned/decoders/1.5.1/with_asserts/draco_wasm_wrapper.js)
+
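+To pin a page to a specific release instead of an unversioned URL, reference or
+fetch the versioned files directly, for example (illustrative only):
+
+~~~~~ bash
+# Download pinned copies of the v1.4.3 decoder and its WASM binary.
+curl -O https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_decoder.js
+curl -O https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_decoder.wasm
+~~~~~
+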
Support
=======
diff --git a/contrib/draco/cmake/DracoConfig.cmake b/contrib/draco/cmake/DracoConfig.cmake
deleted file mode 100644
index be5e1faef..000000000
--- a/contrib/draco/cmake/DracoConfig.cmake
+++ /dev/null
@@ -1,3 +0,0 @@
-@PACKAGE_INIT@
-set_and_check(draco_INCLUDE_DIR "@PACKAGE_draco_include_install_dir@")
-set_and_check(draco_LIBRARY_DIR "@PACKAGE_draco_lib_install_dir@")
diff --git a/contrib/draco/cmake/FindDraco.cmake b/contrib/draco/cmake/FindDraco.cmake
deleted file mode 100644
index 0a9193065..000000000
--- a/contrib/draco/cmake/FindDraco.cmake
+++ /dev/null
@@ -1,56 +0,0 @@
-# Finddraco
-#
-# Locates draco and sets the following variables:
-#
-# draco_FOUND draco_INCLUDE_DIRS draco_LIBARY_DIRS draco_LIBRARIES
-# draco_VERSION_STRING
-#
-# draco_FOUND is set to YES only when all other variables are successfully
-# configured.
-
-unset(draco_FOUND)
-unset(draco_INCLUDE_DIRS)
-unset(draco_LIBRARY_DIRS)
-unset(draco_LIBRARIES)
-unset(draco_VERSION_STRING)
-
-mark_as_advanced(draco_FOUND)
-mark_as_advanced(draco_INCLUDE_DIRS)
-mark_as_advanced(draco_LIBRARY_DIRS)
-mark_as_advanced(draco_LIBRARIES)
-mark_as_advanced(draco_VERSION_STRING)
-
-set(draco_version_file_no_prefix "draco/src/draco/core/draco_version.h")
-
-# Set draco_INCLUDE_DIRS
-find_path(draco_INCLUDE_DIRS NAMES "${draco_version_file_no_prefix}")
-
-# Extract the version string from draco_version.h.
-if(draco_INCLUDE_DIRS)
- set(draco_version_file
- "${draco_INCLUDE_DIRS}/draco/src/draco/core/draco_version.h")
- file(STRINGS "${draco_version_file}" draco_version REGEX "kdracoVersion")
- list(GET draco_version 0 draco_version)
- string(REPLACE "static const char kdracoVersion[] = " "" draco_version
- "${draco_version}")
- string(REPLACE ";" "" draco_version "${draco_version}")
- string(REPLACE "\"" "" draco_version "${draco_version}")
- set(draco_VERSION_STRING ${draco_version})
-endif()
-
-# Find the library.
-if(BUILD_SHARED_LIBS)
- find_library(draco_LIBRARIES NAMES draco.dll libdraco.dylib libdraco.so)
-else()
- find_library(draco_LIBRARIES NAMES draco.lib libdraco.a)
-endif()
-
-# Store path to library.
-get_filename_component(draco_LIBRARY_DIRS ${draco_LIBRARIES} DIRECTORY)
-
-if(draco_INCLUDE_DIRS
- AND draco_LIBRARY_DIRS
- AND draco_LIBRARIES
- AND draco_VERSION_STRING)
- set(draco_FOUND YES)
-endif()
diff --git a/contrib/draco/cmake/compiler_flags.cmake b/contrib/draco/cmake/compiler_flags.cmake
deleted file mode 100644
index 8750e6f7d..000000000
--- a/contrib/draco/cmake/compiler_flags.cmake
+++ /dev/null
@@ -1,220 +0,0 @@
-if(DRACO_CMAKE_COMPILER_FLAGS_CMAKE_)
- return()
-endif()
-set(DRACO_CMAKE_COMPILER_FLAGS_CMAKE_ 1)
-
-include(CheckCCompilerFlag)
-include(CheckCXXCompilerFlag)
-include("${draco_root}/cmake/compiler_tests.cmake")
-
-# Strings used to cache failed C/CXX flags.
-set(DRACO_FAILED_C_FLAGS)
-set(DRACO_FAILED_CXX_FLAGS)
-
-# Checks C compiler for support of $c_flag. Adds $c_flag to $CMAKE_C_FLAGS when
-# the compile test passes. Caches $c_flag in $DRACO_FAILED_C_FLAGS when the test
-# fails.
-macro(add_c_flag_if_supported c_flag)
- unset(C_FLAG_FOUND CACHE)
- string(FIND "${CMAKE_C_FLAGS}" "${c_flag}" C_FLAG_FOUND)
- unset(C_FLAG_FAILED CACHE)
- string(FIND "${DRACO_FAILED_C_FLAGS}" "${c_flag}" C_FLAG_FAILED)
-
- if(${C_FLAG_FOUND} EQUAL -1 AND ${C_FLAG_FAILED} EQUAL -1)
- unset(C_FLAG_SUPPORTED CACHE)
- message("Checking C compiler flag support for: " ${c_flag})
- check_c_compiler_flag("${c_flag}" C_FLAG_SUPPORTED)
- if(${C_FLAG_SUPPORTED})
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${c_flag}" CACHE STRING "")
- else()
- set(DRACO_FAILED_C_FLAGS
- "${DRACO_FAILED_C_FLAGS} ${c_flag}"
- CACHE STRING "" FORCE)
- endif()
- endif()
-endmacro()
-
-# Checks C++ compiler for support of $cxx_flag. Adds $cxx_flag to
-# $CMAKE_CXX_FLAGS when the compile test passes. Caches $c_flag in
-# $DRACO_FAILED_CXX_FLAGS when the test fails.
-macro(add_cxx_flag_if_supported cxx_flag)
- unset(CXX_FLAG_FOUND CACHE)
- string(FIND "${CMAKE_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FOUND)
- unset(CXX_FLAG_FAILED CACHE)
- string(FIND "${DRACO_FAILED_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FAILED)
-
- if(${CXX_FLAG_FOUND} EQUAL -1 AND ${CXX_FLAG_FAILED} EQUAL -1)
- unset(CXX_FLAG_SUPPORTED CACHE)
- message("Checking CXX compiler flag support for: " ${cxx_flag})
- check_cxx_compiler_flag("${cxx_flag}" CXX_FLAG_SUPPORTED)
- if(${CXX_FLAG_SUPPORTED})
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${cxx_flag}" CACHE STRING "")
- else()
- set(DRACO_FAILED_CXX_FLAGS
- "${DRACO_FAILED_CXX_FLAGS} ${cxx_flag}"
- CACHE STRING "" FORCE)
- endif()
- endif()
-endmacro()
-
-# Convenience method for adding a flag to both the C and C++ compiler command
-# lines.
-macro(add_compiler_flag_if_supported flag)
- add_c_flag_if_supported(${flag})
- add_cxx_flag_if_supported(${flag})
-endmacro()
-
-# Checks C compiler for support of $c_flag and terminates generation when
-# support is not present.
-macro(require_c_flag c_flag update_c_flags)
- unset(C_FLAG_FOUND CACHE)
- string(FIND "${CMAKE_C_FLAGS}" "${c_flag}" C_FLAG_FOUND)
-
- if(${C_FLAG_FOUND} EQUAL -1)
- unset(HAVE_C_FLAG CACHE)
- message("Checking C compiler flag support for: " ${c_flag})
- check_c_compiler_flag("${c_flag}" HAVE_C_FLAG)
- if(NOT ${HAVE_C_FLAG})
- message(
- FATAL_ERROR "${PROJECT_NAME} requires support for C flag: ${c_flag}.")
- endif()
- if(${update_c_flags})
- set(CMAKE_C_FLAGS "${c_flag} ${CMAKE_C_FLAGS}" CACHE STRING "" FORCE)
- endif()
- endif()
-endmacro()
-
-# Checks CXX compiler for support of $cxx_flag and terminates generation when
-# support is not present.
-macro(require_cxx_flag cxx_flag update_cxx_flags)
- unset(CXX_FLAG_FOUND CACHE)
- string(FIND "${CMAKE_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FOUND)
-
- if(${CXX_FLAG_FOUND} EQUAL -1)
- unset(HAVE_CXX_FLAG CACHE)
- message("Checking CXX compiler flag support for: " ${cxx_flag})
- check_cxx_compiler_flag("${cxx_flag}" HAVE_CXX_FLAG)
- if(NOT ${HAVE_CXX_FLAG})
- message(
- FATAL_ERROR
- "${PROJECT_NAME} requires support for CXX flag: ${cxx_flag}.")
- endif()
- if(${update_cxx_flags})
- set(CMAKE_CXX_FLAGS
- "${cxx_flag} ${CMAKE_CXX_FLAGS}"
- CACHE STRING "" FORCE)
- endif()
- endif()
-endmacro()
-
-# Checks for support of $flag by both the C and CXX compilers. Terminates
-# generation when support is not present in both compilers.
-macro(require_compiler_flag flag update_cmake_flags)
- require_c_flag(${flag} ${update_cmake_flags})
- require_cxx_flag(${flag} ${update_cmake_flags})
-endmacro()
-
-# Checks only non-MSVC targets for support of $c_flag and terminates generation
-# when support is not present.
-macro(require_c_flag_nomsvc c_flag update_c_flags)
- if(NOT MSVC)
- require_c_flag(${c_flag} ${update_c_flags})
- endif()
-endmacro()
-
-# Checks only non-MSVC targets for support of $cxx_flag and terminates
-# generation when support is not present.
-macro(require_cxx_flag_nomsvc cxx_flag update_cxx_flags)
- if(NOT MSVC)
- require_cxx_flag(${cxx_flag} ${update_cxx_flags})
- endif()
-endmacro()
-
-# Checks only non-MSVC targets for support of $flag by both the C and CXX
-# compilers. Terminates generation when support is not present in both
-# compilers.
-macro(require_compiler_flag_nomsvc flag update_cmake_flags)
- require_c_flag_nomsvc(${flag} ${update_cmake_flags})
- require_cxx_flag_nomsvc(${flag} ${update_cmake_flags})
-endmacro()
-
-# Adds $flag to assembler command line.
-macro(append_as_flag flag)
- unset(AS_FLAG_FOUND CACHE)
- string(FIND "${DRACO_AS_FLAGS}" "${flag}" AS_FLAG_FOUND)
-
- if(${AS_FLAG_FOUND} EQUAL -1)
- set(DRACO_AS_FLAGS "${DRACO_AS_FLAGS} ${flag}")
- endif()
-endmacro()
-
-# Adds $flag to the C compiler command line.
-macro(append_c_flag flag)
- unset(C_FLAG_FOUND CACHE)
- string(FIND "${CMAKE_C_FLAGS}" "${flag}" C_FLAG_FOUND)
-
- if(${C_FLAG_FOUND} EQUAL -1)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}")
- endif()
-endmacro()
-
-# Adds $flag to the CXX compiler command line.
-macro(append_cxx_flag flag)
- unset(CXX_FLAG_FOUND CACHE)
- string(FIND "${CMAKE_CXX_FLAGS}" "${flag}" CXX_FLAG_FOUND)
-
- if(${CXX_FLAG_FOUND} EQUAL -1)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
- endif()
-endmacro()
-
-# Adds $flag to the C and CXX compiler command lines.
-macro(append_compiler_flag flag)
- append_c_flag(${flag})
- append_cxx_flag(${flag})
-endmacro()
-
-# Adds $flag to the executable linker command line.
-macro(append_exe_linker_flag flag)
- unset(LINKER_FLAG_FOUND CACHE)
- string(FIND "${CMAKE_EXE_LINKER_FLAGS}" "${flag}" LINKER_FLAG_FOUND)
-
- if(${LINKER_FLAG_FOUND} EQUAL -1)
- set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${flag}")
- endif()
-endmacro()
-
-# Adds $flag to the link flags for $target.
-function(append_link_flag_to_target target flags)
- unset(target_link_flags)
- get_target_property(target_link_flags ${target} LINK_FLAGS)
-
- if(target_link_flags)
- unset(link_flag_found)
- string(FIND "${target_link_flags}" "${flags}" link_flag_found)
-
- if(NOT ${link_flag_found} EQUAL -1)
- return()
- endif()
-
- set(target_link_flags "${target_link_flags} ${flags}")
- else()
- set(target_link_flags "${flags}")
- endif()
-
- set_target_properties(${target} PROPERTIES LINK_FLAGS ${target_link_flags})
-endfunction()
-
-# Adds $flag to executable linker flags, and makes sure C/CXX builds still work.
-macro(require_linker_flag flag)
- append_exe_linker_flag(${flag})
-
- unset(c_passed)
- draco_check_c_compiles("LINKER_FLAG_C_TEST(${flag})" "" c_passed)
- unset(cxx_passed)
- draco_check_cxx_compiles("LINKER_FLAG_CXX_TEST(${flag})" "" cxx_passed)
-
- if(NOT c_passed OR NOT cxx_passed)
- message(FATAL_ERROR "Linker flag test for ${flag} failed.")
- endif()
-endmacro()
diff --git a/contrib/draco/cmake/compiler_tests.cmake b/contrib/draco/cmake/compiler_tests.cmake
deleted file mode 100644
index e781a6537..000000000
--- a/contrib/draco/cmake/compiler_tests.cmake
+++ /dev/null
@@ -1,103 +0,0 @@
-if(DRACO_CMAKE_COMPILER_TESTS_CMAKE_)
- return()
-endif()
-set(DRACO_CMAKE_COMPILER_TESTS_CMAKE_ 1)
-
-include(CheckCSourceCompiles)
-include(CheckCXXSourceCompiles)
-
-# The basic main() macro used in all compile tests.
-set(DRACO_C_MAIN "\nint main(void) { return 0; }")
-set(DRACO_CXX_MAIN "\nint main() { return 0; }")
-
-# Strings containing the names of passed and failed tests.
-set(DRACO_C_PASSED_TESTS)
-set(DRACO_C_FAILED_TESTS)
-set(DRACO_CXX_PASSED_TESTS)
-set(DRACO_CXX_FAILED_TESTS)
-
-macro(draco_push_var var new_value)
- set(SAVED_${var} ${var})
- set(${var} ${new_value})
-endmacro()
-
-macro(draco_pop_var var)
- set(var ${SAVED_${var}})
- unset(SAVED_${var})
-endmacro()
-
-# Confirms $test_source compiles and stores $test_name in one of
-# $DRACO_C_PASSED_TESTS or $DRACO_C_FAILED_TESTS depending on out come. When the
-# test passes $result_var is set to 1. When it fails $result_var is unset. The
-# test is not run if the test name is found in either of the passed or failed
-# test variables.
-macro(draco_check_c_compiles test_name test_source result_var)
- unset(C_TEST_PASSED CACHE)
- unset(C_TEST_FAILED CACHE)
- string(FIND "${DRACO_C_PASSED_TESTS}" "${test_name}" C_TEST_PASSED)
- string(FIND "${DRACO_C_FAILED_TESTS}" "${test_name}" C_TEST_FAILED)
- if(${C_TEST_PASSED} EQUAL -1 AND ${C_TEST_FAILED} EQUAL -1)
- unset(C_TEST_COMPILED CACHE)
- message("Running C compiler test: ${test_name}")
- check_c_source_compiles("${test_source} ${DRACO_C_MAIN}" C_TEST_COMPILED)
- set(${result_var} ${C_TEST_COMPILED})
-
- if(${C_TEST_COMPILED})
- set(DRACO_C_PASSED_TESTS "${DRACO_C_PASSED_TESTS} ${test_name}")
- else()
- set(DRACO_C_FAILED_TESTS "${DRACO_C_FAILED_TESTS} ${test_name}")
- message("C Compiler test ${test_name} failed.")
- endif()
- elseif(NOT ${C_TEST_PASSED} EQUAL -1)
- set(${result_var} 1)
- else() # ${C_TEST_FAILED} NOT EQUAL -1
- unset(${result_var})
- endif()
-endmacro()
-
-# Confirms $test_source compiles and stores $test_name in one of
-# $DRACO_CXX_PASSED_TESTS or $DRACO_CXX_FAILED_TESTS depending on out come. When
-# the test passes $result_var is set to 1. When it fails $result_var is unset.
-# The test is not run if the test name is found in either of the passed or
-# failed test variables.
-macro(draco_check_cxx_compiles test_name test_source result_var)
- unset(CXX_TEST_PASSED CACHE)
- unset(CXX_TEST_FAILED CACHE)
- string(FIND "${DRACO_CXX_PASSED_TESTS}" "${test_name}" CXX_TEST_PASSED)
- string(FIND "${DRACO_CXX_FAILED_TESTS}" "${test_name}" CXX_TEST_FAILED)
- if(${CXX_TEST_PASSED} EQUAL -1 AND ${CXX_TEST_FAILED} EQUAL -1)
- unset(CXX_TEST_COMPILED CACHE)
- message("Running CXX compiler test: ${test_name}")
- check_cxx_source_compiles("${test_source} ${DRACO_CXX_MAIN}"
- CXX_TEST_COMPILED)
- set(${result_var} ${CXX_TEST_COMPILED})
-
- if(${CXX_TEST_COMPILED})
- set(DRACO_CXX_PASSED_TESTS "${DRACO_CXX_PASSED_TESTS} ${test_name}")
- else()
- set(DRACO_CXX_FAILED_TESTS "${DRACO_CXX_FAILED_TESTS} ${test_name}")
- message("CXX Compiler test ${test_name} failed.")
- endif()
- elseif(NOT ${CXX_TEST_PASSED} EQUAL -1)
- set(${result_var} 1)
- else() # ${CXX_TEST_FAILED} NOT EQUAL -1
- unset(${result_var})
- endif()
-endmacro()
-
-# Convenience macro that confirms $test_source compiles as C and C++.
-# $result_var is set to 1 when both tests are successful, and 0 when one or both
-# tests fail. Note: This macro is intended to be used to write to result
-# variables that are expanded via configure_file(). $result_var is set to 1 or 0
-# to allow direct usage of the value in generated source files.
-macro(draco_check_source_compiles test_name test_source result_var)
- unset(C_PASSED)
- unset(CXX_PASSED)
- draco_check_c_compiles(${test_name} ${test_source} C_PASSED)
- draco_check_cxx_compiles(${test_name} ${test_source} CXX_PASSED)
- if(${C_PASSED} AND ${CXX_PASSED})
- set(${result_var} 1)
- else()
- set(${result_var} 0)
- endif()
-endmacro()
diff --git a/contrib/draco/cmake/draco-config.cmake.template b/contrib/draco/cmake/draco-config.cmake.template
index ca4a456bf..ed86823ea 100644
--- a/contrib/draco/cmake/draco-config.cmake.template
+++ b/contrib/draco/cmake/draco-config.cmake.template
@@ -1,2 +1,3 @@
-set(DRACO_INCLUDE_DIRS "@DRACO_INCLUDE_DIRS@")
-set(DRACO_LIBRARIES "draco")
+@PACKAGE_INIT@
+
+include("${CMAKE_CURRENT_LIST_DIR}/draco-targets.cmake")
diff --git a/contrib/draco/cmake/draco.pc.template b/contrib/draco/cmake/draco.pc.template
index b8ae48212..050219ccb 100644
--- a/contrib/draco/cmake/draco.pc.template
+++ b/contrib/draco/cmake/draco.pc.template
@@ -1,11 +1,6 @@
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
Name: @PROJECT_NAME@
Description: Draco geometry de(com)pression library.
Version: @DRACO_VERSION@
-Cflags: -I${includedir}
-Libs: -L${libdir} -ldraco
+Cflags: -I@includes_path@
+Libs: -L@libs_path@ -ldraco
Libs.private: @CMAKE_THREAD_LIBS_INIT@
diff --git a/contrib/draco/cmake/draco_build_definitions.cmake b/contrib/draco/cmake/draco_build_definitions.cmake
index f7354c15f..4dc232333 100644
--- a/contrib/draco/cmake/draco_build_definitions.cmake
+++ b/contrib/draco/cmake/draco_build_definitions.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_
@@ -17,10 +31,6 @@ macro(set_draco_target)
endif()
set(draco_plugin_dependency draco_static)
endif()
-
- if(BUILD_SHARED_LIBS)
- set(CMAKE_POSITION_INDEPENDENT_CODE ON)
- endif()
endmacro()
# Configures flags and sets build system globals.
@@ -36,23 +46,37 @@ macro(draco_set_build_definitions)
endif()
draco_load_version_info()
- set(DRACO_SOVERSION 1)
+
+ # Library version info. See the libtool docs for updating the values:
+ # https://www.gnu.org/software/libtool/manual/libtool.html#Updating-version-info
+ #
+ # c=<current>, r=<revision>, a=<age>
+ #
+ # libtool generates a .so file as .so.[c-a].a.r, while -version-info c:r:a is
+ # passed to libtool.
+ #
+ # We set DRACO_SOVERSION = [c-a].a.r
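+ # For example, c=8, r=0, a=0 below yields DRACO_SOVERSION "8.0.0".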
+ set(LT_CURRENT 8)
+ set(LT_REVISION 0)
+ set(LT_AGE 0)
+ math(EXPR DRACO_SOVERSION_MAJOR "${LT_CURRENT} - ${LT_AGE}")
+ set(DRACO_SOVERSION "${DRACO_SOVERSION_MAJOR}.${LT_AGE}.${LT_REVISION}")
+ unset(LT_CURRENT)
+ unset(LT_REVISION)
+ unset(LT_AGE)
list(APPEND draco_include_paths "${draco_root}" "${draco_root}/src"
"${draco_build}")
- if(DRACO_ABSL)
- list(APPEND draco_include_path "${draco_root}/third_party/abseil-cpp")
+ if(DRACO_TRANSCODER_SUPPORTED)
+ draco_setup_eigen()
+ draco_setup_filesystem()
+ draco_setup_tinygltf()
+
+
endif()
- list(APPEND draco_gtest_include_paths
- "${draco_root}/../googletest/googlemock/include"
- "${draco_root}/../googletest/googlemock"
- "${draco_root}/../googletest/googletest/include"
- "${draco_root}/../googletest/googletest")
- list(APPEND draco_test_include_paths ${draco_include_paths}
- ${draco_gtest_include_paths})
list(APPEND draco_defines "DRACO_CMAKE=1"
"DRACO_FLAGS_SRCDIR=\"${draco_root}\""
"DRACO_FLAGS_TMPDIR=\"/tmp\"")
@@ -63,11 +87,22 @@ macro(draco_set_build_definitions)
if(BUILD_SHARED_LIBS)
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE)
endif()
- else()
+ endif()
+
+ if(NOT MSVC)
if(${CMAKE_SIZEOF_VOID_P} EQUAL 8)
# Ensure 64-bit platforms can support large files.
list(APPEND draco_defines "_LARGEFILE_SOURCE" "_FILE_OFFSET_BITS=64")
endif()
+
+ if(NOT DRACO_DEBUG_COMPILER_WARNINGS)
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+ list(APPEND draco_clang_cxx_flags
+ "-Wno-implicit-const-int-float-conversion")
+ else()
+ list(APPEND draco_base_cxx_flags "-Wno-deprecated-declarations")
+ endif()
+ endif()
endif()
if(ANDROID)
@@ -102,13 +137,9 @@ macro(draco_set_build_definitions)
set(draco_neon_source_file_suffix "neon.cc")
set(draco_sse4_source_file_suffix "sse4.cc")
- if((${CMAKE_CXX_COMPILER_ID}
- STREQUAL
- "GNU"
- AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 5)
- OR (${CMAKE_CXX_COMPILER_ID}
- STREQUAL
- "Clang"
+ if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU" AND ${CMAKE_CXX_COMPILER_VERSION}
+ VERSION_LESS 5)
+ OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang"
AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4))
message(
WARNING "GNU/GCC < v5 or Clang/LLVM < v4, ENABLING COMPATIBILITY MODE.")
@@ -117,7 +148,9 @@ macro(draco_set_build_definitions)
if(EMSCRIPTEN)
draco_check_emscripten_environment()
- draco_get_required_emscripten_flags(FLAG_LIST_VAR draco_base_cxx_flags)
+ draco_get_required_emscripten_flags(
+ FLAG_LIST_VAR_COMPILER draco_base_cxx_flags
+ FLAG_LIST_VAR_LINKER draco_base_exe_linker_flags)
endif()
draco_configure_sanitizer()
diff --git a/contrib/draco/cmake/draco_cpu_detection.cmake b/contrib/draco/cmake/draco_cpu_detection.cmake
index 96e4a289b..c3b77b80c 100644
--- a/contrib/draco/cmake/draco_cpu_detection.cmake
+++ b/contrib/draco/cmake/draco_cpu_detection.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_
diff --git a/contrib/draco/cmake/draco_dependencies.cmake b/contrib/draco/cmake/draco_dependencies.cmake
new file mode 100644
index 000000000..91ee0839b
--- /dev/null
+++ b/contrib/draco/cmake/draco_dependencies.cmake
@@ -0,0 +1,136 @@
+# Copyright 2022 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+if(DRACO_CMAKE_DRACO_DEPENDENCIES_CMAKE)
+ return()
+endif()
+set(DRACO_CMAKE_DRACO_DEPENDENCIES_CMAKE 1)
+
+include("${draco_root}/cmake/draco_variables.cmake")
+
+# Each variable holds a user specified custom path to a local copy of the
+# sources that belong to each project that Draco depends on. When paths are
+# empty the build will be generated pointing to the Draco git submodules.
+# Otherwise the paths specified by the user will be used in the build
+# configuration.
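+#
+# For example, custom paths can be passed at configure time (hypothetical local
+# checkouts shown purely as an illustration):
+#   cmake <path to draco> -DDRACO_EIGEN_PATH=/opt/src/eigen \
+#     -DDRACO_TINYGLTF_PATH=/opt/src/tinygltf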
+
+# Path to Eigen. The path must contain the Eigen directory.
+set(DRACO_EIGEN_PATH)
+draco_track_configuration_variable(DRACO_EIGEN_PATH)
+
+# Path to the gulrak/filesystem installation. The path specified must contain
+# the ghc subdirectory that houses the filesystem includes.
+set(DRACO_FILESYSTEM_PATH)
+draco_track_configuration_variable(DRACO_FILESYSTEM_PATH)
+
+# Path to the googletest installation. The path must be to the root of the
+# Googletest project directory.
+set(DRACO_GOOGLETEST_PATH)
+draco_track_configuration_variable(DRACO_GOOGLETEST_PATH)
+
+# Path to the syoyo/tinygltf installation. The path must be to the root of the
+# project directory.
+set(DRACO_TINYGLTF_PATH)
+draco_track_configuration_variable(DRACO_TINYGLTF_PATH)
+
+# Utility macro for killing the build due to a missing submodule directory.
+macro(draco_die_missing_submodule dir)
+ message(FATAL_ERROR "${dir} missing, run git submodule update --init")
+endmacro()
+
+# Determines the Eigen location and updates the build configuration accordingly.
+macro(draco_setup_eigen)
+ if(DRACO_EIGEN_PATH)
+ set(eigen_path "${DRACO_EIGEN_PATH}")
+
+ if(NOT IS_DIRECTORY "${eigen_path}")
+ message(FATAL_ERROR "DRACO_EIGEN_PATH does not exist.")
+ endif()
+ else()
+ set(eigen_path "${draco_root}/third_party/eigen")
+
+ if(NOT IS_DIRECTORY "${eigen_path}")
+ draco_die_missing_submodule("${eigen_path}")
+ endif()
+ endif()
+
+ set(eigen_include_path "${eigen_path}/Eigen")
+
+ if(NOT EXISTS "${eigen_path}/Eigen")
+ message(FATAL_ERROR "The eigen path does not contain an Eigen directory.")
+ endif()
+
+ list(APPEND draco_include_paths "${eigen_path}")
+endmacro()
+
+# Determines the gulrak/filesystem location and updates the build configuration
+# accordingly.
+macro(draco_setup_filesystem)
+ if(DRACO_FILESYSTEM_PATH)
+ set(fs_path "${DRACO_FILESYSTEM_PATH}")
+
+ if(NOT IS_DIRECTORY "${fs_path}")
+ message(FATAL_ERROR "DRACO_FILESYSTEM_PATH does not exist.")
+ endif()
+ else()
+ set(fs_path "${draco_root}/third_party/filesystem/include")
+
+ if(NOT IS_DIRECTORY "${fs_path}")
+ draco_die_missing_submodule("${fs_path}")
+ endif()
+ endif()
+
+ list(APPEND draco_include_paths "${fs_path}")
+endmacro()
+
+# Determines the Googletest location and sets up include and source list vars
+# for the draco_tests build.
+macro(draco_setup_googletest)
+ if(DRACO_GOOGLETEST_PATH)
+ set(gtest_path "${DRACO_GOOGLETEST_PATH}")
+ if(NOT IS_DIRECTORY "${gtest_path}")
+ message(FATAL_ERROR "DRACO_GOOGLETEST_PATH does not exist.")
+ endif()
+ else()
+ set(gtest_path "${draco_root}/third_party/googletest")
+ endif()
+
+ list(APPEND draco_test_include_paths ${draco_include_paths}
+ "${gtest_path}/include" "${gtest_path}/googlemock"
+ "${gtest_path}/googletest/include" "${gtest_path}/googletest")
+
+ list(APPEND draco_gtest_all "${gtest_path}/googletest/src/gtest-all.cc")
+ list(APPEND draco_gtest_main "${gtest_path}/googletest/src/gtest_main.cc")
+endmacro()
+
+# Determines the location of TinyGLTF and updates the build configuration
+# accordingly.
+macro(draco_setup_tinygltf)
+ if(DRACO_TINYGLTF_PATH)
+ set(tinygltf_path "${DRACO_TINYGLTF_PATH}")
+
+ if(NOT IS_DIRECTORY "${tinygltf_path}")
+ message(FATAL_ERROR "DRACO_TINYGLTF_PATH does not exist.")
+ endif()
+ else()
+ set(tinygltf_path "${draco_root}/third_party/tinygltf")
+
+ if(NOT IS_DIRECTORY "${tinygltf_path}")
+ draco_die_missing_submodule("${tinygltf_path}")
+ endif()
+ endif()
+
+ list(APPEND draco_include_paths "${tinygltf_path}")
+endmacro()
diff --git a/contrib/draco/cmake/draco_emscripten.cmake b/contrib/draco/cmake/draco_emscripten.cmake
index 10c935043..c9616ae86 100644
--- a/contrib/draco/cmake/draco_emscripten.cmake
+++ b/contrib/draco/cmake/draco_emscripten.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_DRACO_EMSCRIPTEN_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_EMSCRIPTEN_CMAKE_
@@ -18,39 +32,64 @@ endmacro()
# Obtains the required Emscripten flags for Draco targets.
macro(draco_get_required_emscripten_flags)
- set(em_FLAG_LIST_VAR)
+ set(em_FLAG_LIST_VAR_COMPILER)
+ set(em_FLAG_LIST_VAR_LINKER)
set(em_flags)
- set(em_single_arg_opts FLAG_LIST_VAR)
+ set(em_single_arg_opts FLAG_LIST_VAR_COMPILER FLAG_LIST_VAR_LINKER)
set(em_multi_arg_opts)
cmake_parse_arguments(em "${em_flags}" "${em_single_arg_opts}"
"${em_multi_arg_opts}" ${ARGN})
- if(NOT em_FLAG_LIST_VAR)
- message(FATAL "draco_get_required_emscripten_flags: FLAG_LIST_VAR required")
+ if(NOT em_FLAG_LIST_VAR_COMPILER)
+ message(
+ FATAL
+ "draco_get_required_emscripten_flags: FLAG_LIST_VAR_COMPILER required")
+ endif()
+
+ if(NOT em_FLAG_LIST_VAR_LINKER)
+ message(
+ FATAL
+ "draco_get_required_emscripten_flags: FLAG_LIST_VAR_LINKER required")
endif()
if(DRACO_JS_GLUE)
unset(required_flags)
- list(APPEND ${em_FLAG_LIST_VAR} "-sALLOW_MEMORY_GROWTH=1")
- list(APPEND ${em_FLAG_LIST_VAR} "-Wno-almost-asm")
- list(APPEND ${em_FLAG_LIST_VAR} "--memory-init-file" "0")
- list(APPEND ${em_FLAG_LIST_VAR} "-fno-omit-frame-pointer")
- list(APPEND ${em_FLAG_LIST_VAR} "-sMODULARIZE=1")
- list(APPEND ${em_FLAG_LIST_VAR} "-sNO_FILESYSTEM=1")
- list(APPEND ${em_FLAG_LIST_VAR} "-sEXPORTED_RUNTIME_METHODS=[]")
- list(APPEND ${em_FLAG_LIST_VAR} "-sPRECISE_F32=1")
- list(APPEND ${em_FLAG_LIST_VAR} "-sNODEJS_CATCH_EXIT=0")
- list(APPEND ${em_FLAG_LIST_VAR} "-sNODEJS_CATCH_REJECTION=0")
+ # TODO(tomfinegan): Revisit splitting of compile/link flags for Emscripten,
+ # and drop -Wno-unused-command-line-argument. Emscripten complains about
+ # what are supposedly link-only flags sent with compile commands, but then
+ # proceeds to produce broken code if the warnings are heeded.
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER}
+ "-Wno-unused-command-line-argument")
+
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-Wno-almost-asm")
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "--memory-init-file" "0")
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-fno-omit-frame-pointer")
+
+ # According to Emscripten the following flags are linker only, but sending
+ # these flags (en masse) to only the linker results in a broken Emscripten
+ # build with an empty DracoDecoderModule.
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sALLOW_MEMORY_GROWTH=1")
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sMODULARIZE=1")
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sFILESYSTEM=0")
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER}
+ "-sEXPORTED_FUNCTIONS=[\"_free\",\"_malloc\"]")
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sPRECISE_F32=1")
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sNODEJS_CATCH_EXIT=0")
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sNODEJS_CATCH_REJECTION=0")
if(DRACO_FAST)
- list(APPEND ${em_FLAG_LIST_VAR} "--llvm-lto" "1")
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "--llvm-lto" "1")
endif()
+
+ # The WASM flag is reported as linker only.
if(DRACO_WASM)
- list(APPEND ${em_FLAG_LIST_VAR} "-sWASM=1")
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sWASM=1")
else()
- list(APPEND ${em_FLAG_LIST_VAR} "-sWASM=0")
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sWASM=0")
endif()
+
+ # The LEGACY_VM_SUPPORT flag is reported as linker only.
if(DRACO_IE_COMPATIBLE)
- list(APPEND ${em_FLAG_LIST_VAR} "-sLEGACY_VM_SUPPORT=1")
+ list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sLEGACY_VM_SUPPORT=1")
endif()
endif()
endmacro()
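
Because the flags are now split, callers pass two list-variable names and forward each list to the matching build stage. A minimal usage sketch, assuming the caller appends the results to its own flag lists (variable names here are illustrative):

    draco_get_required_emscripten_flags(
      FLAG_LIST_VAR_COMPILER draco_emscripten_cxx_flags
      FLAG_LIST_VAR_LINKER draco_emscripten_linker_flags)
    list(APPEND DRACO_CXX_FLAGS ${draco_emscripten_cxx_flags})
    list(APPEND DRACO_EXE_LINKER_FLAGS ${draco_emscripten_linker_flags})
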
@@ -66,10 +105,11 @@ macro(draco_generate_emscripten_glue)
"${glue_multi_arg_opts}" ${ARGN})
if(DRACO_VERBOSE GREATER 1)
- message("--------- draco_generate_emscripten_glue -----------\n"
- "glue_INPUT_IDL=${glue_INPUT_IDL}\n"
- "glue_OUTPUT_PATH=${glue_OUTPUT_PATH}\n" ]
- "----------------------------------------------------\n")
+ message(
+ "--------- draco_generate_emscripten_glue -----------\n"
+ "glue_INPUT_IDL=${glue_INPUT_IDL}\n"
+ "glue_OUTPUT_PATH=${glue_OUTPUT_PATH}\n"
+ "----------------------------------------------------\n")
endif()
if(NOT glue_INPUT_IDL OR NOT glue_OUTPUT_PATH)
@@ -79,22 +119,22 @@ macro(draco_generate_emscripten_glue)
endif()
# Generate the glue source.
- execute_process(COMMAND ${PYTHON_EXECUTABLE}
- $ENV{EMSCRIPTEN}/tools/webidl_binder.py
- ${glue_INPUT_IDL} ${glue_OUTPUT_PATH})
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} $ENV{EMSCRIPTEN}/tools/webidl_binder.py
+ ${glue_INPUT_IDL} ${glue_OUTPUT_PATH})
if(NOT EXISTS "${glue_OUTPUT_PATH}.cpp")
message(FATAL_ERROR "JS glue generation failed for ${glue_INPUT_IDL}.")
endif()
# Create a dependency so that it regenerated on edits.
- add_custom_command(OUTPUT "${glue_OUTPUT_PATH}.cpp"
- COMMAND ${PYTHON_EXECUTABLE}
- $ENV{EMSCRIPTEN}/tools/webidl_binder.py
- ${glue_INPUT_IDL} ${glue_OUTPUT_PATH}
- DEPENDS ${draco_js_dec_idl}
- COMMENT "Generating ${glue_OUTPUT_PATH}.cpp."
- WORKING_DIRECTORY ${draco_build}
- VERBATIM)
+ add_custom_command(
+ OUTPUT "${glue_OUTPUT_PATH}.cpp"
+ COMMAND ${PYTHON_EXECUTABLE} $ENV{EMSCRIPTEN}/tools/webidl_binder.py
+ ${glue_INPUT_IDL} ${glue_OUTPUT_PATH}
+ DEPENDS ${draco_js_dec_idl}
+ COMMENT "Generating ${glue_OUTPUT_PATH}.cpp."
+ WORKING_DIRECTORY ${draco_build}
+ VERBATIM)
endmacro()
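
For orientation, a hedged sketch of invoking the glue generator (the IDL variable matches the DEPENDS entry above; the output path is illustrative):

    draco_generate_emscripten_glue(
      INPUT_IDL ${draco_js_dec_idl}
      OUTPUT_PATH "${draco_build}/glue_decoder")
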
# Wrapper for draco_add_executable() that handles the extra work necessary for
@@ -120,8 +160,14 @@ macro(draco_add_emscripten_executable)
unset(emexe_LINK_FLAGS)
set(optional_args)
set(single_value_args NAME GLUE_PATH)
- set(multi_value_args SOURCES DEFINES FEATURES INCLUDES LINK_FLAGS
- PRE_LINK_JS_SOURCES POST_LINK_JS_SOURCES)
+ set(multi_value_args
+ SOURCES
+ DEFINES
+ FEATURES
+ INCLUDES
+ LINK_FLAGS
+ PRE_LINK_JS_SOURCES
+ POST_LINK_JS_SOURCES)
cmake_parse_arguments(emexe "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
@@ -136,49 +182,50 @@ macro(draco_add_emscripten_executable)
endif()
if(DRACO_VERBOSE GREATER 1)
- message("--------- draco_add_emscripten_executable ---------\n"
- "emexe_NAME=${emexe_NAME}\n"
- "emexe_SOURCES=${emexe_SOURCES}\n"
- "emexe_DEFINES=${emexe_DEFINES}\n"
- "emexe_INCLUDES=${emexe_INCLUDES}\n"
- "emexe_LINK_FLAGS=${emexe_LINK_FLAGS}\n"
- "emexe_GLUE_PATH=${emexe_GLUE_PATH}\n"
- "emexe_FEATURES=${emexe_FEATURES}\n"
- "emexe_PRE_LINK_JS_SOURCES=${emexe_PRE_LINK_JS_SOURCES}\n"
- "emexe_POST_LINK_JS_SOURCES=${emexe_POST_LINK_JS_SOURCES}\n"
- "----------------------------------------------------\n")
+ message(
+ "--------- draco_add_emscripten_executable ---------\n"
+ "emexe_NAME=${emexe_NAME}\n"
+ "emexe_SOURCES=${emexe_SOURCES}\n"
+ "emexe_DEFINES=${emexe_DEFINES}\n"
+ "emexe_INCLUDES=${emexe_INCLUDES}\n"
+ "emexe_LINK_FLAGS=${emexe_LINK_FLAGS}\n"
+ "emexe_GLUE_PATH=${emexe_GLUE_PATH}\n"
+ "emexe_FEATURES=${emexe_FEATURES}\n"
+ "emexe_PRE_LINK_JS_SOURCES=${emexe_PRE_LINK_JS_SOURCES}\n"
+ "emexe_POST_LINK_JS_SOURCES=${emexe_POST_LINK_JS_SOURCES}\n"
+ "----------------------------------------------------\n")
endif()
# The Emscripten linker needs the C++ flags in addition to whatever has been
# passed in with the target.
list(APPEND emexe_LINK_FLAGS ${DRACO_CXX_FLAGS})
- if(DRACO_GLTF)
- draco_add_executable(NAME
- ${emexe_NAME}
- OUTPUT_NAME
- ${emexe_NAME}_gltf
- SOURCES
- ${emexe_SOURCES}
- DEFINES
- ${emexe_DEFINES}
- INCLUDES
- ${emexe_INCLUDES}
- LINK_FLAGS
- ${emexe_LINK_FLAGS})
+ if(DRACO_GLTF_BITSTREAM)
+ # Add "_gltf" suffix to target output name.
+ draco_add_executable(
+ NAME ${emexe_NAME}
+ OUTPUT_NAME ${emexe_NAME}_gltf
+ SOURCES ${emexe_SOURCES}
+ DEFINES ${emexe_DEFINES}
+ INCLUDES ${emexe_INCLUDES}
+ LINK_FLAGS ${emexe_LINK_FLAGS})
else()
- draco_add_executable(NAME ${emexe_NAME} SOURCES ${emexe_SOURCES} DEFINES
- ${emexe_DEFINES} INCLUDES ${emexe_INCLUDES} LINK_FLAGS
- ${emexe_LINK_FLAGS})
+ draco_add_executable(
+ NAME ${emexe_NAME}
+ SOURCES ${emexe_SOURCES}
+ DEFINES ${emexe_DEFINES}
+ INCLUDES ${emexe_INCLUDES}
+ LINK_FLAGS ${emexe_LINK_FLAGS})
endif()
foreach(feature ${emexe_FEATURES})
draco_enable_feature(FEATURE ${feature} TARGETS ${emexe_NAME})
endforeach()
- set_property(SOURCE ${emexe_SOURCES}
- APPEND
- PROPERTY OBJECT_DEPENDS "${emexe_GLUE_PATH}.cpp")
+ set_property(
+ SOURCE ${emexe_SOURCES}
+ APPEND
+ PROPERTY OBJECT_DEPENDS "${emexe_GLUE_PATH}.cpp")
em_link_pre_js(${emexe_NAME} ${emexe_PRE_LINK_JS_SOURCES})
em_link_post_js(${emexe_NAME} "${emexe_GLUE_PATH}.js"
${emexe_POST_LINK_JS_SOURCES})
diff --git a/contrib/draco/cmake/draco_flags.cmake b/contrib/draco/cmake/draco_flags.cmake
index 0397859a4..f3b24c6e1 100644
--- a/contrib/draco/cmake/draco_flags.cmake
+++ b/contrib/draco/cmake/draco_flags.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_DRACO_FLAGS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_FLAGS_CMAKE_
@@ -24,7 +38,7 @@ macro(draco_set_compiler_flags_for_sources)
endif()
set_source_files_properties(${compiler_SOURCES} PROPERTIES COMPILE_FLAGS
- ${compiler_FLAGS})
+ ${compiler_FLAGS})
if(DRACO_VERBOSE GREATER 1)
foreach(source ${compiler_SOURCES})
@@ -85,8 +99,8 @@ macro(draco_test_cxx_flag)
# are passed as a list it will remove the list separators, and attempt to run
# a compile command using list entries concatenated together as a single
# argument. Avoid the problem by forcing the argument to be a string.
- draco_set_and_stringify(SOURCE_VARS all_cxx_flags DEST all_cxx_flags)
- check_cxx_compiler_flag("${all_cxx_flags}" draco_all_cxx_flags_pass)
+ draco_set_and_stringify(SOURCE_VARS all_cxx_flags DEST all_cxx_flags_string)
+ check_cxx_compiler_flag("${all_cxx_flags_string}" draco_all_cxx_flags_pass)
if(cxx_test_FLAG_REQUIRED AND NOT draco_all_cxx_flags_pass)
draco_die("Flag test failed for required flag(s): "
@@ -245,3 +259,34 @@ macro(draco_set_cxx_flags)
draco_test_cxx_flag(FLAG_LIST_VAR_NAMES ${cxx_flag_lists})
endif()
endmacro()
+
+# Collects Draco built-in and user-specified linker flags and tests them. Halts
+# configuration and reports the error when any flags cause the build to fail.
+#
+# Note: draco_test_exe_linker_flag() does the real work of setting the flags and
+# running the test compile commands.
+macro(draco_set_exe_linker_flags)
+ unset(linker_flag_lists)
+
+ if(DRACO_VERBOSE)
+ message("draco_set_exe_linker_flags: "
+ "draco_base_exe_linker_flags=${draco_base_exe_linker_flags}")
+ endif()
+
+ if(draco_base_exe_linker_flags)
+ list(APPEND linker_flag_lists draco_base_exe_linker_flags)
+ endif()
+
+ if(linker_flag_lists)
+ unset(test_linker_flags)
+
+ if(DRACO_VERBOSE)
+ message("draco_set_exe_linker_flags: "
+ "linker_flag_lists=${linker_flag_lists}")
+ endif()
+
+ draco_set_and_stringify(DEST test_linker_flags SOURCE_VARS
+ ${linker_flag_lists})
+ draco_test_exe_linker_flag(FLAG_LIST_VAR_NAME test_linker_flags)
+ endif()
+endmacro()
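
draco_test_exe_linker_flag() itself is not part of this patch; conceptually it is a linker-flag probe. A rough equivalent using stock CMake, shown only to illustrate the idea, not the patch's implementation:

    include(CheckCXXSourceCompiles)
    # Feed the stringified flags to the test link step.
    set(CMAKE_REQUIRED_LINK_OPTIONS "${test_linker_flags}")
    check_cxx_source_compiles("int main() { return 0; }" draco_linker_flags_pass)
    if(NOT draco_linker_flags_pass)
      message(FATAL_ERROR "Linker flag test failed for: ${test_linker_flags}")
    endif()
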
diff --git a/contrib/draco/cmake/draco_helpers.cmake b/contrib/draco/cmake/draco_helpers.cmake
index 0b3b804cf..69e24c5be 100644
--- a/contrib/draco/cmake/draco_helpers.cmake
+++ b/contrib/draco/cmake/draco_helpers.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_DRACO_HELPERS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_HELPERS_CMAKE_
diff --git a/contrib/draco/cmake/draco_install.cmake b/contrib/draco/cmake/draco_install.cmake
index 09bfb591d..3be1ba163 100644
--- a/contrib/draco/cmake/draco_install.cmake
+++ b/contrib/draco/cmake/draco_install.cmake
@@ -1,32 +1,32 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_DRACO_INSTALL_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_INSTALL_CMAKE_
set(DRACO_CMAKE_DRACO_INSTALL_CMAKE_ 1)
+include(CMakePackageConfigHelpers)
+include(GNUInstallDirs)
+
# Sets up the draco install targets. Must be called after the static library
# target is created.
macro(draco_setup_install_target)
- include(GNUInstallDirs)
-
- # pkg-config: draco.pc
- set(prefix "${CMAKE_INSTALL_PREFIX}")
- set(exec_prefix "\${prefix}")
- set(libdir "\${prefix}/${CMAKE_INSTALL_LIBDIR}")
- set(includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}")
- set(draco_lib_name "draco")
-
- configure_file("${draco_root}/cmake/draco.pc.template"
- "${draco_build}/draco.pc" @ONLY NEWLINE_STYLE UNIX)
- install(FILES "${draco_build}/draco.pc"
- DESTINATION "${prefix}/${CMAKE_INSTALL_LIBDIR}/pkgconfig")
-
- # CMake config: draco-config.cmake
- set(DRACO_INCLUDE_DIRS "${prefix}/${CMAKE_INSTALL_INCLUDEDIR}")
- configure_file("${draco_root}/cmake/draco-config.cmake.template"
- "${draco_build}/draco-config.cmake" @ONLY NEWLINE_STYLE UNIX)
- install(
- FILES "${draco_build}/draco-config.cmake"
- DESTINATION "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/cmake")
+ set(bin_path "${CMAKE_INSTALL_BINDIR}")
+ set(data_path "${CMAKE_INSTALL_DATAROOTDIR}")
+ set(includes_path "${CMAKE_INSTALL_INCLUDEDIR}")
+ set(libs_path "${CMAKE_INSTALL_LIBDIR}")
foreach(file ${draco_sources})
if(file MATCHES "h$")
@@ -34,46 +34,88 @@ macro(draco_setup_install_target)
endif()
endforeach()
+ list(REMOVE_DUPLICATES draco_api_includes)
+
# Strip $draco_src_root from the file paths: we need to install relative to
# $include_directory.
list(TRANSFORM draco_api_includes REPLACE "${draco_src_root}/" "")
- set(include_directory "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}")
foreach(draco_api_include ${draco_api_includes})
get_filename_component(file_directory ${draco_api_include} DIRECTORY)
- set(target_directory "${include_directory}/draco/${file_directory}")
+ set(target_directory "${includes_path}/draco/${file_directory}")
install(FILES ${draco_src_root}/${draco_api_include}
DESTINATION "${target_directory}")
endforeach()
- install(
- FILES "${draco_build}/draco/draco_features.h"
- DESTINATION "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/draco/")
+ install(FILES "${draco_build}/draco/draco_features.h"
+ DESTINATION "${includes_path}/draco/")
- install(TARGETS draco_decoder DESTINATION
- "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}")
- install(TARGETS draco_encoder DESTINATION
- "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}")
+ install(TARGETS draco_decoder DESTINATION "${bin_path}")
+ install(TARGETS draco_encoder DESTINATION "${bin_path}")
+
+ if(DRACO_TRANSCODER_SUPPORTED)
+ install(TARGETS draco_transcoder DESTINATION "${bin_path}")
+ endif()
if(MSVC)
- install(TARGETS draco DESTINATION
- "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
+ install(
+ TARGETS draco
+ EXPORT dracoExport
+ RUNTIME DESTINATION "${bin_path}"
+ ARCHIVE DESTINATION "${libs_path}"
+ LIBRARY DESTINATION "${libs_path}")
else()
- install(TARGETS draco_static DESTINATION
- "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
+ install(
+ TARGETS draco_static
+ EXPORT dracoExport
+ DESTINATION "${libs_path}")
+
if(BUILD_SHARED_LIBS)
- install(TARGETS draco_shared DESTINATION
- "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
+ install(
+ TARGETS draco_shared
+ EXPORT dracoExport
+ RUNTIME DESTINATION "${bin_path}"
+ ARCHIVE DESTINATION "${libs_path}"
+ LIBRARY DESTINATION "${libs_path}")
endif()
endif()
if(DRACO_UNITY_PLUGIN)
- install(TARGETS dracodec_unity DESTINATION
- "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
- endif()
- if(DRACO_MAYA_PLUGIN)
- install(TARGETS draco_maya_wrapper DESTINATION
- "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
+ install(TARGETS dracodec_unity DESTINATION "${libs_path}")
endif()
+ if(DRACO_MAYA_PLUGIN)
+ install(TARGETS draco_maya_wrapper DESTINATION "${libs_path}")
+ endif()
+
+ # pkg-config: draco.pc
+ configure_file("${draco_root}/cmake/draco.pc.template"
+ "${draco_build}/draco.pc" @ONLY NEWLINE_STYLE UNIX)
+ install(FILES "${draco_build}/draco.pc" DESTINATION "${libs_path}/pkgconfig")
+
+ # CMake config: draco-config.cmake
+ configure_package_config_file(
+ "${draco_root}/cmake/draco-config.cmake.template"
+ "${draco_build}/draco-config.cmake"
+ INSTALL_DESTINATION "${data_path}/cmake/draco")
+
+ write_basic_package_version_file(
+ "${draco_build}/draco-config-version.cmake"
+ VERSION ${DRACO_VERSION}
+ COMPATIBILITY AnyNewerVersion)
+
+ export(
+ EXPORT dracoExport
+ NAMESPACE draco::
+ FILE "${draco_build}/draco-targets.cmake")
+
+ install(
+ EXPORT dracoExport
+ NAMESPACE draco::
+ FILE draco-targets.cmake
+ DESTINATION "${data_path}/cmake/draco")
+
+ install(FILES "${draco_build}/draco-config.cmake"
+ "${draco_build}/draco-config-version.cmake"
+ DESTINATION "${data_path}/cmake/draco")
endmacro()
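
With the dracoExport set and the draco:: namespace installed above, downstream projects can consume the package through the generated config files. A minimal consumer sketch; the concrete target name (draco::draco vs. draco::draco_static / draco::draco_shared) depends on how the library was built:

    find_package(draco CONFIG REQUIRED)
    add_executable(demo main.cc)
    target_link_libraries(demo PRIVATE draco::draco_static)
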
diff --git a/contrib/draco/cmake/draco_intrinsics.cmake b/contrib/draco/cmake/draco_intrinsics.cmake
index 9011c0de5..178df97a6 100644
--- a/contrib/draco/cmake/draco_intrinsics.cmake
+++ b/contrib/draco/cmake/draco_intrinsics.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_
@@ -61,17 +75,15 @@ macro(draco_process_intrinsics_sources)
unset(sse4_sources)
list(APPEND sse4_sources ${arg_SOURCES})
- list(FILTER sse4_sources INCLUDE REGEX
- "${draco_sse4_source_file_suffix}$")
+ list(FILTER sse4_sources INCLUDE REGEX "${draco_sse4_source_file_suffix}$")
if(sse4_sources)
unset(sse4_flags)
- draco_get_intrinsics_flag_for_suffix(SUFFIX
- ${draco_sse4_source_file_suffix}
- VARIABLE sse4_flags)
+ draco_get_intrinsics_flag_for_suffix(
+ SUFFIX ${draco_sse4_source_file_suffix} VARIABLE sse4_flags)
if(sse4_flags)
draco_set_compiler_flags_for_sources(SOURCES ${sse4_sources} FLAGS
- ${sse4_flags})
+ ${sse4_flags})
endif()
endif()
endif()
@@ -79,17 +91,15 @@ macro(draco_process_intrinsics_sources)
if(DRACO_ENABLE_NEON AND draco_have_neon)
unset(neon_sources)
list(APPEND neon_sources ${arg_SOURCES})
- list(FILTER neon_sources INCLUDE REGEX
- "${draco_neon_source_file_suffix}$")
+ list(FILTER neon_sources INCLUDE REGEX "${draco_neon_source_file_suffix}$")
if(neon_sources AND DRACO_NEON_INTRINSICS_FLAG)
unset(neon_flags)
- draco_get_intrinsics_flag_for_suffix(SUFFIX
- ${draco_neon_source_file_suffix}
- VARIABLE neon_flags)
+ draco_get_intrinsics_flag_for_suffix(
+ SUFFIX ${draco_neon_source_file_suffix} VARIABLE neon_flags)
if(neon_flags)
draco_set_compiler_flags_for_sources(SOURCES ${neon_sources} FLAGS
- ${neon_flags})
+ ${neon_flags})
endif()
endif()
endif()
diff --git a/contrib/draco/cmake/draco_options.cmake b/contrib/draco/cmake/draco_options.cmake
index 832bfb69f..085149774 100644
--- a/contrib/draco/cmake/draco_options.cmake
+++ b/contrib/draco/cmake/draco_options.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_DRACO_OPTIONS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_OPTIONS_CMAKE_
@@ -18,17 +32,22 @@ macro(draco_option)
cmake_parse_arguments(option "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
- if(NOT (option_NAME AND option_HELPSTRING AND DEFINED option_VALUE))
+ if(NOT
+ (option_NAME
+ AND option_HELPSTRING
+ AND DEFINED option_VALUE))
message(FATAL_ERROR "draco_option: NAME HELPSTRING and VALUE required.")
endif()
option(${option_NAME} ${option_HELPSTRING} ${option_VALUE})
if(DRACO_VERBOSE GREATER 2)
- message("--------- draco_option ---------\n" "option_NAME=${option_NAME}\n"
- "option_HELPSTRING=${option_HELPSTRING}\n"
- "option_VALUE=${option_VALUE}\n"
- "------------------------------------------\n")
+ message(
+ "--------- draco_option ---------\n"
+ "option_NAME=${option_NAME}\n"
+ "option_HELPSTRING=${option_HELPSTRING}\n"
+ "option_VALUE=${option_VALUE}\n"
+ "------------------------------------------\n")
endif()
list(APPEND draco_options ${option_NAME})
@@ -44,33 +63,74 @@ endmacro()
# Set default options.
macro(draco_set_default_options)
- draco_option(NAME DRACO_FAST HELPSTRING "Try to build faster libs." VALUE OFF)
- draco_option(NAME DRACO_JS_GLUE HELPSTRING
- "Enable JS Glue and JS targets when using Emscripten." VALUE ON)
- draco_option(NAME DRACO_IE_COMPATIBLE HELPSTRING
- "Enable support for older IE builds when using Emscripten." VALUE
- OFF)
- draco_option(NAME DRACO_MESH_COMPRESSION HELPSTRING "Enable mesh compression."
- VALUE ON)
- draco_option(NAME DRACO_POINT_CLOUD_COMPRESSION HELPSTRING
- "Enable point cloud compression." VALUE ON)
- draco_option(NAME DRACO_PREDICTIVE_EDGEBREAKER HELPSTRING
- "Enable predictive edgebreaker." VALUE ON)
- draco_option(NAME DRACO_STANDARD_EDGEBREAKER HELPSTRING
- "Enable stand edgebreaker." VALUE ON)
- draco_option(NAME DRACO_BACKWARDS_COMPATIBILITY HELPSTRING
- "Enable backwards compatibility." VALUE ON)
- draco_option(NAME DRACO_DECODER_ATTRIBUTE_DEDUPLICATION HELPSTRING
- "Enable attribute deduping." VALUE OFF)
- draco_option(NAME DRACO_TESTS HELPSTRING "Enables tests." VALUE OFF)
- draco_option(NAME DRACO_WASM HELPSTRING "Enables WASM support." VALUE OFF)
- draco_option(NAME DRACO_UNITY_PLUGIN HELPSTRING
- "Build plugin library for Unity." VALUE OFF)
- draco_option(NAME DRACO_ANIMATION_ENCODING HELPSTRING "Enable animation."
- VALUE OFF)
- draco_option(NAME DRACO_GLTF HELPSTRING "Support GLTF." VALUE OFF)
- draco_option(NAME DRACO_MAYA_PLUGIN HELPSTRING
- "Build plugin library for Maya." VALUE OFF)
+ draco_option(
+ NAME DRACO_FAST
+ HELPSTRING "Try to build faster libs."
+ VALUE OFF)
+ draco_option(
+ NAME DRACO_JS_GLUE
+ HELPSTRING "Enable JS Glue and JS targets when using Emscripten."
+ VALUE ON)
+ draco_option(
+ NAME DRACO_IE_COMPATIBLE
+ HELPSTRING "Enable support for older IE builds when using Emscripten."
+ VALUE OFF)
+ draco_option(
+ NAME DRACO_MESH_COMPRESSION
+ HELPSTRING "Enable mesh compression."
+ VALUE ON)
+ draco_option(
+ NAME DRACO_POINT_CLOUD_COMPRESSION
+ HELPSTRING "Enable point cloud compression."
+ VALUE ON)
+ draco_option(
+ NAME DRACO_PREDICTIVE_EDGEBREAKER
+ HELPSTRING "Enable predictive edgebreaker."
+ VALUE ON)
+ draco_option(
+ NAME DRACO_STANDARD_EDGEBREAKER
+ HELPSTRING "Enable stand edgebreaker."
+ VALUE ON)
+ draco_option(
+ NAME DRACO_BACKWARDS_COMPATIBILITY
+ HELPSTRING "Enable backwards compatibility."
+ VALUE ON)
+ draco_option(
+ NAME DRACO_DECODER_ATTRIBUTE_DEDUPLICATION
+ HELPSTRING "Enable attribute deduping."
+ VALUE OFF)
+ draco_option(
+ NAME DRACO_TESTS
+ HELPSTRING "Enables tests."
+ VALUE OFF)
+ draco_option(
+ NAME DRACO_WASM
+ HELPSTRING "Enables WASM support."
+ VALUE OFF)
+ draco_option(
+ NAME DRACO_UNITY_PLUGIN
+ HELPSTRING "Build plugin library for Unity."
+ VALUE OFF)
+ draco_option(
+ NAME DRACO_ANIMATION_ENCODING
+ HELPSTRING "Enable animation."
+ VALUE OFF)
+ draco_option(
+ NAME DRACO_GLTF_BITSTREAM
+ HELPSTRING "Draco GLTF extension bitstream specified features only."
+ VALUE OFF)
+ draco_option(
+ NAME DRACO_MAYA_PLUGIN
+ HELPSTRING "Build plugin library for Maya."
+ VALUE OFF)
+ draco_option(
+ NAME DRACO_TRANSCODER_SUPPORTED
+ HELPSTRING "Enable the Draco transcoder."
+ VALUE OFF)
+ draco_option(
+ NAME DRACO_DEBUG_COMPILER_WARNINGS
+ HELPSTRING "Turn on more warnings."
+ VALUE OFF)
draco_check_deprecated_options()
endmacro()
@@ -117,14 +177,16 @@ macro(draco_check_deprecated_options)
DRACO_MAYA_PLUGIN)
draco_handle_deprecated_option(OLDNAME BUILD_USD_PLUGIN NEWNAME
BUILD_SHARED_LIBS)
+ draco_handle_deprecated_option(OLDNAME DRACO_GLTF NEWNAME
+ DRACO_GLTF_BITSTREAM)
endmacro()
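
Since DRACO_GLTF is now routed through draco_handle_deprecated_option() to DRACO_GLTF_BITSTREAM, both spellings reach the same switch. A configure-time sketch (generator and other flags omitted):

    # New option name.
    cmake -S . -B build -DDRACO_GLTF_BITSTREAM=ON
    # Old name, mapped by the OLDNAME/NEWNAME pairing above.
    cmake -S . -B build -DDRACO_GLTF=ON
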
# Macro for setting Draco features based on user configuration. Features enabled
# by this macro are Draco global.
macro(draco_set_optional_features)
- if(DRACO_GLTF)
- # Override settings when building for GLTF.
+ if(DRACO_GLTF_BITSTREAM)
+ # Enable only the features included in the Draco GLTF bitstream spec.
draco_enable_feature(FEATURE "DRACO_MESH_COMPRESSION_SUPPORTED")
draco_enable_feature(FEATURE "DRACO_NORMAL_ENCODING_SUPPORTED")
draco_enable_feature(FEATURE "DRACO_STANDARD_EDGEBREAKER_SUPPORTED")
@@ -170,6 +232,11 @@ macro(draco_set_optional_features)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
+ if(DRACO_TRANSCODER_SUPPORTED)
+ draco_enable_feature(FEATURE "DRACO_TRANSCODER_SUPPORTED")
+ endif()
+
+
endmacro()
# Macro that handles tracking of Draco preprocessor symbols for the purpose of
@@ -221,8 +288,56 @@ function(draco_generate_features_h)
file(APPEND "${draco_features_file_name}.new" "#define ${feature}\n")
endforeach()
+ if(MSVC)
+ if(NOT DRACO_DEBUG_COMPILER_WARNINGS)
+ file(APPEND "${draco_features_file_name}.new"
+ "// Enable DRACO_DEBUG_COMPILER_WARNINGS at CMake generation \n"
+ "// time to remove these pragmas.\n")
+
+ # warning C4018: '': signed/unsigned mismatch.
+ file(APPEND "${draco_features_file_name}.new"
+ "#pragma warning(disable:4018)\n")
+
+ # warning C4146: unary minus operator applied to unsigned type, result
+ # still unsigned
+ file(APPEND "${draco_features_file_name}.new"
+ "#pragma warning(disable:4146)\n")
+
+ # warning C4244: 'return': conversion from '' to '', possible
+ # loss of data.
+ file(APPEND "${draco_features_file_name}.new"
+ "#pragma warning(disable:4244)\n")
+
+ # warning C4267: 'initializing' conversion from '' to '',
+ # possible loss of data.
+ file(APPEND "${draco_features_file_name}.new"
+ "#pragma warning(disable:4267)\n")
+
+ # warning C4305: 'context' : truncation from 'type1' to 'type2'.
+ file(APPEND "${draco_features_file_name}.new"
+ "#pragma warning(disable:4305)\n")
+
+ # warning C4661: 'identifier' : no suitable definition provided for
+ # explicit template instantiation request.
+ file(APPEND "${draco_features_file_name}.new"
+ "#pragma warning(disable:4661)\n")
+
+ # warning C4800: Implicit conversion from 'type' to bool. Possible
+ # information loss.
+ # Also, in older MSVC releases:
+ # warning C4800: 'type' : forcing value to bool 'true' or 'false'
+ # (performance warning).
+ file(APPEND "${draco_features_file_name}.new"
+ "#pragma warning(disable:4800)\n")
+
+ # warning C4804: '': unsafe use of type '' in operation.
+ file(APPEND "${draco_features_file_name}.new"
+ "#pragma warning(disable:4804)\n")
+ endif()
+ endif()
+
file(APPEND "${draco_features_file_name}.new"
- "\n#endif // DRACO_FEATURES_H_")
+ "\n#endif // DRACO_FEATURES_H_\n")
# Will replace ${draco_features_file_name} only if the file content has
# changed. This prevents forced Draco rebuilds after CMake runs.
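
The MSVC branch writes the warning-suppression pragmas directly into the generated header. Roughly what draco_features.h ends up looking like with DRACO_DEBUG_COMPILER_WARNINGS off (feature list abbreviated; a sketch, not the literal generated file):

    #ifndef DRACO_FEATURES_H_
    #define DRACO_FEATURES_H_

    #define DRACO_MESH_COMPRESSION_SUPPORTED
    // Enable DRACO_DEBUG_COMPILER_WARNINGS at CMake generation
    // time to remove these pragmas.
    #pragma warning(disable:4018)
    // ... remaining pragmas from the list above ...
    #pragma warning(disable:4804)

    #endif  // DRACO_FEATURES_H_
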
diff --git a/contrib/draco/cmake/draco_sanitizer.cmake b/contrib/draco/cmake/draco_sanitizer.cmake
index d2e41a6cb..77d141481 100644
--- a/contrib/draco/cmake/draco_sanitizer.cmake
+++ b/contrib/draco/cmake/draco_sanitizer.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_DRACO_SANITIZER_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_SANITIZER_CMAKE_
@@ -5,7 +19,9 @@ set(DRACO_CMAKE_DRACO_SANITIZER_CMAKE_ 1)
# Handles the details of enabling sanitizers.
macro(draco_configure_sanitizer)
- if(DRACO_SANITIZE AND NOT EMSCRIPTEN AND NOT MSVC)
+ if(DRACO_SANITIZE
+ AND NOT EMSCRIPTEN
+ AND NOT MSVC)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
if(DRACO_SANITIZE MATCHES "cfi")
list(APPEND SAN_CXX_FLAGS "-flto" "-fno-sanitize-trap=cfi")
@@ -13,8 +29,8 @@ macro(draco_configure_sanitizer)
"-fuse-ld=gold")
endif()
- if(${CMAKE_SIZEOF_VOID_P} EQUAL 4
- AND DRACO_SANITIZE MATCHES "integer|undefined")
+ if(${CMAKE_SIZEOF_VOID_P} EQUAL 4 AND DRACO_SANITIZE MATCHES
+ "integer|undefined")
list(APPEND SAN_LINKER_FLAGS "--rtlib=compiler-rt" "-lgcc_s")
endif()
endif()
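
DRACO_SANITIZE takes the sanitizer name directly and is skipped for MSVC and Emscripten builds, per the condition above. A typical standalone invocation on a clang host (illustrative):

    cmake -S . -B build -DCMAKE_CXX_COMPILER=clang++ -DDRACO_SANITIZE=address
    cmake --build build
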
diff --git a/contrib/draco/cmake/draco_targets.cmake b/contrib/draco/cmake/draco_targets.cmake
index 0456c4d7b..c8c79f511 100644
--- a/contrib/draco/cmake/draco_targets.cmake
+++ b/contrib/draco/cmake/draco_targets.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_DRACO_TARGETS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_TARGETS_CMAKE_
@@ -51,26 +65,33 @@ macro(draco_add_executable)
unset(exe_LIB_DEPS)
set(optional_args TEST)
set(single_value_args NAME OUTPUT_NAME)
- set(multi_value_args SOURCES DEFINES INCLUDES COMPILE_FLAGS LINK_FLAGS
- OBJLIB_DEPS LIB_DEPS)
+ set(multi_value_args
+ SOURCES
+ DEFINES
+ INCLUDES
+ COMPILE_FLAGS
+ LINK_FLAGS
+ OBJLIB_DEPS
+ LIB_DEPS)
cmake_parse_arguments(exe "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(DRACO_VERBOSE GREATER 1)
- message("--------- draco_add_executable ---------\n"
- "exe_TEST=${exe_TEST}\n"
- "exe_TEST_DEFINES_MAIN=${exe_TEST_DEFINES_MAIN}\n"
- "exe_NAME=${exe_NAME}\n"
- "exe_OUTPUT_NAME=${exe_OUTPUT_NAME}\n"
- "exe_SOURCES=${exe_SOURCES}\n"
- "exe_DEFINES=${exe_DEFINES}\n"
- "exe_INCLUDES=${exe_INCLUDES}\n"
- "exe_COMPILE_FLAGS=${exe_COMPILE_FLAGS}\n"
- "exe_LINK_FLAGS=${exe_LINK_FLAGS}\n"
- "exe_OBJLIB_DEPS=${exe_OBJLIB_DEPS}\n"
- "exe_LIB_DEPS=${exe_LIB_DEPS}\n"
- "------------------------------------------\n")
+ message(
+ "--------- draco_add_executable ---------\n"
+ "exe_TEST=${exe_TEST}\n"
+ "exe_TEST_DEFINES_MAIN=${exe_TEST_DEFINES_MAIN}\n"
+ "exe_NAME=${exe_NAME}\n"
+ "exe_OUTPUT_NAME=${exe_OUTPUT_NAME}\n"
+ "exe_SOURCES=${exe_SOURCES}\n"
+ "exe_DEFINES=${exe_DEFINES}\n"
+ "exe_INCLUDES=${exe_INCLUDES}\n"
+ "exe_COMPILE_FLAGS=${exe_COMPILE_FLAGS}\n"
+ "exe_LINK_FLAGS=${exe_LINK_FLAGS}\n"
+ "exe_OBJLIB_DEPS=${exe_OBJLIB_DEPS}\n"
+ "exe_LIB_DEPS=${exe_LIB_DEPS}\n"
+ "------------------------------------------\n")
endif()
if(NOT (exe_NAME AND exe_SOURCES))
@@ -87,7 +108,12 @@ macro(draco_add_executable)
endif()
add_executable(${exe_NAME} ${exe_SOURCES})
- set_target_properties(${exe_NAME} PROPERTIES VERSION ${DRACO_VERSION})
+
+ target_compile_features(${exe_NAME} PUBLIC cxx_std_11)
+
+ if(NOT EMSCRIPTEN)
+ set_target_properties(${exe_NAME} PROPERTIES VERSION ${DRACO_VERSION})
+ endif()
if(exe_OUTPUT_NAME)
set_target_properties(${exe_NAME} PROPERTIES OUTPUT_NAME ${exe_OUTPUT_NAME})
@@ -104,8 +130,8 @@ macro(draco_add_executable)
endif()
if(exe_COMPILE_FLAGS OR DRACO_CXX_FLAGS)
- target_compile_options(${exe_NAME}
- PRIVATE ${exe_COMPILE_FLAGS} ${DRACO_CXX_FLAGS})
+ target_compile_options(${exe_NAME} PRIVATE ${exe_COMPILE_FLAGS}
+ ${DRACO_CXX_FLAGS})
endif()
if(exe_LINK_FLAGS OR DRACO_EXE_LINKER_FLAGS)
@@ -113,8 +139,8 @@ macro(draco_add_executable)
list(APPEND exe_LINK_FLAGS "${DRACO_EXE_LINKER_FLAGS}")
# LINK_FLAGS is managed as a string.
draco_set_and_stringify(SOURCE "${exe_LINK_FLAGS}" DEST exe_LINK_FLAGS)
- set_target_properties(${exe_NAME}
- PROPERTIES LINK_FLAGS "${exe_LINK_FLAGS}")
+ set_target_properties(${exe_NAME} PROPERTIES LINK_FLAGS
+ "${exe_LINK_FLAGS}")
else()
target_link_options(${exe_NAME} PRIVATE ${exe_LINK_FLAGS}
${DRACO_EXE_LINKER_FLAGS})
@@ -136,12 +162,7 @@ macro(draco_add_executable)
endif()
if(exe_LIB_DEPS)
- unset(exe_static)
- if("${CMAKE_EXE_LINKER_FLAGS} ${DRACO_EXE_LINKER_FLAGS}" MATCHES "static")
- set(exe_static ON)
- endif()
-
- if(exe_static AND CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU")
+ if(CMAKE_CXX_COMPILER_ID MATCHES "^Clang|^GNU")
# Third party dependencies can introduce dependencies on system and test
# libraries. Since the target created here is an executable, and CMake
# does not provide a method of controlling order of link dependencies,
@@ -149,6 +170,10 @@ macro(draco_add_executable)
# ensure that dependencies of third party targets can be resolved when
# those dependencies happen to be resolved by dependencies of the current
# target.
+ # TODO(tomfinegan): For portability use LINK_GROUP with RESCAN instead of
+ # directly (ab)using compiler/linker specific flags once CMake v3.24 is in
+ # wider use. See:
+ # https://cmake.org/cmake/help/latest/manual/cmake-generator-expressions.7.html#genex:LINK_GROUP
list(INSERT exe_LIB_DEPS 0 -Wl,--start-group)
list(APPEND exe_LIB_DEPS -Wl,--end-group)
endif()
@@ -209,27 +234,36 @@ macro(draco_add_library)
unset(lib_TARGET_PROPERTIES)
set(optional_args TEST)
set(single_value_args NAME OUTPUT_NAME TYPE)
- set(multi_value_args SOURCES DEFINES INCLUDES COMPILE_FLAGS LINK_FLAGS
- OBJLIB_DEPS LIB_DEPS PUBLIC_INCLUDES TARGET_PROPERTIES)
+ set(multi_value_args
+ SOURCES
+ DEFINES
+ INCLUDES
+ COMPILE_FLAGS
+ LINK_FLAGS
+ OBJLIB_DEPS
+ LIB_DEPS
+ PUBLIC_INCLUDES
+ TARGET_PROPERTIES)
cmake_parse_arguments(lib "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(DRACO_VERBOSE GREATER 1)
- message("--------- draco_add_library ---------\n"
- "lib_TEST=${lib_TEST}\n"
- "lib_NAME=${lib_NAME}\n"
- "lib_OUTPUT_NAME=${lib_OUTPUT_NAME}\n"
- "lib_TYPE=${lib_TYPE}\n"
- "lib_SOURCES=${lib_SOURCES}\n"
- "lib_DEFINES=${lib_DEFINES}\n"
- "lib_INCLUDES=${lib_INCLUDES}\n"
- "lib_COMPILE_FLAGS=${lib_COMPILE_FLAGS}\n"
- "lib_LINK_FLAGS=${lib_LINK_FLAGS}\n"
- "lib_OBJLIB_DEPS=${lib_OBJLIB_DEPS}\n"
- "lib_LIB_DEPS=${lib_LIB_DEPS}\n"
- "lib_PUBLIC_INCLUDES=${lib_PUBLIC_INCLUDES}\n"
- "---------------------------------------\n")
+ message(
+ "--------- draco_add_library ---------\n"
+ "lib_TEST=${lib_TEST}\n"
+ "lib_NAME=${lib_NAME}\n"
+ "lib_OUTPUT_NAME=${lib_OUTPUT_NAME}\n"
+ "lib_TYPE=${lib_TYPE}\n"
+ "lib_SOURCES=${lib_SOURCES}\n"
+ "lib_DEFINES=${lib_DEFINES}\n"
+ "lib_INCLUDES=${lib_INCLUDES}\n"
+ "lib_COMPILE_FLAGS=${lib_COMPILE_FLAGS}\n"
+ "lib_LINK_FLAGS=${lib_LINK_FLAGS}\n"
+ "lib_OBJLIB_DEPS=${lib_OBJLIB_DEPS}\n"
+ "lib_LIB_DEPS=${lib_LIB_DEPS}\n"
+ "lib_PUBLIC_INCLUDES=${lib_PUBLIC_INCLUDES}\n"
+ "---------------------------------------\n")
endif()
if(NOT (lib_NAME AND lib_TYPE))
@@ -256,14 +290,24 @@ macro(draco_add_library)
endif()
add_library(${lib_NAME} ${lib_TYPE} ${lib_SOURCES})
+
+ target_compile_features(${lib_NAME} PUBLIC cxx_std_11)
+
+ target_include_directories(${lib_NAME} PUBLIC $)
+
+ if(BUILD_SHARED_LIBS)
+ # Enable PIC for all targets in shared configurations.
+ set_target_properties(${lib_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+ endif()
+
if(lib_SOURCES)
draco_process_intrinsics_sources(TARGET ${lib_NAME} SOURCES ${lib_SOURCES})
endif()
if(lib_OUTPUT_NAME)
if(NOT (BUILD_SHARED_LIBS AND MSVC))
- set_target_properties(${lib_NAME}
- PROPERTIES OUTPUT_NAME ${lib_OUTPUT_NAME})
+ set_target_properties(${lib_NAME} PROPERTIES OUTPUT_NAME
+ ${lib_OUTPUT_NAME})
endif()
endif()
@@ -280,8 +324,8 @@ macro(draco_add_library)
endif()
if(lib_COMPILE_FLAGS OR DRACO_CXX_FLAGS)
- target_compile_options(${lib_NAME}
- PRIVATE ${lib_COMPILE_FLAGS} ${DRACO_CXX_FLAGS})
+ target_compile_options(${lib_NAME} PRIVATE ${lib_COMPILE_FLAGS}
+ ${DRACO_CXX_FLAGS})
endif()
if(lib_LINK_FLAGS)
@@ -320,11 +364,12 @@ macro(draco_add_library)
set_target_properties(${lib_NAME} PROPERTIES PREFIX "")
endif()
- # VERSION and SOVERSION as necessary
- if(NOT lib_TYPE STREQUAL STATIC AND NOT lib_TYPE STREQUAL MODULE)
- set_target_properties(${lib_NAME} PROPERTIES VERSION ${DRACO_VERSION})
- if(NOT MSVC)
- set_target_properties(${lib_NAME} PROPERTIES SOVERSION ${DRACO_SOVERSION})
+ if(NOT EMSCRIPTEN)
+ # VERSION and SOVERSION as necessary
+ if((lib_TYPE STREQUAL BUNDLE OR lib_TYPE STREQUAL SHARED) AND NOT MSVC)
+ set_target_properties(
+ ${lib_NAME} PROPERTIES VERSION ${DRACO_SOVERSION}
+ SOVERSION ${DRACO_SOVERSION_MAJOR})
endif()
endif()
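
For orientation, a hedged sketch of how the two target macros are called elsewhere in the build (argument values are illustrative, not taken from this patch):

    draco_add_library(
      NAME draco_attributes
      TYPE OBJECT
      SOURCES ${draco_attributes_sources}
      DEFINES ${draco_defines}
      INCLUDES ${draco_include_paths})

    draco_add_executable(
      NAME draco_decoder
      SOURCES ${draco_decoder_src}
      DEFINES ${draco_defines}
      INCLUDES ${draco_include_paths}
      LIB_DEPS ${draco_dependency})
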
diff --git a/contrib/draco/cmake/draco_test_config.h.cmake b/contrib/draco/cmake/draco_test_config.h.cmake
index 77a574123..9bb174569 100644
--- a/contrib/draco/cmake/draco_test_config.h.cmake
+++ b/contrib/draco/cmake/draco_test_config.h.cmake
@@ -1,3 +1,17 @@
+// Copyright 2021 The Draco Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
#ifndef DRACO_TESTING_DRACO_TEST_CONFIG_H_
#define DRACO_TESTING_DRACO_TEST_CONFIG_H_
@@ -9,5 +23,6 @@
#define DRACO_TEST_DATA_DIR "${DRACO_TEST_DATA_DIR}"
#define DRACO_TEST_TEMP_DIR "${DRACO_TEST_TEMP_DIR}"
+#define DRACO_TEST_ROOT_DIR "${DRACO_TEST_ROOT_DIR}"
#endif // DRACO_TESTING_DRACO_TEST_CONFIG_H_
diff --git a/contrib/draco/cmake/draco_tests.cmake b/contrib/draco/cmake/draco_tests.cmake
index a6dfc5b57..1d905a969 100644
--- a/contrib/draco/cmake/draco_tests.cmake
+++ b/contrib/draco/cmake/draco_tests.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_DRACO_TESTS_CMAKE)
return()
endif()
@@ -10,6 +24,13 @@ set(draco_factory_test_sources
"${draco_src_root}/io/file_reader_factory_test.cc"
"${draco_src_root}/io/file_writer_factory_test.cc")
+list(
+ APPEND draco_test_common_sources
+ "${draco_src_root}/core/draco_test_base.h"
+ "${draco_src_root}/core/draco_test_utils.cc"
+ "${draco_src_root}/core/draco_test_utils.h"
+ "${draco_src_root}/core/status.cc")
+
list(
APPEND
draco_test_sources
@@ -30,22 +51,23 @@ list(
"${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_encoding_test.cc"
"${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoding_test.cc"
"${draco_src_root}/core/buffer_bit_coding_test.cc"
- "${draco_src_root}/core/draco_test_base.h"
- "${draco_src_root}/core/draco_test_utils.cc"
- "${draco_src_root}/core/draco_test_utils.h"
"${draco_src_root}/core/math_utils_test.cc"
"${draco_src_root}/core/quantization_utils_test.cc"
"${draco_src_root}/core/status_test.cc"
"${draco_src_root}/core/vector_d_test.cc"
"${draco_src_root}/io/file_reader_test_common.h"
"${draco_src_root}/io/file_utils_test.cc"
+ "${draco_src_root}/io/file_writer_utils_test.cc"
"${draco_src_root}/io/stdio_file_reader_test.cc"
"${draco_src_root}/io/stdio_file_writer_test.cc"
"${draco_src_root}/io/obj_decoder_test.cc"
"${draco_src_root}/io/obj_encoder_test.cc"
"${draco_src_root}/io/ply_decoder_test.cc"
"${draco_src_root}/io/ply_reader_test.cc"
+ "${draco_src_root}/io/stl_decoder_test.cc"
+ "${draco_src_root}/io/stl_encoder_test.cc"
"${draco_src_root}/io/point_cloud_io_test.cc"
+ "${draco_src_root}/mesh/corner_table_test.cc"
"${draco_src_root}/mesh/mesh_are_equivalent_test.cc"
"${draco_src_root}/mesh/mesh_cleanup_test.cc"
"${draco_src_root}/mesh/triangle_soup_mesh_builder_test.cc"
@@ -54,47 +76,71 @@ list(
"${draco_src_root}/point_cloud/point_cloud_builder_test.cc"
"${draco_src_root}/point_cloud/point_cloud_test.cc")
-list(APPEND draco_gtest_all
- "${draco_root}/../googletest/googletest/src/gtest-all.cc")
-list(APPEND draco_gtest_main
- "${draco_root}/../googletest/googletest/src/gtest_main.cc")
+if(DRACO_TRANSCODER_SUPPORTED)
+ list(
+ APPEND draco_test_sources
+ "${draco_src_root}/animation/animation_test.cc"
+ "${draco_src_root}/io/gltf_decoder_test.cc"
+ "${draco_src_root}/io/gltf_encoder_test.cc"
+ "${draco_src_root}/io/gltf_utils_test.cc"
+ "${draco_src_root}/io/gltf_test_helper.cc"
+ "${draco_src_root}/io/gltf_test_helper.h"
+ "${draco_src_root}/io/scene_io_test.cc"
+ "${draco_src_root}/io/texture_io_test.cc"
+ "${draco_src_root}/material/material_library_test.cc"
+ "${draco_src_root}/material/material_test.cc"
+ "${draco_src_root}/metadata/property_table_test.cc"
+ "${draco_src_root}/metadata/structural_metadata_test.cc"
+ "${draco_src_root}/scene/instance_array_test.cc"
+ "${draco_src_root}/scene/light_test.cc"
+ "${draco_src_root}/scene/mesh_group_test.cc"
+ "${draco_src_root}/scene/scene_test.cc"
+ "${draco_src_root}/scene/scene_are_equivalent_test.cc"
+ "${draco_src_root}/scene/scene_utils_test.cc"
+ "${draco_src_root}/scene/trs_matrix_test.cc"
+ "${draco_src_root}/texture/texture_library_test.cc"
+ "${draco_src_root}/texture/texture_map_test.cc"
+ "${draco_src_root}/texture/texture_transform_test.cc")
+
+endif()
macro(draco_setup_test_targets)
if(DRACO_TESTS)
+ draco_setup_googletest()
+
if(NOT (EXISTS ${draco_gtest_all} AND EXISTS ${draco_gtest_main}))
- message(FATAL "googletest must be a sibling directory of ${draco_root}.")
+ message(FATAL_ERROR "googletest missing, run git submodule update --init")
endif()
list(APPEND draco_test_defines GTEST_HAS_PTHREAD=0)
- draco_add_library(TEST
- NAME
- draco_gtest
- TYPE
- STATIC
- SOURCES
- ${draco_gtest_all}
- DEFINES
- ${draco_defines}
- ${draco_test_defines}
- INCLUDES
- ${draco_test_include_paths})
+ draco_add_library(
+ TEST
+ NAME draco_test_common
+ TYPE STATIC
+ SOURCES ${draco_test_common_sources}
+ DEFINES ${draco_defines} ${draco_test_defines}
+ INCLUDES ${draco_test_include_paths})
- draco_add_library(TEST
- NAME
- draco_gtest_main
- TYPE
- STATIC
- SOURCES
- ${draco_gtest_main}
- DEFINES
- ${draco_defines}
- ${draco_test_defines}
- INCLUDES
- ${draco_test_include_paths})
+ draco_add_library(
+ TEST
+ NAME draco_gtest
+ TYPE STATIC
+ SOURCES ${draco_gtest_all}
+ DEFINES ${draco_defines} ${draco_test_defines}
+ INCLUDES ${draco_test_include_paths})
+
+ draco_add_library(
+ TEST
+ NAME draco_gtest_main
+ TYPE STATIC
+ SOURCES ${draco_gtest_main}
+ DEFINES ${draco_defines} ${draco_test_defines}
+ INCLUDES ${draco_test_include_paths})
set(DRACO_TEST_DATA_DIR "${draco_root}/testdata")
set(DRACO_TEST_TEMP_DIR "${draco_build}/draco_test_temp")
+ set(DRACO_TEST_ROOT_DIR "${draco_root}")
file(MAKE_DIRECTORY "${DRACO_TEST_TEMP_DIR}")
# Sets DRACO_TEST_DATA_DIR and DRACO_TEST_TEMP_DIR.
@@ -102,32 +148,24 @@ macro(draco_setup_test_targets)
"${draco_build}/testing/draco_test_config.h")
# Create the test targets.
- draco_add_executable(NAME
- draco_tests
- SOURCES
- ${draco_test_sources}
- DEFINES
- ${draco_defines}
- ${draco_test_defines}
- INCLUDES
- ${draco_test_include_paths}
- LIB_DEPS
- draco_static
- draco_gtest
- draco_gtest_main)
+ draco_add_executable(
+ TEST
+ NAME draco_tests
+ SOURCES ${draco_test_sources}
+ DEFINES ${draco_defines} ${draco_test_defines}
+ INCLUDES ${draco_test_include_paths}
+ LIB_DEPS ${draco_dependency} draco_gtest draco_gtest_main
+ draco_test_common)
+
+ draco_add_executable(
+ TEST
+ NAME draco_factory_tests
+ SOURCES ${draco_factory_test_sources}
+ DEFINES ${draco_defines} ${draco_test_defines}
+ INCLUDES ${draco_test_include_paths}
+ LIB_DEPS ${draco_dependency} draco_gtest draco_gtest_main
+ draco_test_common)
+
- draco_add_executable(NAME
- draco_factory_tests
- SOURCES
- ${draco_factory_test_sources}
- DEFINES
- ${draco_defines}
- ${draco_test_defines}
- INCLUDES
- ${draco_test_include_paths}
- LIB_DEPS
- draco_static
- draco_gtest
- draco_gtest_main)
endif()
endmacro()
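
With googletest resolved through draco_setup_googletest(), building the tests in a standalone draco checkout follows the usual submodule-plus-option flow (assimp's bundled copy does not normally run these); commands shown are illustrative:

    git submodule update --init
    cmake -S . -B build -DDRACO_TESTS=ON
    cmake --build build --target draco_tests draco_factory_tests
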
diff --git a/contrib/draco/cmake/draco_variables.cmake b/contrib/draco/cmake/draco_variables.cmake
index 8dbc77a53..6d1b6a99d 100644
--- a/contrib/draco/cmake/draco_variables.cmake
+++ b/contrib/draco/cmake/draco_variables.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_DRACO_VARIABLES_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_VARIABLES_CMAKE_
@@ -14,8 +28,7 @@ macro(draco_variable_must_be_directory variable_name)
if("${${variable_name}}" STREQUAL "")
message(
- FATAL_ERROR
- "Empty variable ${variable_name} is required to build draco.")
+ FATAL_ERROR "Empty variable ${variable_name} is required to build draco.")
endif()
if(NOT IS_DIRECTORY "${${variable_name}}")
@@ -44,11 +57,13 @@ macro(draco_dump_cmake_flag_variables)
list(APPEND flag_variables "CMAKE_CXX_FLAGS_INIT" "CMAKE_CXX_FLAGS"
"CMAKE_EXE_LINKER_FLAGS_INIT" "CMAKE_EXE_LINKER_FLAGS")
if(CMAKE_BUILD_TYPE)
- list(APPEND flag_variables "CMAKE_BUILD_TYPE"
- "CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}_INIT"
- "CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}"
- "CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}_INIT"
- "CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}")
+ list(
+ APPEND flag_variables
+ "CMAKE_BUILD_TYPE"
+ "CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}_INIT"
+ "CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}"
+ "CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}_INIT"
+ "CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}")
endif()
foreach(flag_variable ${flag_variables})
message("${flag_variable}:${${flag_variable}}")
diff --git a/contrib/draco/cmake/sanitizers.cmake b/contrib/draco/cmake/sanitizers.cmake
deleted file mode 100644
index e720bc045..000000000
--- a/contrib/draco/cmake/sanitizers.cmake
+++ /dev/null
@@ -1,19 +0,0 @@
-if(DRACO_CMAKE_SANITIZERS_CMAKE_)
- return()
-endif()
-set(DRACO_CMAKE_SANITIZERS_CMAKE_ 1)
-
-if(MSVC OR NOT SANITIZE)
- return()
-endif()
-
-include("${draco_root}/cmake/compiler_flags.cmake")
-
-string(TOLOWER ${SANITIZE} SANITIZE)
-
-# Require the sanitizer requested.
-require_linker_flag("-fsanitize=${SANITIZE}")
-require_compiler_flag("-fsanitize=${SANITIZE}" YES)
-
-# Make callstacks accurate.
-require_compiler_flag("-fno-omit-frame-pointer -fno-optimize-sibling-calls" YES)
diff --git a/contrib/draco/cmake/toolchains/aarch64-linux-gnu.cmake b/contrib/draco/cmake/toolchains/aarch64-linux-gnu.cmake
index 87e0b4a45..a55da20fa 100644
--- a/contrib/draco/cmake/toolchains/aarch64-linux-gnu.cmake
+++ b/contrib/draco/cmake/toolchains/aarch64-linux-gnu.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_)
return()
endif() # DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_
diff --git a/contrib/draco/cmake/toolchains/android-ndk-common.cmake b/contrib/draco/cmake/toolchains/android-ndk-common.cmake
index 5126d6e29..80396af48 100644
--- a/contrib/draco/cmake/toolchains/android-ndk-common.cmake
+++ b/contrib/draco/cmake/toolchains/android-ndk-common.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_ANDROID_NDK_COMMON_CMAKE_)
return()
endif()
diff --git a/contrib/draco/cmake/toolchains/android.cmake b/contrib/draco/cmake/toolchains/android.cmake
index b8f576d5e..ba50576b7 100644
--- a/contrib/draco/cmake/toolchains/android.cmake
+++ b/contrib/draco/cmake/toolchains/android.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_ANDROID_CMAKE_)
return()
endif() # DRACO_CMAKE_TOOLCHAINS_ANDROID_CMAKE_
@@ -16,9 +30,9 @@ if(NOT ANDROID_ABI)
set(ANDROID_ABI arm64-v8a)
endif()
-# Force arm mode for 32-bit targets (instead of the default thumb) to improve
-# performance.
-if(NOT ANDROID_ARM_MODE)
+# Force arm mode for 32-bit arm targets (instead of the default thumb) to
+# improve performance.
+if(ANDROID_ABI MATCHES "^armeabi" AND NOT ANDROID_ARM_MODE)
set(ANDROID_ARM_MODE arm)
endif()
diff --git a/contrib/draco/cmake/toolchains/arm-ios-common.cmake b/contrib/draco/cmake/toolchains/arm-ios-common.cmake
index 65326d1c2..fab54bb39 100644
--- a/contrib/draco/cmake/toolchains/arm-ios-common.cmake
+++ b/contrib/draco/cmake/toolchains/arm-ios-common.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_ARM_IOS_COMMON_CMAKE_)
return()
endif()
@@ -13,5 +27,3 @@ set(CMAKE_C_COMPILER clang)
set(CMAKE_C_COMPILER_ARG1 "-arch ${CMAKE_SYSTEM_PROCESSOR}")
set(CMAKE_CXX_COMPILER clang++)
set(CMAKE_CXX_COMPILER_ARG1 "-arch ${CMAKE_SYSTEM_PROCESSOR}")
-
-# TODO(tomfinegan): Handle bit code embedding.
diff --git a/contrib/draco/cmake/toolchains/arm-linux-gnueabihf.cmake b/contrib/draco/cmake/toolchains/arm-linux-gnueabihf.cmake
index 6e45969e9..f1f83d67c 100644
--- a/contrib/draco/cmake/toolchains/arm-linux-gnueabihf.cmake
+++ b/contrib/draco/cmake/toolchains/arm-linux-gnueabihf.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_)
return()
endif() # DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_
diff --git a/contrib/draco/cmake/toolchains/arm64-android-ndk-libcpp.cmake b/contrib/draco/cmake/toolchains/arm64-android-ndk-libcpp.cmake
index 4b6d366f0..80d452f97 100644
--- a/contrib/draco/cmake/toolchains/arm64-android-ndk-libcpp.cmake
+++ b/contrib/draco/cmake/toolchains/arm64-android-ndk-libcpp.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_ARM64_ANDROID_NDK_LIBCPP_CMAKE_)
return()
endif()
diff --git a/contrib/draco/cmake/toolchains/arm64-ios.cmake b/contrib/draco/cmake/toolchains/arm64-ios.cmake
index c4ec7e3fa..5365d70f1 100644
--- a/contrib/draco/cmake/toolchains/arm64-ios.cmake
+++ b/contrib/draco/cmake/toolchains/arm64-ios.cmake
@@ -1,10 +1,23 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_ 1)
if(XCODE)
- # TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
diff --git a/contrib/draco/cmake/toolchains/arm64-linux-gcc.cmake b/contrib/draco/cmake/toolchains/arm64-linux-gcc.cmake
index 046ff0139..a332760b2 100644
--- a/contrib/draco/cmake/toolchains/arm64-linux-gcc.cmake
+++ b/contrib/draco/cmake/toolchains/arm64-linux-gcc.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_ARM64_LINUX_GCC_CMAKE_)
return()
endif()
diff --git a/contrib/draco/cmake/toolchains/armv7-android-ndk-libcpp.cmake b/contrib/draco/cmake/toolchains/armv7-android-ndk-libcpp.cmake
index 80ee98b18..bedcc0cad 100644
--- a/contrib/draco/cmake/toolchains/armv7-android-ndk-libcpp.cmake
+++ b/contrib/draco/cmake/toolchains/armv7-android-ndk-libcpp.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_ARMV7_ANDROID_NDK_LIBCPP_CMAKE_)
return()
endif()
diff --git a/contrib/draco/cmake/toolchains/armv7-ios.cmake b/contrib/draco/cmake/toolchains/armv7-ios.cmake
index 8ddd6997b..43e208b1f 100644
--- a/contrib/draco/cmake/toolchains/armv7-ios.cmake
+++ b/contrib/draco/cmake/toolchains/armv7-ios.cmake
@@ -1,10 +1,23 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_ 1)
if(XCODE)
- # TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
diff --git a/contrib/draco/cmake/toolchains/armv7-linux-gcc.cmake b/contrib/draco/cmake/toolchains/armv7-linux-gcc.cmake
index 9c9472319..730a87f4b 100644
--- a/contrib/draco/cmake/toolchains/armv7-linux-gcc.cmake
+++ b/contrib/draco/cmake/toolchains/armv7-linux-gcc.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_ARMV7_LINUX_GCC_CMAKE_)
return()
endif()
diff --git a/contrib/draco/cmake/toolchains/armv7s-ios.cmake b/contrib/draco/cmake/toolchains/armv7s-ios.cmake
index b433025ba..472756117 100644
--- a/contrib/draco/cmake/toolchains/armv7s-ios.cmake
+++ b/contrib/draco/cmake/toolchains/armv7s-ios.cmake
@@ -1,10 +1,23 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_ 1)
if(XCODE)
- # TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
diff --git a/contrib/draco/cmake/toolchains/i386-ios.cmake b/contrib/draco/cmake/toolchains/i386-ios.cmake
index e9a105591..38989d225 100644
--- a/contrib/draco/cmake/toolchains/i386-ios.cmake
+++ b/contrib/draco/cmake/toolchains/i386-ios.cmake
@@ -1,10 +1,23 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_i386_IOS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_i386_IOS_CMAKE_ 1)
if(XCODE)
- # TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
diff --git a/contrib/draco/cmake/toolchains/x86-android-ndk-libcpp.cmake b/contrib/draco/cmake/toolchains/x86-android-ndk-libcpp.cmake
index d43383640..6f63f2c31 100644
--- a/contrib/draco/cmake/toolchains/x86-android-ndk-libcpp.cmake
+++ b/contrib/draco/cmake/toolchains/x86-android-ndk-libcpp.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_X86_ANDROID_NDK_LIBCPP_CMAKE_)
return()
endif()
diff --git a/contrib/draco/cmake/toolchains/x86_64-android-ndk-libcpp.cmake b/contrib/draco/cmake/toolchains/x86_64-android-ndk-libcpp.cmake
index d6fabeacc..7a630f4d4 100644
--- a/contrib/draco/cmake/toolchains/x86_64-android-ndk-libcpp.cmake
+++ b/contrib/draco/cmake/toolchains/x86_64-android-ndk-libcpp.cmake
@@ -1,3 +1,17 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_X86_64_ANDROID_NDK_LIBCPP_CMAKE_)
return()
endif()
diff --git a/contrib/draco/cmake/toolchains/x86_64-ios.cmake b/contrib/draco/cmake/toolchains/x86_64-ios.cmake
index 4c50a72a2..6946ce410 100644
--- a/contrib/draco/cmake/toolchains/x86_64-ios.cmake
+++ b/contrib/draco/cmake/toolchains/x86_64-ios.cmake
@@ -1,10 +1,23 @@
+# Copyright 2021 The Draco Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
if(DRACO_CMAKE_TOOLCHAINS_X86_64_IOS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_X86_64_IOS_CMAKE_ 1)
if(XCODE)
- # TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
diff --git a/contrib/draco/cmake/util.cmake b/contrib/draco/cmake/util.cmake
deleted file mode 100644
index 813146a62..000000000
--- a/contrib/draco/cmake/util.cmake
+++ /dev/null
@@ -1,79 +0,0 @@
-if(DRACO_CMAKE_UTIL_CMAKE_)
- return()
-endif()
-set(DRACO_CMAKE_UTIL_CMAKE_ 1)
-
-# Creates dummy source file in $draco_build_dir named $basename.$extension and
-# returns the full path to the dummy source file via the $out_file_path
-# parameter.
-function(create_dummy_source_file basename extension out_file_path)
- set(dummy_source_file "${draco_build_dir}/${basename}.${extension}")
- file(WRITE "${dummy_source_file}.new"
- "// Generated file. DO NOT EDIT!\n"
- "// ${target_name} needs a ${extension} file to force link language, \n"
- "// or to silence a harmless CMake warning: Ignore me.\n"
- "void ${target_name}_dummy_function(void) {}\n")
-
- # Will replace ${dummy_source_file} only if the file content has changed.
- # This prevents forced Draco rebuilds after CMake runs.
- configure_file("${dummy_source_file}.new" "${dummy_source_file}")
- file(REMOVE "${dummy_source_file}.new")
-
- set(${out_file_path} ${dummy_source_file} PARENT_SCOPE)
-endfunction()
-
-# Convenience function for adding a dummy source file to $target_name using
-# $extension as the file extension. Wraps create_dummy_source_file().
-function(add_dummy_source_file_to_target target_name extension)
- create_dummy_source_file("${target_name}" "${extension}" "dummy_source_file")
- target_sources(${target_name} PRIVATE ${dummy_source_file})
-endfunction()
-
-# Extracts the version number from $version_file and returns it to the user via
-# $version_string_out_var. This is achieved by finding the first instance of the
-# kDracoVersion variable and then removing everything but the string literal
-# assigned to the variable. Quotes and semicolon are stripped from the returned
-# string.
-function(extract_version_string version_file version_string_out_var)
- file(STRINGS "${version_file}" draco_version REGEX "kDracoVersion")
- list(GET draco_version 0 draco_version)
- string(REPLACE "static const char kDracoVersion[] = " "" draco_version
- "${draco_version}")
- string(REPLACE ";" "" draco_version "${draco_version}")
- string(REPLACE "\"" "" draco_version "${draco_version}")
- set("${version_string_out_var}" "${draco_version}" PARENT_SCOPE)
-endfunction()
-
-# Sets CMake compiler launcher to $launcher_name when $launcher_name is found in
-# $PATH. Warns user about ignoring build flag $launcher_flag when $launcher_name
-# is not found in $PATH.
-function(set_compiler_launcher launcher_flag launcher_name)
- find_program(launcher_path "${launcher_name}")
- if(launcher_path)
- set(CMAKE_C_COMPILER_LAUNCHER "${launcher_path}" PARENT_SCOPE)
- set(CMAKE_CXX_COMPILER_LAUNCHER "${launcher_path}" PARENT_SCOPE)
- message("--- Using ${launcher_name} as compiler launcher.")
- else()
- message(
- WARNING "--- Cannot find ${launcher_name}, ${launcher_flag} ignored.")
- endif()
-endfunction()
-
-# Terminates CMake execution when $var_name is unset in the environment. Sets
-# CMake variable to the value of the environment variable when the variable is
-# present in the environment.
-macro(require_variable var_name)
- if("$ENV{${var_name}}" STREQUAL "")
- message(FATAL_ERROR "${var_name} must be set in environment.")
- endif()
- set_variable_if_unset(${var_name} "")
-endmacro()
-
-# Sets $var_name to $default_value if not already set.
-macro(set_variable_if_unset var_name default_value)
- if(NOT "$ENV{${var_name}}" STREQUAL "")
- set(${var_name} $ENV{${var_name}})
- elseif(NOT ${var_name})
- set(${var_name} ${default_value})
- endif()
-endmacro()
diff --git a/contrib/draco/src/draco/animation/animation.cc b/contrib/draco/src/draco/animation/animation.cc
new file mode 100644
index 000000000..471cf2942
--- /dev/null
+++ b/contrib/draco/src/draco/animation/animation.cc
@@ -0,0 +1,47 @@
+// Copyright 2019 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/animation/animation.h"
+
+#ifdef DRACO_TRANSCODER_SUPPORTED
+
+namespace draco {
+
+void Animation::Copy(const Animation &src) {
+ name_ = src.name_;
+ channels_.clear();
+ for (int i = 0; i < src.NumChannels(); ++i) {
+ std::unique_ptr<AnimationChannel> new_channel(new AnimationChannel());
+ new_channel->Copy(*src.GetChannel(i));
+ channels_.push_back(std::move(new_channel));
+ }
+
+ samplers_.clear();
+ for (int i = 0; i < src.NumSamplers(); ++i) {
+ std::unique_ptr<AnimationSampler> new_sampler(new AnimationSampler());
+ new_sampler->Copy(*src.GetSampler(i));
+ samplers_.push_back(std::move(new_sampler));
+ }
+
+ node_animation_data_.clear();
+ for (int i = 0; i < src.NumNodeAnimationData(); ++i) {
+ std::unique_ptr<NodeAnimationData> new_data(new NodeAnimationData());
+ new_data->Copy(*src.GetNodeAnimationData(i));
+ node_animation_data_.push_back(std::move(new_data));
+ }
+}
+
+} // namespace draco
+
+#endif // DRACO_TRANSCODER_SUPPORTED
diff --git a/contrib/draco/src/draco/animation/animation.h b/contrib/draco/src/draco/animation/animation.h
new file mode 100644
index 000000000..3713f9886
--- /dev/null
+++ b/contrib/draco/src/draco/animation/animation.h
@@ -0,0 +1,149 @@
+// Copyright 2019 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ANIMATION_ANIMATION_H_
+#define DRACO_ANIMATION_ANIMATION_H_
+
+#include "draco/draco_features.h"
+
+#ifdef DRACO_TRANSCODER_SUPPORTED
+#include <memory>
+#include <vector>
+
+#include "draco/animation/node_animation_data.h"
+#include "draco/core/status.h"
+
+namespace draco {
+
+// Struct to hold information about an animation's sampler.
+struct AnimationSampler {
+ enum class SamplerInterpolation { LINEAR, STEP, CUBICSPLINE };
+
+ static std::string InterpolationToString(SamplerInterpolation value) {
+ switch (value) {
+ case SamplerInterpolation::STEP:
+ return "STEP";
+ case SamplerInterpolation::CUBICSPLINE:
+ return "CUBICSPLINE";
+ default:
+ return "LINEAR";
+ }
+ }
+
+ AnimationSampler()
+ : input_index(-1),
+ interpolation_type(SamplerInterpolation::LINEAR),
+ output_index(-1) {}
+
+ void Copy(const AnimationSampler &src) {
+ input_index = src.input_index;
+ interpolation_type = src.interpolation_type;
+ output_index = src.output_index;
+ }
+
+ int input_index;
+ SamplerInterpolation interpolation_type;
+ int output_index;
+};
+
+// Struct to hold information about an animation's channel.
+struct AnimationChannel {
+ enum class ChannelTransformation { TRANSLATION, ROTATION, SCALE, WEIGHTS };
+
+ static std::string TransformationToString(ChannelTransformation value) {
+ switch (value) {
+ case ChannelTransformation::ROTATION:
+ return "rotation";
+ case ChannelTransformation::SCALE:
+ return "scale";
+ case ChannelTransformation::WEIGHTS:
+ return "weights";
+ default:
+ return "translation";
+ }
+ }
+
+ AnimationChannel()
+ : target_index(-1),
+ transformation_type(ChannelTransformation::TRANSLATION),
+ sampler_index(-1) {}
+
+ void Copy(const AnimationChannel &src) {
+ target_index = src.target_index;
+ transformation_type = src.transformation_type;
+ sampler_index = src.sampler_index;
+ }
+
+ int target_index;
+ ChannelTransformation transformation_type;
+ int sampler_index;
+};
+
+// This class is used to hold data and information of glTF animations.
+class Animation {
+ public:
+ Animation() {}
+
+ void Copy(const Animation &src);
+
+ const std::string &GetName() const { return name_; }
+ void SetName(const std::string &name) { name_ = name; }
+
+ // Returns the number of channels in an animation.
+ int NumChannels() const { return channels_.size(); }
+ // Returns the number of samplers in an animation.
+ int NumSamplers() const { return samplers_.size(); }
+ // Returns the number of accessors in an animation.
+ int NumNodeAnimationData() const { return node_animation_data_.size(); }
+
+ // Returns a channel in the animation.
+ AnimationChannel *GetChannel(int index) { return channels_[index].get(); }
+ const AnimationChannel *GetChannel(int index) const {
+ return channels_[index].get();
+ }
+ // Returns a sampler in the animation.
+ AnimationSampler *GetSampler(int index) { return samplers_[index].get(); }
+ const AnimationSampler *GetSampler(int index) const {
+ return samplers_[index].get();
+ }
+ // Returns an accessor in the animation.
+ NodeAnimationData *GetNodeAnimationData(int index) {
+ return node_animation_data_[index].get();
+ }
+ const NodeAnimationData *GetNodeAnimationData(int index) const {
+ return node_animation_data_[index].get();
+ }
+
+ void AddNodeAnimationData(
+ std::unique_ptr<NodeAnimationData> node_animation_data) {
+ node_animation_data_.push_back(std::move(node_animation_data));
+ }
+ void AddSampler(std::unique_ptr<AnimationSampler> sampler) {
+ samplers_.push_back(std::move(sampler));
+ }
+ void AddChannel(std::unique_ptr<AnimationChannel> channel) {
+ channels_.push_back(std::move(channel));
+ }
+
+ private:
+ std::string name_;
+ std::vector<std::unique_ptr<AnimationSampler>> samplers_;
+ std::vector<std::unique_ptr<AnimationChannel>> channels_;
+ std::vector<std::unique_ptr<NodeAnimationData>> node_animation_data_;
+};
+
+} // namespace draco
+
+#endif // DRACO_TRANSCODER_SUPPORTED
+#endif // DRACO_ANIMATION_ANIMATION_H_
diff --git a/contrib/draco/src/draco/animation/animation_test.cc b/contrib/draco/src/draco/animation/animation_test.cc
new file mode 100644
index 000000000..473938bca
--- /dev/null
+++ b/contrib/draco/src/draco/animation/animation_test.cc
@@ -0,0 +1,71 @@
+// Copyright 2021 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/animation/animation.h"
+
+#include "draco/core/draco_test_base.h"
+#include "draco/draco_features.h"
+
+namespace {
+
+#ifdef DRACO_TRANSCODER_SUPPORTED
+TEST(AnimationTest, TestCopy) {
+ // Test copying of animation data.
+ draco::Animation src_anim;
+ ASSERT_TRUE(src_anim.GetName().empty());
+ src_anim.SetName("Walking");
+ ASSERT_EQ(src_anim.GetName(), "Walking");
+
+ std::unique_ptr<draco::AnimationSampler> src_sampler_0(
+ new draco::AnimationSampler());
+ src_sampler_0->interpolation_type =
+ draco::AnimationSampler::SamplerInterpolation::CUBICSPLINE;
+ std::unique_ptr<draco::AnimationSampler> src_sampler_1(
+ new draco::AnimationSampler());
+ src_sampler_1->Copy(*src_sampler_0);
+
+ ASSERT_EQ(src_sampler_0->interpolation_type,
+ src_sampler_1->interpolation_type);
+
+ src_sampler_1->interpolation_type =
+ draco::AnimationSampler::SamplerInterpolation::STEP;
+
+ src_anim.AddSampler(std::move(src_sampler_0));
+ src_anim.AddSampler(std::move(src_sampler_1));
+ ASSERT_EQ(src_anim.NumSamplers(), 2);
+
+ std::unique_ptr<draco::AnimationChannel> src_channel(
+ new draco::AnimationChannel());
+ src_channel->transformation_type =
+ draco::AnimationChannel::ChannelTransformation::WEIGHTS;
+ src_anim.AddChannel(std::move(src_channel));
+ ASSERT_EQ(src_anim.NumChannels(), 1);
+
+ draco::Animation dst_anim;
+ dst_anim.Copy(src_anim);
+
+ ASSERT_EQ(dst_anim.GetName(), src_anim.GetName());
+ ASSERT_EQ(dst_anim.NumSamplers(), 2);
+ ASSERT_EQ(dst_anim.NumChannels(), 1);
+
+ ASSERT_EQ(dst_anim.GetSampler(0)->interpolation_type,
+ src_anim.GetSampler(0)->interpolation_type);
+ ASSERT_EQ(dst_anim.GetSampler(1)->interpolation_type,
+ src_anim.GetSampler(1)->interpolation_type);
+ ASSERT_EQ(dst_anim.GetChannel(0)->transformation_type,
+ src_anim.GetChannel(0)->transformation_type);
+}
+#endif // DRACO_TRANSCODER_SUPPORTED
+
+} // namespace
diff --git a/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc b/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc
index 4a6491f9d..fcd0eaa6f 100644
--- a/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc
+++ b/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc
@@ -26,8 +26,9 @@ class KeyframeAnimationEncodingTest : public ::testing::Test {
bool CreateAndAddTimestamps(int32_t num_frames) {
timestamps_.resize(num_frames);
- for (int i = 0; i < timestamps_.size(); ++i)
+ for (int i = 0; i < timestamps_.size(); ++i) {
timestamps_[i] = static_cast<draco::KeyframeAnimation::TimestampType>(i);
+ }
return keyframe_animation_.SetTimestamps(timestamps_);
}
@@ -35,8 +36,9 @@ class KeyframeAnimationEncodingTest : public ::testing::Test {
uint32_t num_components) {
// Create and add animation data with.
animation_data_.resize(num_frames * num_components);
- for (int i = 0; i < animation_data_.size(); ++i)
+ for (int i = 0; i < animation_data_.size(); ++i) {
animation_data_[i] = static_cast<float>(i);
+ }
return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components,
animation_data_);
}
@@ -49,7 +51,7 @@ class KeyframeAnimationEncodingTest : public ::testing::Test {
ASSERT_EQ(animation0.num_animations(), animation1.num_animations());
if (quantized) {
- // TODO(hemmer) : Add test for stable quantization.
+ // TODO(b/199760123) : Add test for stable quantization.
// Quantization will result in slightly different values.
// Skip comparing values.
return;
@@ -109,9 +111,8 @@ class KeyframeAnimationEncodingTest : public ::testing::Test {
}
}
- ASSERT_TRUE(
- encoder.EncodeKeyframeAnimation(keyframe_animation_, options, &buffer)
- .ok());
+ DRACO_ASSERT_OK(
+ encoder.EncodeKeyframeAnimation(keyframe_animation_, options, &buffer));
draco::DecoderBuffer dec_decoder;
draco::KeyframeAnimationDecoder decoder;
@@ -122,8 +123,8 @@ class KeyframeAnimationEncodingTest : public ::testing::Test {
std::unique_ptr<KeyframeAnimation> decoded_animation(
new KeyframeAnimation());
DecoderOptions dec_options;
- ASSERT_TRUE(
- decoder.Decode(dec_options, &dec_buffer, decoded_animation.get()).ok());
+ DRACO_ASSERT_OK(
+ decoder.Decode(dec_options, &dec_buffer, decoded_animation.get()));
// Verify if animation before and after compression is identical.
CompareAnimationData(keyframe_animation_,
diff --git a/contrib/draco/src/draco/animation/keyframe_animation_test.cc b/contrib/draco/src/draco/animation/keyframe_animation_test.cc
index bc92b25ff..94566972b 100644
--- a/contrib/draco/src/draco/animation/keyframe_animation_test.cc
+++ b/contrib/draco/src/draco/animation/keyframe_animation_test.cc
@@ -24,8 +24,9 @@ class KeyframeAnimationTest : public ::testing::Test {
bool CreateAndAddTimestamps(int32_t num_frames) {
timestamps_.resize(num_frames);
- for (int i = 0; i < timestamps_.size(); ++i)
+ for (int i = 0; i < timestamps_.size(); ++i) {
timestamps_[i] = static_cast<draco::KeyframeAnimation::TimestampType>(i);
+ }
return keyframe_animation_.SetTimestamps(timestamps_);
}
@@ -33,8 +34,9 @@ class KeyframeAnimationTest : public ::testing::Test {
uint32_t num_components) {
// Create and add animation data with.
animation_data_.resize(num_frames * num_components);
- for (int i = 0; i < animation_data_.size(); ++i)
+ for (int i = 0; i < animation_data_.size(); ++i) {
animation_data_[i] = static_cast<float>(i);
+ }
return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components,
animation_data_);
}
diff --git a/contrib/draco/src/draco/animation/node_animation_data.h b/contrib/draco/src/draco/animation/node_animation_data.h
new file mode 100644
index 000000000..7799e3376
--- /dev/null
+++ b/contrib/draco/src/draco/animation/node_animation_data.h
@@ -0,0 +1,150 @@
+// Copyright 2019 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ANIMATION_NODE_ANIMATION_DATA_H_
+#define DRACO_ANIMATION_NODE_ANIMATION_DATA_H_
+
+#include "draco/draco_features.h"
+
+#ifdef DRACO_TRANSCODER_SUPPORTED
+
+#include "draco/core/hash_utils.h"
+#include "draco/core/status.h"
+#include "draco/core/status_or.h"
+
+namespace draco {
+
+// This class is used to store information and data for animations that only
+// affect the nodes.
+// TODO(fgalligan): Think about changing the name of this class now that Skin
+// is using it.
+class NodeAnimationData {
+ public:
+ enum class Type { SCALAR, VEC3, VEC4, MAT4 };
+
+ NodeAnimationData() : type_(Type::SCALAR), count_(0), normalized_(false) {}
+
+ void Copy(const NodeAnimationData &src) {
+ type_ = src.type_;
+ count_ = src.count_;
+ normalized_ = src.normalized_;
+ data_ = src.data_;
+ }
+
+ Type type() const { return type_; }
+ int count() const { return count_; }
+ bool normalized() const { return normalized_; }
+
+ std::vector<float> *GetMutableData() { return &data_; }
+ const std::vector<float> *GetData() const { return &data_; }
+
+ void SetType(Type type) { type_ = type; }
+ void SetCount(int count) { count_ = count; }
+ void SetNormalized(bool normalized) { normalized_ = normalized; }
+
+ int ComponentSize() const { return sizeof(float); }
+ int NumComponents() const {
+ switch (type_) {
+ case Type::SCALAR:
+ return 1;
+ case Type::VEC3:
+ return 3;
+ case Type::MAT4:
+ return 16;
+ default:
+ return 4;
+ }
+ }
+
+ std::string TypeAsString() const {
+ switch (type_) {
+ case Type::SCALAR:
+ return "SCALAR";
+ case Type::VEC3:
+ return "VEC3";
+ case Type::MAT4:
+ return "MAT4";
+ default:
+ return "VEC4";
+ }
+ }
+
+ bool operator==(const NodeAnimationData &nad) const {
+ return type_ == nad.type_ && count_ == nad.count_ &&
+ normalized_ == nad.normalized_ && data_ == nad.data_;
+ }
+
+ private:
+ Type type_;
+ int count_;
+ bool normalized_;
+ std::vector<float> data_;
+};
+
+// Wrapper class for hashing NodeAnimationData. When using different containers,
+// this class is preferable instead of copying the data in NodeAnimationData
+// every time.
+class NodeAnimationDataHash {
+ public:
+ NodeAnimationDataHash() = delete;
+ NodeAnimationDataHash &operator=(const NodeAnimationDataHash &) = delete;
+ NodeAnimationDataHash(NodeAnimationDataHash &&) = delete;
+ NodeAnimationDataHash &operator=(NodeAnimationDataHash &&) = delete;
+
+ explicit NodeAnimationDataHash(const NodeAnimationData *nad)
+ : node_animation_data_(nad) {
+ hash_ = NodeAnimationDataHash::HashNodeAnimationData(*node_animation_data_);
+ }
+
+ NodeAnimationDataHash(const NodeAnimationDataHash &nadh) {
+ node_animation_data_ = nadh.node_animation_data_;
+ hash_ = nadh.hash_;
+ }
+
+ bool operator==(const NodeAnimationDataHash &nadh) const {
+ return *node_animation_data_ == *nadh.node_animation_data_;
+ }
+
+ struct Hash {
+ size_t operator()(const NodeAnimationDataHash &nadh) const {
+ return nadh.hash_;
+ }
+ };
+
+ const NodeAnimationData *GetNodeAnimationData() {
+ return node_animation_data_;
+ }
+
+ private:
+ // Returns a hash of |nad|.
+ static size_t HashNodeAnimationData(const NodeAnimationData &nad) {
+ size_t hash = 79; // Magic number.
+ hash = HashCombine(static_cast<int>(nad.type()), hash);
+ hash = HashCombine(nad.count(), hash);
+ hash = HashCombine(nad.normalized(), hash);
+ const uint64_t data_hash =
+ FingerprintString(reinterpret_cast<const char *>(nad.GetData()->data()),
+ nad.GetData()->size() * sizeof(float));
+ hash = HashCombine(data_hash, hash);
+ return hash;
+ }
+
+ const NodeAnimationData *node_animation_data_;
+ size_t hash_;
+};
+
+} // namespace draco
+
+#endif // DRACO_TRANSCODER_SUPPORTED
+#endif // DRACO_ANIMATION_NODE_ANIMATION_DATA_H_
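For context, a minimal sketch of how the NodeAnimationDataHash wrapper introduced above can key an unordered container without copying the underlying data. The helper name RegisterAnimationData and the alias AnimationDataSet are made up for the illustration and are not part of the patch.

// Sketch only: deduplicate NodeAnimationData by pointer plus precomputed hash.
#include <unordered_set>

#include "draco/animation/node_animation_data.h"

#ifdef DRACO_TRANSCODER_SUPPORTED
namespace {

using AnimationDataSet =
    std::unordered_set<draco::NodeAnimationDataHash,
                       draco::NodeAnimationDataHash::Hash>;

// Returns true when |data| was not registered before. Only the pointer and its
// hash are stored; the NodeAnimationData itself is never copied.
bool RegisterAnimationData(const draco::NodeAnimationData *data,
                           AnimationDataSet *seen) {
  return seen->insert(draco::NodeAnimationDataHash(data)).second;
}

}  // namespace
#endif  // DRACO_TRANSCODER_SUPPORTED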
diff --git a/contrib/draco/src/draco/animation/skin.cc b/contrib/draco/src/draco/animation/skin.cc
new file mode 100644
index 000000000..f232978c2
--- /dev/null
+++ b/contrib/draco/src/draco/animation/skin.cc
@@ -0,0 +1,29 @@
+// Copyright 2019 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/animation/skin.h"
+
+#ifdef DRACO_TRANSCODER_SUPPORTED
+
+namespace draco {
+
+void Skin::Copy(const Skin &s) {
+ inverse_bind_matrices_.Copy(s.GetInverseBindMatrices());
+ joints_ = s.GetJoints();
+ joint_root_index_ = s.GetJointRoot();
+}
+
+} // namespace draco
+
+#endif // DRACO_TRANSCODER_SUPPORTED
diff --git a/contrib/draco/src/draco/animation/skin.h b/contrib/draco/src/draco/animation/skin.h
new file mode 100644
index 000000000..81ca997eb
--- /dev/null
+++ b/contrib/draco/src/draco/animation/skin.h
@@ -0,0 +1,64 @@
+// Copyright 2019 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ANIMATION_SKIN_H_
+#define DRACO_ANIMATION_SKIN_H_
+
+#include "draco/draco_features.h"
+
+#ifdef DRACO_TRANSCODER_SUPPORTED
+
+#include <vector>
+
+#include "draco/animation/node_animation_data.h"
+#include "draco/scene/scene_indices.h"
+
+namespace draco {
+
+// This class is used to store information on animation skins.
+class Skin {
+ public:
+ Skin() : joint_root_index_(-1) {}
+
+ void Copy(const Skin &s);
+
+ NodeAnimationData &GetInverseBindMatrices() { return inverse_bind_matrices_; }
+ const NodeAnimationData &GetInverseBindMatrices() const {
+ return inverse_bind_matrices_;
+ }
+
+ int AddJoint(SceneNodeIndex index) {
+ joints_.push_back(index);
+ return joints_.size() - 1;
+ }
+ int NumJoints() const { return joints_.size(); }
+ SceneNodeIndex GetJoint(int index) const { return joints_[index]; }
+ SceneNodeIndex &GetJoint(int index) { return joints_[index]; }
+ const std::vector<SceneNodeIndex> &GetJoints() const { return joints_; }
+
+ void SetJointRoot(SceneNodeIndex index) { joint_root_index_ = index; }
+ SceneNodeIndex GetJointRoot() const { return joint_root_index_; }
+
+ private:
+ NodeAnimationData inverse_bind_matrices_;
+
+ // List of node indices that make up the joint hierarchy.
+ std::vector<SceneNodeIndex> joints_;
+ SceneNodeIndex joint_root_index_;
+};
+
+} // namespace draco
+
+#endif // DRACO_TRANSCODER_SUPPORTED
+#endif // DRACO_ANIMATION_SKIN_H_
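A short usage sketch for the Skin class above, assuming scene nodes 0 and 1 exist; the function MakeTwoJointSkin is hypothetical and only illustrates the API.

// Sketch only: build a two-joint skin with MAT4 inverse bind matrices.
#include "draco/animation/skin.h"

#ifdef DRACO_TRANSCODER_SUPPORTED
draco::Skin MakeTwoJointSkin() {
  draco::Skin skin;
  // Inverse bind matrices are carried as node animation data of type MAT4.
  skin.GetInverseBindMatrices().SetType(draco::NodeAnimationData::Type::MAT4);
  skin.AddJoint(draco::SceneNodeIndex(0));  // AddJoint() returns the joint's
  skin.AddJoint(draco::SceneNodeIndex(1));  // position within the skin.
  skin.SetJointRoot(draco::SceneNodeIndex(0));
  return skin;
}
#endif  // DRACO_TRANSCODER_SUPPORTED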
diff --git a/contrib/draco/src/draco/attributes/attribute_transform.cc b/contrib/draco/src/draco/attributes/attribute_transform.cc
index 174e6b822..fb2ed1829 100644
--- a/contrib/draco/src/draco/attributes/attribute_transform.cc
+++ b/contrib/draco/src/draco/attributes/attribute_transform.cc
@@ -28,12 +28,13 @@ std::unique_ptr<PointAttribute> AttributeTransform::InitTransformedAttribute(
const PointAttribute &src_attribute, int num_entries) {
const int num_components = GetTransformedNumComponents(src_attribute);
const DataType dt = GetTransformedDataType(src_attribute);
- GeometryAttribute va;
- va.Init(src_attribute.attribute_type(), nullptr, num_components, dt, false,
+ GeometryAttribute ga;
+ ga.Init(src_attribute.attribute_type(), nullptr, num_components, dt, false,
num_components * DataTypeLength(dt), 0);
- std::unique_ptr<PointAttribute> transformed_attribute(new PointAttribute(va));
+ std::unique_ptr<PointAttribute> transformed_attribute(new PointAttribute(ga));
transformed_attribute->Reset(num_entries);
transformed_attribute->SetIdentityMapping();
+ transformed_attribute->set_unique_id(src_attribute.unique_id());
return transformed_attribute;
}
diff --git a/contrib/draco/src/draco/attributes/geometry_attribute.cc b/contrib/draco/src/draco/attributes/geometry_attribute.cc
index b62478426..141130f43 100644
--- a/contrib/draco/src/draco/attributes/geometry_attribute.cc
+++ b/contrib/draco/src/draco/attributes/geometry_attribute.cc
@@ -26,7 +26,7 @@ GeometryAttribute::GeometryAttribute()
unique_id_(0) {}
void GeometryAttribute::Init(GeometryAttribute::Type attribute_type,
- DataBuffer *buffer, int8_t num_components,
+ DataBuffer *buffer, uint8_t num_components,
DataType data_type, bool normalized,
int64_t byte_stride, int64_t byte_offset) {
buffer_ = buffer;
diff --git a/contrib/draco/src/draco/attributes/geometry_attribute.h b/contrib/draco/src/draco/attributes/geometry_attribute.h
index f4d099b1b..28f743fa0 100644
--- a/contrib/draco/src/draco/attributes/geometry_attribute.h
+++ b/contrib/draco/src/draco/attributes/geometry_attribute.h
@@ -15,12 +15,18 @@
#ifndef DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
#define DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
+#include <algorithm>
#include <array>
+#include <cmath>
#include <limits>
#include "draco/attributes/geometry_indices.h"
#include "draco/core/data_buffer.h"
#include "draco/core/hash_utils.h"
+#include "draco/draco_features.h"
+#ifdef DRACO_TRANSCODER_SUPPORTED
+#include "draco/core/status.h"
+#endif
namespace draco {
@@ -51,6 +57,16 @@ class GeometryAttribute {
// predefined use case. Such attributes are often used for a shader specific
// data.
GENERIC,
+#ifdef DRACO_TRANSCODER_SUPPORTED
+ // TODO(ostava): Adding a new attribute would be bit-stream change for GLTF.
+ // Older decoders wouldn't know what to do with this attribute type. This
+ // should be open-sourced only when we are ready to increase our bit-stream
+ // version.
+ TANGENT,
+ MATERIAL,
+ JOINTS,
+ WEIGHTS,
+#endif
// Total number of different attribute types.
// Always keep behind all named attributes.
NAMED_ATTRIBUTES_COUNT,
@@ -58,7 +74,7 @@ class GeometryAttribute {
GeometryAttribute();
// Initializes and enables the attribute.
- void Init(Type attribute_type, DataBuffer *buffer, int8_t num_components,
+ void Init(Type attribute_type, DataBuffer *buffer, uint8_t num_components,
DataType data_type, bool normalized, int64_t byte_stride,
int64_t byte_offset);
bool IsValid() const { return buffer_ != nullptr; }
@@ -129,6 +145,17 @@ class GeometryAttribute {
buffer_->Write(byte_pos, value, byte_stride());
}
+#ifdef DRACO_TRANSCODER_SUPPORTED
+ // Sets a value of an attribute entry. The input |value| must have
+ // |input_num_components| entries and it will be automatically converted to
+ // the internal format used by the geometry attribute. If the conversion is
+ // not possible, an error status will be returned.
+ template <typename InputT>
+ Status ConvertAndSetAttributeValue(AttributeValueIndex avi,
+ int input_num_components,
+ const InputT *value);
+#endif
+
// DEPRECATED: Use
// ConvertValue(AttributeValueIndex att_id,
// int out_num_components,
@@ -233,10 +260,11 @@ class GeometryAttribute {
// Returns the number of components that are stored for each entry.
// For position attribute this is usually three (x,y,z),
// while texture coordinates have two components (u,v).
- int8_t num_components() const { return num_components_; }
+ uint8_t num_components() const { return num_components_; }
// Indicates whether the data type should be normalized before interpretation,
// that is, it should be divided by the max value of the data type.
bool normalized() const { return normalized_; }
+ void set_normalized(bool normalized) { normalized_ = normalized; }
// The buffer storing the entire data of the attribute.
const DataBuffer *buffer() const { return buffer_; }
// Returns the number of bytes between two attribute entries, this is, at
@@ -260,7 +288,7 @@ class GeometryAttribute {
// T is the stored attribute data type.
// OutT is the desired data type of the attribute.
template <typename T, typename OutT>
- bool ConvertTypedValue(AttributeValueIndex att_id, int8_t out_num_components,
+ bool ConvertTypedValue(AttributeValueIndex att_id, uint8_t out_num_components,
OutT *out_value) const {
const uint8_t *src_address = GetAddress(att_id);
@@ -270,29 +298,10 @@ class GeometryAttribute {
return false;
}
const T in_value = *reinterpret_cast<const T *>(src_address);
-
- // Make sure the in_value fits within the range of values that OutT
- // is able to represent. Perform the check only for integral types.
- if (std::is_integral<T>::value && std::is_integral<OutT>::value) {
- static constexpr OutT kOutMin =
- std::is_signed<T>::value ? std::numeric_limits<OutT>::lowest() : 0;
- if (in_value < kOutMin || in_value > std::numeric_limits<OutT>::max()) {
- return false;
- }
+ if (!ConvertComponentValue<T, OutT>(in_value, normalized_,
+ out_value + i)) {
+ return false;
}
-
- out_value[i] = static_cast<OutT>(in_value);
- // When converting integer to floating point, normalize the value if
- // necessary.
- if (std::is_integral<T>::value && std::is_floating_point<OutT>::value &&
- normalized_) {
- out_value[i] /= static_cast<OutT>(std::numeric_limits<T>::max());
- }
- // TODO(ostava): Add handling of normalized attributes when converting
- // between different integer representations. If the attribute is
- // normalized, integer values should be converted as if they represent 0-1
- // range. E.g. when we convert uint16 to uint8, the range <0, 2^16 - 1>
- // should be converted to range <0, 2^8 - 1>.
src_address += sizeof(T);
}
// Fill empty data for unused output components if needed.
@@ -302,12 +311,128 @@ class GeometryAttribute {
return true;
}
+#ifdef DRACO_TRANSCODER_SUPPORTED
+ // Function that converts input |value| from type T to the internal attribute
+ // representation defined by OutT and |num_components_|.
+ template <typename T, typename OutT>
+ Status ConvertAndSetAttributeTypedValue(AttributeValueIndex avi,
+ int8_t input_num_components,
+ const T *value) {
+ uint8_t *address = GetAddress(avi);
+
+ // Convert all components available in both the original and output formats.
+ for (int i = 0; i < num_components_; ++i) {
+ if (!IsAddressValid(address)) {
+ return ErrorStatus("GeometryAttribute: Invalid address.");
+ }
+ OutT *const out_value = reinterpret_cast<OutT *>(address);
+ if (i < input_num_components) {
+ if (!ConvertComponentValue<T, OutT>(*(value + i), normalized_,
+ out_value)) {
+ return ErrorStatus(
+ "GeometryAttribute: Failed to convert component value.");
+ }
+ } else {
+ *out_value = static_cast<OutT>(0);
+ }
+ address += sizeof(OutT);
+ }
+ return OkStatus();
+ }
+#endif // DRACO_TRANSCODER_SUPPORTED
+
+ // Converts |in_value| of type T into |out_value| of type OutT. If
+ // |normalized| is true, any conversion between floating point and integer
+ // values will be treating integers as normalized types (the entire integer
+ // range will be used to represent 0-1 floating point range).
+ template <typename T, typename OutT>
+ static bool ConvertComponentValue(const T &in_value, bool normalized,
+ OutT *out_value) {
+ // Make sure the |in_value| can be represented as an integral type OutT.
+ if (std::is_integral<OutT>::value) {
+ // Make sure the |in_value| fits within the range of values that OutT
+ // is able to represent. Perform the check only for integral types.
+ if (!std::is_same<T, bool>::value && std::is_integral<T>::value) {
+ static constexpr OutT kOutMin =
+ std::is_signed<T>::value ? std::numeric_limits<OutT>::min() : 0;
+ if (in_value < kOutMin || in_value > std::numeric_limits<OutT>::max()) {
+ return false;
+ }
+ }
+
+ // Check conversion of floating point |in_value| to integral value OutT.
+ if (std::is_floating_point<T>::value) {
+ // Make sure the floating point |in_value| is not NaN and not Inf as
+ // integral type OutT is unable to represent these values.
+ if (sizeof(in_value) > sizeof(double)) {
+ if (std::isnan(static_cast<long double>(in_value)) ||
+ std::isinf(static_cast<long double>(in_value))) {
+ return false;
+ }
+ } else if (sizeof(in_value) > sizeof(float)) {
+ if (std::isnan(static_cast<double>(in_value)) ||
+ std::isinf(static_cast<double>(in_value))) {
+ return false;
+ }
+ } else {
+ if (std::isnan(static_cast<float>(in_value)) ||
+ std::isinf(static_cast<float>(in_value))) {
+ return false;
+ }
+ }
+
+ // Make sure the floating point |in_value| fits within the range of
+ // values that integral type OutT is able to represent.
+ if (in_value < std::numeric_limits<OutT>::min() ||
+ in_value >= std::numeric_limits<OutT>::max()) {
+ return false;
+ }
+ }
+ }
+
+ if (std::is_integral<T>::value && std::is_floating_point<OutT>::value &&
+ normalized) {
+ // When converting integer to floating point, normalize the value if
+ // necessary.
+ *out_value = static_cast<OutT>(in_value);
+ *out_value /= static_cast<OutT>(std::numeric_limits<T>::max());
+ } else if (std::is_floating_point<T>::value &&
+ std::is_integral<OutT>::value && normalized) {
+ // Converting from floating point to a normalized integer.
+ if (in_value > 1 || in_value < 0) {
+ // Normalized float values need to be between 0 and 1.
+ return false;
+ }
+ // TODO(ostava): Consider allowing float to normalized integer conversion
+ // for 64-bit integer types. Currently it doesn't work because we don't
+ // have a floating point type that could store all 64 bit integers.
+ if (sizeof(OutT) > 4) {
+ return false;
+ }
+ // Expand the float to the range of the output integer and round it to the
+ // nearest representable value. Use doubles for the math to ensure the
+ // integer values are represented properly during the conversion process.
+ *out_value = static_cast<OutT>(std::floor(
+ in_value * static_cast<double>(std::numeric_limits<OutT>::max()) +
+ 0.5));
+ } else {
+ *out_value = static_cast<OutT>(in_value);
+ }
+
+ // TODO(ostava): Add handling of normalized attributes when converting
+ // between different integer representations. If the attribute is
+ // normalized, integer values should be converted as if they represent 0-1
+ // range. E.g. when we convert uint16 to uint8, the range <0, 2^16 - 1>
+ // should be converted to range <0, 2^8 - 1>.
+ return true;
+ }
+
DataBuffer *buffer_;
// The buffer descriptor is stored at the time the buffer is attached to this
// attribute. The purpose is to detect if any changes happened to the buffer
// since the time it was attached.
DataBufferDescriptor buffer_descriptor_;
- int8_t num_components_;
+ uint8_t num_components_;
DataType data_type_;
bool normalized_;
int64_t byte_stride_;
@@ -323,6 +448,54 @@ class GeometryAttribute {
friend struct GeometryAttributeHasher;
};
+#ifdef DRACO_TRANSCODER_SUPPORTED
+template <typename InputT>
+Status GeometryAttribute::ConvertAndSetAttributeValue(AttributeValueIndex avi,
+ int input_num_components,
+ const InputT *value) {
+ switch (this->data_type()) {
+ case DT_INT8:
+ return ConvertAndSetAttributeTypedValue<InputT, int8_t>(
+ avi, input_num_components, value);
+ case DT_UINT8:
+ return ConvertAndSetAttributeTypedValue<InputT, uint8_t>(
+ avi, input_num_components, value);
+ case DT_INT16:
+ return ConvertAndSetAttributeTypedValue<InputT, int16_t>(
+ avi, input_num_components, value);
+ case DT_UINT16:
+ return ConvertAndSetAttributeTypedValue<InputT, uint16_t>(
+ avi, input_num_components, value);
+ case DT_INT32:
+ return ConvertAndSetAttributeTypedValue<InputT, int32_t>(
+ avi, input_num_components, value);
+ case DT_UINT32:
+ return ConvertAndSetAttributeTypedValue<InputT, uint32_t>(
+ avi, input_num_components, value);
+ case DT_INT64:
+ return ConvertAndSetAttributeTypedValue<InputT, int64_t>(
+ avi, input_num_components, value);
+ case DT_UINT64:
+ return ConvertAndSetAttributeTypedValue<InputT, uint64_t>(
+ avi, input_num_components, value);
+ case DT_FLOAT32:
+ return ConvertAndSetAttributeTypedValue<InputT, float>(
+ avi, input_num_components, value);
+ case DT_FLOAT64:
+ return ConvertAndSetAttributeTypedValue<InputT, double>(
+ avi, input_num_components, value);
+ case DT_BOOL:
+ return ConvertAndSetAttributeTypedValue<InputT, bool>(
+ avi, input_num_components, value);
+ default:
+ break;
+ }
+ return ErrorStatus(
+ "GeometryAttribute::SetAndConvertAttributeValue: Unsupported "
+ "attribute type.");
+}
+#endif
+
// Hashing support
// Function object for using Attribute as a hash key.
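To make the normalized-conversion rules above concrete, here is the float-to-normalized-integer path reproduced as a standalone function for the uint8_t case; NormalizedFloatToUint8 is an illustrative name, not draco API.

// Sketch only: mirrors the float -> normalized integer branch of
// ConvertComponentValue() for an 8-bit unsigned output.
#include <cmath>
#include <cstdint>
#include <limits>

bool NormalizedFloatToUint8(float in, uint8_t *out) {
  if (in < 0.f || in > 1.f) {
    return false;  // Normalized floats must lie in [0, 1].
  }
  // Expand to the full integer range and round to the nearest value; doubles
  // are used for the intermediate math, as in the patch.
  *out = static_cast<uint8_t>(std::floor(
      in * static_cast<double>(std::numeric_limits<uint8_t>::max()) + 0.5));
  return true;
}
// Example: 0.5f maps to floor(0.5 * 255 + 0.5) = 128, and 1.0f maps to 255.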
diff --git a/contrib/draco/src/draco/attributes/point_attribute.cc b/contrib/draco/src/draco/attributes/point_attribute.cc
index b28f860c1..e54ab5427 100644
--- a/contrib/draco/src/draco/attributes/point_attribute.cc
+++ b/contrib/draco/src/draco/attributes/point_attribute.cc
@@ -222,4 +222,47 @@ AttributeValueIndex::ValueType PointAttribute::DeduplicateFormattedValues(
}
#endif
+#ifdef DRACO_TRANSCODER_SUPPORTED
+void PointAttribute::RemoveUnusedValues() {
+ if (is_mapping_identity()) {
+ return; // For identity mapping, all values are always used.
+ }
+ // For explicit mapping we need to check if any point is mapped to a value.
+ // If not we can delete the value.
+ IndexTypeVector<AttributeValueIndex, bool> is_value_used(size(), false);
+ int num_used_values = 0;
+ for (PointIndex pi(0); pi < indices_map_.size(); ++pi) {
+ const AttributeValueIndex avi = indices_map_[pi];
+ if (!is_value_used[avi]) {
+ is_value_used[avi] = true;
+ num_used_values++;
+ }
+ }
+ if (num_used_values == size()) {
+ return; // All values are used.
+ }
+
+ // Remap the values and update the point to value mapping.
+ IndexTypeVector<AttributeValueIndex, AttributeValueIndex>
+ old_to_new_value_map(size(), kInvalidAttributeValueIndex);
+ AttributeValueIndex new_avi(0);
+ for (AttributeValueIndex avi(0); avi < size(); ++avi) {
+ if (!is_value_used[avi]) {
+ continue;
+ }
+ if (avi != new_avi) {
+ SetAttributeValue(new_avi, GetAddress(avi));
+ }
+ old_to_new_value_map[avi] = new_avi++;
+ }
+
+ // Remap all points to the new attribute values.
+ for (PointIndex pi(0); pi < indices_map_.size(); ++pi) {
+ indices_map_[pi] = old_to_new_value_map[indices_map_[pi]];
+ }
+
+ num_unique_entries_ = num_used_values;
+}
+#endif
+
} // namespace draco
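The effect of PointAttribute::RemoveUnusedValues() above can be pictured on plain vectors; the sketch below applies the same compaction to a point-to-value index map. The names are illustrative and not draco API.

// Sketch only: drop value slots that no point references and renumber the rest.
#include <cstdint>
#include <vector>

void CompactValueIndices(std::vector<uint32_t> *value_of_point,
                         size_t *num_values) {
  std::vector<bool> used(*num_values, false);
  for (const uint32_t v : *value_of_point) {
    used[v] = true;
  }
  // Build the old -> new index mapping for the surviving values.
  std::vector<uint32_t> old_to_new(*num_values, 0);
  uint32_t next = 0;
  for (size_t v = 0; v < *num_values; ++v) {
    if (used[v]) {
      old_to_new[v] = next++;
    }
  }
  for (uint32_t &v : *value_of_point) {
    v = old_to_new[v];
  }
  *num_values = next;  // E.g. the map {0, 2, 2} over 4 values becomes {0, 1, 1}.
}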
diff --git a/contrib/draco/src/draco/attributes/point_attribute.h b/contrib/draco/src/draco/attributes/point_attribute.h
index ee3662031..d55c50c8a 100644
--- a/contrib/draco/src/draco/attributes/point_attribute.h
+++ b/contrib/draco/src/draco/attributes/point_attribute.h
@@ -133,6 +133,12 @@ class PointAttribute : public GeometryAttribute {
return attribute_transform_data_.get();
}
+#ifdef DRACO_TRANSCODER_SUPPORTED
+ // Removes unused values from the attribute. Value is unused when no point
+ // is mapped to the value. Only applicable when the mapping is not identity.
+ void RemoveUnusedValues();
+#endif
+
private:
#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
template
diff --git a/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc b/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc
index 797c62f30..480e3ff34 100644
--- a/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc
+++ b/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc
@@ -15,14 +15,16 @@
#include "draco/compression/attributes/attributes_encoder.h"
#include "draco/core/varint_encoding.h"
+#include "draco/draco_features.h"
namespace draco {
AttributesEncoder::AttributesEncoder()
: point_cloud_encoder_(nullptr), point_cloud_(nullptr) {}
-AttributesEncoder::AttributesEncoder(int att_id) : AttributesEncoder() {
- AddAttributeId(att_id);
+AttributesEncoder::AttributesEncoder(int point_attrib_id)
+ : AttributesEncoder() {
+ AddAttributeId(point_attrib_id);
}
bool AttributesEncoder::Init(PointCloudEncoder *encoder, const PointCloud *pc) {
@@ -37,7 +39,15 @@ bool AttributesEncoder::EncodeAttributesEncoderData(EncoderBuffer *out_buffer) {
for (uint32_t i = 0; i < num_attributes(); ++i) {
const int32_t att_id = point_attribute_ids_[i];
const PointAttribute *const pa = point_cloud_->attribute(att_id);
- out_buffer->Encode(static_cast<uint8_t>(pa->attribute_type()));
+ GeometryAttribute::Type type = pa->attribute_type();
+#ifdef DRACO_TRANSCODER_SUPPORTED
+ // Attribute types TANGENT, MATERIAL, JOINTS, and WEIGHTS are not supported
+ // in the official bitstream. They will be encoded as GENERIC.
+ if (type > GeometryAttribute::GENERIC) {
+ type = GeometryAttribute::GENERIC;
+ }
+#endif
+ out_buffer->Encode(static_cast<uint8_t>(type));
out_buffer->Encode(static_cast<uint8_t>(pa->data_type()));
out_buffer->Encode(static_cast<uint8_t>(pa->num_components()));
out_buffer->Encode(static_cast<uint8_t>(pa->normalized()));
diff --git a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc
index e4d53485d..51c41cf7a 100644
--- a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc
+++ b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc
@@ -72,16 +72,19 @@ class PointAttributeVectorOutputIterator {
Self &operator*() { return *this; }
// Still needed in some cases.
- // TODO(hemmer): remove.
+ // TODO(b/199760123): Remove.
// hardcoded to 3 based on legacy usage.
const Self &operator=(const VectorD<CoeffT, 3> &val) {
DRACO_DCHECK_EQ(attributes_.size(), 1); // Expect only ONE attribute.
AttributeTuple &att = attributes_[0];
PointAttribute *attribute = std::get<0>(att);
+ const AttributeValueIndex avi = attribute->mapped_index(point_id_);
+ if (avi >= static_cast<uint32_t>(attribute->size())) {
+ return *this;
+ }
const uint32_t &offset = std::get<1>(att);
DRACO_DCHECK_EQ(offset, 0); // expected to be zero
- attribute->SetAttributeValue(attribute->mapped_index(point_id_),
- &val[0] + offset);
+ attribute->SetAttributeValue(avi, &val[0] + offset);
return *this;
}
// Additional operator taking std::vector as argument.
@@ -89,6 +92,10 @@ class PointAttributeVectorOutputIterator {
for (auto index = 0; index < attributes_.size(); index++) {
AttributeTuple &att = attributes_[index];
PointAttribute *attribute = std::get<0>(att);
+ const AttributeValueIndex avi = attribute->mapped_index(point_id_);
+ if (avi >= static_cast<uint32_t>(attribute->size())) {
+ return *this;
+ }
const uint32_t &offset = std::get<1>(att);
const uint32_t &data_size = std::get<3>(att);
const uint32_t &num_components = std::get<4>(att);
@@ -103,10 +110,6 @@ class PointAttributeVectorOutputIterator {
// redirect to copied data
data_source = reinterpret_cast<uint32_t *>(data_);
}
- const AttributeValueIndex avi = attribute->mapped_index(point_id_);
- if (avi >= static_cast<uint32_t>(attribute->size())) {
- return *this;
- }
attribute->SetAttributeValue(avi, data_source);
}
return *this;
@@ -195,54 +198,55 @@ bool KdTreeAttributesDecoder::DecodePortableAttributes(
data_size, num_components);
total_dimensionality += num_components;
}
- PointAttributeVectorOutputIterator<uint32_t> out_it(atts);
+ typedef PointAttributeVectorOutputIterator<uint32_t> OutIt;
+ OutIt out_it(atts);
switch (compression_level) {
case 0: {
- DynamicIntegerPointsKdTreeDecoder<0> decoder(total_dimensionality);
- if (!decoder.DecodePoints(in_buffer, out_it)) {
+ if (!DecodePoints<0, OutIt>(total_dimensionality, num_points, in_buffer,
+ &out_it)) {
return false;
}
break;
}
case 1: {
- DynamicIntegerPointsKdTreeDecoder<1> decoder(total_dimensionality);
- if (!decoder.DecodePoints(in_buffer, out_it)) {
+ if (!DecodePoints<1, OutIt>(total_dimensionality, num_points, in_buffer,
+ &out_it)) {
return false;
}
break;
}
case 2: {
- DynamicIntegerPointsKdTreeDecoder<2> decoder(total_dimensionality);
- if (!decoder.DecodePoints(in_buffer, out_it)) {
+ if (!DecodePoints<2, OutIt>(total_dimensionality, num_points, in_buffer,
+ &out_it)) {
return false;
}
break;
}
case 3: {
- DynamicIntegerPointsKdTreeDecoder<3> decoder(total_dimensionality);
- if (!decoder.DecodePoints(in_buffer, out_it)) {
+ if (!DecodePoints<3, OutIt>(total_dimensionality, num_points, in_buffer,
+ &out_it)) {
return false;
}
break;
}
case 4: {
- DynamicIntegerPointsKdTreeDecoder<4> decoder(total_dimensionality);
- if (!decoder.DecodePoints(in_buffer, out_it)) {
+ if (!DecodePoints<4, OutIt>(total_dimensionality, num_points, in_buffer,
+ &out_it)) {
return false;
}
break;
}
case 5: {
- DynamicIntegerPointsKdTreeDecoder<5> decoder(total_dimensionality);
- if (!decoder.DecodePoints(in_buffer, out_it)) {
+ if (!DecodePoints<5, OutIt>(total_dimensionality, num_points, in_buffer,
+ &out_it)) {
return false;
}
break;
}
case 6: {
- DynamicIntegerPointsKdTreeDecoder<6> decoder(total_dimensionality);
- if (!decoder.DecodePoints(in_buffer, out_it)) {
+ if (!DecodePoints<6, OutIt>(total_dimensionality, num_points, in_buffer,
+ &out_it)) {
return false;
}
break;
@@ -253,6 +257,19 @@ bool KdTreeAttributesDecoder::DecodePortableAttributes(
return true;
}
+template <int compression_level_t, typename OutIteratorT>
+bool KdTreeAttributesDecoder::DecodePoints(int total_dimensionality,
+ int num_expected_points,
+ DecoderBuffer *in_buffer,
+ OutIteratorT *out_iterator) {
+ DynamicIntegerPointsKdTreeDecoder<compression_level_t> decoder(total_dimensionality);
+ if (!decoder.DecodePoints(in_buffer, *out_iterator, num_expected_points) ||
+ decoder.num_decoded_points() != num_expected_points) {
+ return false;
+ }
+ return true;
+}
+
bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
DecoderBuffer *in_buffer) {
if (in_buffer->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 3)) {
@@ -336,6 +353,10 @@ bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
return false;
}
if (method == KdTreeAttributesEncodingMethod::kKdTreeQuantizationEncoding) {
+ // This method only supports one attribute with exactly three components.
+ if (atts.size() != 1 || std::get<4>(atts[0]) != 3) {
+ return false;
+ }
uint8_t compression_level = 0;
if (!in_buffer->Decode(&compression_level)) {
return false;
@@ -376,7 +397,7 @@ bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
GetDecoder()->point_cloud()->attribute(att_id);
attr->Reset(num_points);
attr->SetIdentityMapping();
- };
+ }
PointAttributeVectorOutputIterator<uint32_t> out_it(atts);
@@ -455,7 +476,11 @@ bool KdTreeAttributesDecoder::TransformAttributeBackToSignedType(
att->GetValue(avi, &unsigned_val[0]);
for (int c = 0; c < att->num_components(); ++c) {
// Up-cast |unsigned_val| to int32_t to ensure we don't overflow it for
- // smaller data types.
+ // smaller data types. But first check that the up-casting does not cause
+ // signed integer overflow.
+ if (unsigned_val[c] > std::numeric_limits<int32_t>::max()) {
+ return false;
+ }
signed_val[c] = static_cast<SignedDataTypeT>(
static_cast<int32_t>(unsigned_val[c]) +
min_signed_values_[num_processed_signed_components + c]);
diff --git a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h
index 87338d6b0..4af367a1a 100644
--- a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h
+++ b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h
@@ -31,6 +31,10 @@ class KdTreeAttributesDecoder : public AttributesDecoder {
bool TransformAttributesToOriginalFormat() override;
private:
+ template <int compression_level_t, typename OutIteratorT>
+ bool DecodePoints(int total_dimensionality, int num_expected_points,
+ DecoderBuffer *in_buffer, OutIteratorT *out_iterator);
+
template <typename SignedDataTypeT>
bool TransformAttributeBackToSignedType(PointAttribute *att,
int num_processed_signed_components);
diff --git a/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h b/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h
index 8a6f25b66..b717d0dbe 100644
--- a/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h
+++ b/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h
@@ -61,7 +61,7 @@ class OctahedronToolBox {
return false;
}
quantization_bits_ = q;
- max_quantized_value_ = (1 << quantization_bits_) - 1;
+ max_quantized_value_ = (1u << quantization_bits_) - 1;
max_value_ = max_quantized_value_ - 1;
dequantization_scale_ = 2.f / max_value_;
center_value_ = max_value_ / 2;
@@ -208,7 +208,9 @@ class OctahedronToolBox {
DRACO_DCHECK_LE(t, center_value_);
DRACO_DCHECK_GE(s, -center_value_);
DRACO_DCHECK_GE(t, -center_value_);
- return std::abs(s) + std::abs(t) <= center_value_;
+ const uint32_t st =
+ static_cast<uint32_t>(std::abs(s)) + static_cast<uint32_t>(std::abs(t));
+ return st <= center_value_;
}
void InvertDiamond(int32_t *s, int32_t *t) const {
@@ -230,19 +232,29 @@ class OctahedronToolBox {
sign_t = (*t > 0) ? 1 : -1;
}
- const int32_t corner_point_s = sign_s * center_value_;
- const int32_t corner_point_t = sign_t * center_value_;
- *s = 2 * *s - corner_point_s;
- *t = 2 * *t - corner_point_t;
+ // Perform the addition and subtraction using unsigned integers to avoid
+ // signed integer overflows for bad data. Note that the result will be
+ // unchanged for non-overflowing cases.
+ const uint32_t corner_point_s = sign_s * center_value_;
+ const uint32_t corner_point_t = sign_t * center_value_;
+ uint32_t us = *s;
+ uint32_t ut = *t;
+ us = us + us - corner_point_s;
+ ut = ut + ut - corner_point_t;
if (sign_s * sign_t >= 0) {
- int32_t temp = *s;
- *s = -*t;
- *t = -temp;
+ uint32_t temp = us;
+ us = -ut;
+ ut = -temp;
} else {
- std::swap(*s, *t);
+ std::swap(us, ut);
}
- *s = (*s + corner_point_s) / 2;
- *t = (*t + corner_point_t) / 2;
+ us = us + corner_point_s;
+ ut = ut + corner_point_t;
+
+ *s = us;
+ *t = ut;
+ *s /= 2;
+ *t /= 2;
}
void InvertDirection(int32_t *s, int32_t *t) const {
@@ -318,7 +330,7 @@ class OctahedronToolBox {
// Remaining coordinate can be computed by projecting the (y, z) values onto
// the surface of the octahedron.
- const float x = 1.f - abs(y) - abs(z);
+ const float x = 1.f - std::abs(y) - std::abs(z);
// |x| is essentially a signed distance from the diagonal edges of the
// diamond shown on the figure above. It is positive for all points in the
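Several of the hunks above (the diamond check, InvertDiamond, and the parallelogram decoders further down) use the same idea: do the intermediate arithmetic in an unsigned type, where overflow wraps instead of being undefined behavior, and convert back at the end. A minimal sketch of that idiom for 32-bit values; AddWrapping is an illustrative name, not the AddAsUnsigned helper from draco/core/math_utils.h:

#include <cstdint>

// Unsigned arithmetic wraps modulo 2^32, so the intermediate sum is always
// well defined. For inputs whose signed sum is in range, the value converted
// back to int32_t equals the plain signed addition.
inline int32_t AddWrapping(int32_t a, int32_t b) {
  const uint32_t sum = static_cast<uint32_t>(a) + static_cast<uint32_t>(b);
  // Conversion of an out-of-range uint32_t to int32_t is implementation
  // defined before C++20 (and modular from C++20 on); the library relies on
  // the common two's-complement behavior here.
  return static_cast<int32_t>(sum);
}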
diff --git a/contrib/draco/src/draco/compression/attributes/point_d_vector.h b/contrib/draco/src/draco/compression/attributes/point_d_vector.h
index 3b115d500..6ceb454ae 100644
--- a/contrib/draco/src/draco/compression/attributes/point_d_vector.h
+++ b/contrib/draco/src/draco/compression/attributes/point_d_vector.h
@@ -16,7 +16,9 @@
#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
#define DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
+#include
#include
+#include
#include
#include
@@ -99,11 +101,17 @@ class PointDVector {
data_(n_items * dimensionality),
data0_(data_.data()) {}
// random access iterator
- class PointDVectorIterator
- : public std::iterator {
+ class PointDVectorIterator {
friend class PointDVector;
public:
+ // Iterator traits expected by std libraries.
+ using iterator_category = std::random_access_iterator_tag;
+ using value_type = size_t;
+ using difference_type = size_t;
+ using pointer = PointDVector *;
+ using reference = PointDVector &;
+
// std::iter_swap is called inside of std::partition and needs this
// specialized support
PseudoPointD operator*() const {
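The iterator change above replaces inheritance from std::iterator, which was deprecated in C++17, with the five member type aliases that std::iterator_traits looks for. A trimmed-down sketch of the same pattern on an unrelated toy iterator (only a couple of operations shown, not a complete iterator):

#include <cstddef>
#include <iterator>

// Toy iterator showing the alias pattern; a real iterator would implement the
// full operation set required by its iterator_category.
class IndexIterator {
 public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = size_t;
  using difference_type = std::ptrdiff_t;
  using pointer = const size_t *;
  using reference = const size_t &;

  explicit IndexIterator(size_t index) : index_(index) {}
  reference operator*() const { return index_; }
  IndexIterator &operator++() {
    ++index_;
    return *this;
  }
  bool operator==(const IndexIterator &other) const {
    return index_ == other.index_;
  }
  bool operator!=(const IndexIterator &other) const {
    return !(*this == other);
  }

 private:
  size_t index_;
};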
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h
index 36c124baa..17899d054 100644
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h
@@ -22,6 +22,7 @@
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
#include "draco/compression/bit_coders/rans_bit_decoder.h"
+#include "draco/core/math_utils.h"
#include "draco/core/varint_decoding.h"
#include "draco/draco_features.h"
@@ -161,7 +162,8 @@ bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
if (!is_crease) {
++num_used_parallelograms;
for (int j = 0; j < num_components; ++j) {
- multi_pred_vals[j] += pred_vals[i][j];
+ multi_pred_vals[j] =
+ AddAsUnsigned(multi_pred_vals[j], pred_vals[i][j]);
}
}
}
@@ -210,6 +212,9 @@ bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
if (!DecodeVarint(&num_flags, buffer)) {
return false;
}
+ if (num_flags > this->mesh_data().corner_table()->num_corners()) {
+ return false;
+ }
if (num_flags > 0) {
is_crease_edge_[i].resize(num_flags);
RAnsBitDecoder decoder;
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h
index 77df8ee24..736598b15 100644
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h
@@ -392,7 +392,7 @@ bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
RAnsBitEncoder encoder;
encoder.StartEncoding();
// Encode the crease edge flags in the reverse vertex order that is needed
- // be the decoder. Note that for the currently supported mode, each vertex
+ // by the decoder. Note that for the currently supported mode, each vertex
// has exactly |num_used_parallelograms| edges that need to be encoded.
for (int j = static_cast(is_crease_edge_[i].size()) -
num_used_parallelograms;
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h
index fc82e0a8f..9825c7261 100644
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h
@@ -18,6 +18,7 @@
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
+#include "draco/core/math_utils.h"
#include "draco/draco_features.h"
namespace draco {
@@ -89,7 +90,8 @@ bool MeshPredictionSchemeMultiParallelogramDecoder<
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h
static_cast<float>(data[data_offset + 1]));
}
- void ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
+ bool ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
int data_id);
private:
@@ -123,6 +123,10 @@ bool MeshPredictionSchemeTexCoordsDecoder::
ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
int /* size */, int num_components,
const PointIndex *entry_to_point_id_map) {
+ if (num_components != 2) {
+ // Corrupt/malformed input. Two output components are req'd.
+ return false;
+ }
num_components_ = num_components;
entry_to_point_id_map_ = entry_to_point_id_map;
predicted_value_ =
std::unique_ptr<DataTypeT[]>(new DataTypeT[num_components]);
@@ -133,7 +137,9 @@ bool MeshPredictionSchemeTexCoordsDecoder::
static_cast<int>(this->mesh_data().data_to_corner_map()->size());
for (int p = 0; p < corner_map_size; ++p) {
const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
- ComputePredictedValue(corner_id, out_data, p);
+ if (!ComputePredictedValue(corner_id, out_data, p)) {
+ return false;
+ }
const int dst_offset = p * num_components;
this->transform().ComputeOriginalValue(
@@ -159,6 +165,11 @@ bool MeshPredictionSchemeTexCoordsDecoder::
if (num_orientations == 0) {
return false;
}
+ if (num_orientations > this->mesh_data().corner_table()->num_corners()) {
+ // We can't have more orientations than the maximum number of decoded
+ // values.
+ return false;
+ }
orientations_.resize(num_orientations);
bool last_orientation = true;
RAnsBitDecoder decoder;
@@ -177,7 +188,7 @@ bool MeshPredictionSchemeTexCoordsDecoder::
}
template <typename DataTypeT, class TransformT, class MeshDataT>
-void MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>::
+bool MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>::
ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
int data_id) {
// Compute the predicted UV coordinate from the positions on all corners
@@ -206,9 +217,17 @@ void MeshPredictionSchemeTexCoordsDecoder::
const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data);
if (p_uv == n_uv) {
// We cannot do a reliable prediction on degenerated UV triangles.
- predicted_value_[0] = static_cast<int>(p_uv[0]);
- predicted_value_[1] = static_cast<int>(p_uv[1]);
- return;
+ // Technically floats > INT_MAX are undefined, but compilers will
+ // convert those values to INT_MIN. We are being explicit here for asan.
+ for (const int i : {0, 1}) {
+ if (std::isnan(p_uv[i]) || static_cast<double>(p_uv[i]) > INT_MAX ||
+ static_cast<double>(p_uv[i]) < INT_MIN) {
+ predicted_value_[i] = INT_MIN;
+ } else {
+ predicted_value_[i] = static_cast<int>(p_uv[i]);
+ }
+ }
+ return true;
}
// Get positions at all corners.
@@ -282,32 +301,40 @@ void MeshPredictionSchemeTexCoordsDecoder::
const float pnvs = pn_uv[1] * s + n_uv[1];
const float pnvt = pn_uv[1] * t;
Vector2f predicted_uv;
+ if (orientations_.empty()) {
+ return false;
+ }
// When decoding the data, we already know which orientation to use.
const bool orientation = orientations_.back();
orientations_.pop_back();
- if (orientation)
+ if (orientation) {
predicted_uv = Vector2f(pnus - pnvt, pnvs + pnut);
- else
+ } else {
predicted_uv = Vector2f(pnus + pnvt, pnvs - pnut);
-
+ }
if (std::is_integral<DataTypeT>::value) {
// Round the predicted value for integer types.
- if (std::isnan(predicted_uv[0])) {
+ // Technically floats > INT_MAX are undefined, but compilers will
+ // convert those values to INT_MIN. We are being explicit here for asan.
+ const double u = floor(predicted_uv[0] + 0.5);
+ if (std::isnan(u) || u > INT_MAX || u < INT_MIN) {
predicted_value_[0] = INT_MIN;
} else {
- predicted_value_[0] = static_cast<int>(floor(predicted_uv[0] + 0.5));
+ predicted_value_[0] = static_cast<int>(u);
}
- if (std::isnan(predicted_uv[1])) {
+ const double v = floor(predicted_uv[1] + 0.5);
+ if (std::isnan(v) || v > INT_MAX || v < INT_MIN) {
predicted_value_[1] = INT_MIN;
} else {
- predicted_value_[1] = static_cast<int>(floor(predicted_uv[1] + 0.5));
+ predicted_value_[1] = static_cast<int>(v);
}
} else {
predicted_value_[0] = static_cast(predicted_uv[0]);
predicted_value_[1] = static_cast(predicted_uv[1]);
}
- return;
+
+ return true;
}
// Else we don't have available textures on both corners. For such case we
// can't use positions for predicting the uv value and we resort to delta
@@ -330,12 +357,13 @@ void MeshPredictionSchemeTexCoordsDecoder::
for (int i = 0; i < num_components_; ++i) {
predicted_value_[i] = 0;
}
- return;
+ return true;
}
}
for (int i = 0; i < num_components_; ++i) {
predicted_value_[i] = data[data_offset + i];
}
+ return true;
}
} // namespace draco
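The conversions patched above all follow one rule: a float must be checked for NaN and for the target integer range before it is cast, because the cast itself is undefined for such values. A standalone sketch of that clamp with an illustrative name (SafeFloatToInt is not a Draco function):

#include <climits>
#include <cmath>

// Collapses NaN and out-of-range values to a caller-chosen sentinel (the
// patch above uses INT_MIN) instead of performing an undefined cast.
inline int SafeFloatToInt(double value, int fallback) {
  if (std::isnan(value) || value > static_cast<double>(INT_MAX) ||
      value < static_cast<double>(INT_MIN)) {
    return fallback;
  }
  return static_cast<int>(value);
}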
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h
index 741ec66dc..44fcc7a6a 100644
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h
@@ -98,7 +98,10 @@ bool MeshPredictionSchemeTexCoordsPortableEncoder<
for (int p = static_cast<int>(this->mesh_data().data_to_corner_map()->size() - 1);
p >= 0; --p) {
const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
- predictor_.template ComputePredictedValue<true>(corner_id, in_data, p);
+ if (!predictor_.template ComputePredictedValue<true>(corner_id, in_data,
+ p)) {
+ return false;
+ }
const int dst_offset = p * num_components;
this->transform().ComputeCorrection(in_data + dst_offset,
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h
index f05e5ddd7..26262fb13 100644
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h
@@ -17,6 +17,9 @@
#include
+#include
+#include
+
#include "draco/attributes/point_attribute.h"
#include "draco/core/math_utils.h"
#include "draco/core/vector_d.h"
@@ -105,10 +108,14 @@ bool MeshPredictionSchemeTexCoordsPortablePredictor<
next_data_id = mesh_data_.vertex_to_data_map()->at(next_vert_id);
prev_data_id = mesh_data_.vertex_to_data_map()->at(prev_vert_id);
+ typedef VectorD<int64_t, 2> Vec2;
+ typedef VectorD<int64_t, 3> Vec3;
+ typedef VectorD<uint64_t, 2> Vec2u;
+
if (prev_data_id < data_id && next_data_id < data_id) {
// Both other corners have available UV coordinates for prediction.
- const VectorD<int64_t, 2> n_uv = GetTexCoordForEntryId(next_data_id, data);
- const VectorD<int64_t, 2> p_uv = GetTexCoordForEntryId(prev_data_id, data);
+ const Vec2 n_uv = GetTexCoordForEntryId(next_data_id, data);
+ const Vec2 p_uv = GetTexCoordForEntryId(prev_data_id, data);
if (p_uv == n_uv) {
// We cannot do a reliable prediction on degenerated UV triangles.
predicted_value_[0] = p_uv[0];
@@ -117,9 +124,9 @@ bool MeshPredictionSchemeTexCoordsPortablePredictor<
}
// Get positions at all corners.
- const VectorD<int64_t, 3> tip_pos = GetPositionForEntryId(data_id);
- const VectorD<int64_t, 3> next_pos = GetPositionForEntryId(next_data_id);
- const VectorD<int64_t, 3> prev_pos = GetPositionForEntryId(prev_data_id);
+ const Vec3 tip_pos = GetPositionForEntryId(data_id);
+ const Vec3 next_pos = GetPositionForEntryId(next_data_id);
+ const Vec3 prev_pos = GetPositionForEntryId(prev_data_id);
// We use the positions of the above triangle to predict the texture
// coordinate on the tip corner C.
// To convert the triangle into the UV coordinate system we first compute
@@ -135,17 +142,17 @@ bool MeshPredictionSchemeTexCoordsPortablePredictor<
// Where next_pos is point (N), prev_pos is point (P) and tip_pos is the
// position of predicted coordinate (C).
//
- const VectorD<int64_t, 3> pn = prev_pos - next_pos;
+ const Vec3 pn = prev_pos - next_pos;
const uint64_t pn_norm2_squared = pn.SquaredNorm();
if (pn_norm2_squared != 0) {
// Compute the projection of C onto PN by computing dot product of CN with
// PN and normalizing it by length of PN. This gives us a factor |s| where
// |s = PN.Dot(CN) / PN.SquaredNorm2()|. This factor can be used to
// compute X in UV space |X_UV| as |X_UV = N_UV + s * PN_UV|.
- const VectorD<int64_t, 3> cn = tip_pos - next_pos;
+ const Vec3 cn = tip_pos - next_pos;
const int64_t cn_dot_pn = pn.Dot(cn);
- const VectorD<int64_t, 2> pn_uv = p_uv - n_uv;
+ const Vec2 pn_uv = p_uv - n_uv;
// Because we perform all computations with integers, we don't explicitly
// compute the normalized factor |s|, but rather we perform all operations
// over UV vectors in a non-normalized coordinate system scaled with a
@@ -153,19 +160,30 @@ bool MeshPredictionSchemeTexCoordsPortablePredictor<
//
// x_uv = X_UV * PN.Norm2Squared()
//
- const VectorD<int64_t, 2> x_uv =
- n_uv * pn_norm2_squared + (cn_dot_pn * pn_uv);
-
+ const int64_t n_uv_absmax_element =
+ std::max(std::abs(n_uv[0]), std::abs(n_uv[1]));
+ if (n_uv_absmax_element >
+ std::numeric_limits<int64_t>::max() / pn_norm2_squared) {
+ // Return false if the below multiplication would overflow.
+ return false;
+ }
+ const int64_t pn_uv_absmax_element =
+ std::max(std::abs(pn_uv[0]), std::abs(pn_uv[1]));
+ if (cn_dot_pn >
+ std::numeric_limits<int64_t>::max() / pn_uv_absmax_element) {
+ // Return false if squared length calculation would overflow.
+ return false;
+ }
+ const Vec2 x_uv = n_uv * pn_norm2_squared + (cn_dot_pn * pn_uv);
const int64_t pn_absmax_element =
std::max(std::max(std::abs(pn[0]), std::abs(pn[1])), std::abs(pn[2]));
if (cn_dot_pn > std::numeric_limits<int64_t>::max() / pn_absmax_element) {
- // return false if squared length calculation would overflow.
+ // Return false if squared length calculation would overflow.
return false;
}
// Compute squared length of vector CX in position coordinate system:
- const VectorD<int64_t, 3> x_pos =
- next_pos + (cn_dot_pn * pn) / pn_norm2_squared;
+ const Vec3 x_pos = next_pos + (cn_dot_pn * pn) / pn_norm2_squared;
const uint64_t cx_norm2_squared = (tip_pos - x_pos).SquaredNorm();
// Compute vector CX_UV in the uv space by rotating vector PN_UV by 90
@@ -182,7 +200,7 @@ bool MeshPredictionSchemeTexCoordsPortablePredictor<
//
// cx_uv = CX.Norm2() * PN.Norm2() * Rot(PN_UV)
//
- VectorD<int64_t, 2> cx_uv(pn_uv[1], -pn_uv[0]); // Rotated PN_UV.
+ Vec2 cx_uv(pn_uv[1], -pn_uv[0]); // Rotated PN_UV.
// Compute CX.Norm2() * PN.Norm2()
const uint64_t norm_squared =
IntSqrt(cx_norm2_squared * pn_norm2_squared);
@@ -191,17 +209,15 @@ bool MeshPredictionSchemeTexCoordsPortablePredictor<
// Predicted uv coordinate is then computed by either adding or
// subtracting CX_UV to/from X_UV.
- VectorD<int64_t, 2> predicted_uv;
+ Vec2 predicted_uv;
if (is_encoder_t) {
// When encoding, compute both possible vectors and determine which one
// results in a better prediction.
// Both vectors need to be transformed back from the scaled space to
// the real UV coordinate space.
- const VectorD<int64_t, 2> predicted_uv_0((x_uv + cx_uv) /
- pn_norm2_squared);
- const VectorD<int64_t, 2> predicted_uv_1((x_uv - cx_uv) /
- pn_norm2_squared);
- const VectorD<int64_t, 2> c_uv = GetTexCoordForEntryId(data_id, data);
+ const Vec2 predicted_uv_0((x_uv + cx_uv) / pn_norm2_squared);
+ const Vec2 predicted_uv_1((x_uv - cx_uv) / pn_norm2_squared);
+ const Vec2 c_uv = GetTexCoordForEntryId(data_id, data);
if ((c_uv - predicted_uv_0).SquaredNorm() <
(c_uv - predicted_uv_1).SquaredNorm()) {
predicted_uv = predicted_uv_0;
@@ -217,10 +233,12 @@ bool MeshPredictionSchemeTexCoordsPortablePredictor<
}
const bool orientation = orientations_.back();
orientations_.pop_back();
+ // Perform operations in unsigned type to avoid signed integer overflow.
+ // Note that the result will be the same (for non-overflowing values).
if (orientation) {
- predicted_uv = (x_uv + cx_uv) / pn_norm2_squared;
+ predicted_uv = Vec2(Vec2u(x_uv) + Vec2u(cx_uv)) / pn_norm2_squared;
} else {
- predicted_uv = (x_uv - cx_uv) / pn_norm2_squared;
+ predicted_uv = Vec2(Vec2u(x_uv) - Vec2u(cx_uv)) / pn_norm2_squared;
}
}
predicted_value_[0] = static_cast(predicted_uv[0]);
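The overflow guards added above all test a multiplication before performing it: for positive operands, a * b exceeds the int64_t range exactly when a > max / b. A minimal sketch of that check under the same positive-operand assumption the predictor relies on (it works with absolute maxima and a squared norm already checked to be non-zero):

#include <cstdint>
#include <limits>

// Valid only for a >= 0 and b > 0, which is how the guards above use it.
inline bool MultiplyWouldOverflow(int64_t a, int64_t b) {
  return a > std::numeric_limits<int64_t>::max() / b;
}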
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc
index f410a6cd2..2338f2f76 100644
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc
@@ -18,22 +18,58 @@ namespace draco {
PredictionSchemeMethod SelectPredictionMethod(
int att_id, const PointCloudEncoder *encoder) {
- if (encoder->options()->GetSpeed() >= 10) {
+ return SelectPredictionMethod(att_id, *encoder->options(), encoder);
+}
+
+PredictionSchemeMethod SelectPredictionMethod(
+ int att_id, const EncoderOptions &options,
+ const PointCloudEncoder *encoder) {
+ if (options.GetSpeed() >= 10) {
// Selected fastest, though still doing some compression.
return PREDICTION_DIFFERENCE;
}
if (encoder->GetGeometryType() == TRIANGULAR_MESH) {
// Use speed setting to select the best encoding method.
+ const int att_quant =
+ options.GetAttributeInt(att_id, "quantization_bits", -1);
const PointAttribute *const att = encoder->point_cloud()->attribute(att_id);
- if (att->attribute_type() == GeometryAttribute::TEX_COORD) {
- if (encoder->options()->GetSpeed() < 4) {
+ if (att_quant != -1 &&
+ att->attribute_type() == GeometryAttribute::TEX_COORD &&
+ att->num_components() == 2) {
+ // Texture coordinate predictor needs a position attribute that is either
+ // integer or quantized. For numerical reasons, we require the position
+ // quantization to be at most 21 bits and the 2*position_quantization +
+ // uv_quantization < 64 (TODO(b/231259902)).
+ const PointAttribute *const pos_att =
+ encoder->point_cloud()->GetNamedAttribute(
+ GeometryAttribute::POSITION);
+ bool is_pos_att_valid = false;
+ if (pos_att) {
+ if (IsDataTypeIntegral(pos_att->data_type())) {
+ is_pos_att_valid = true;
+ } else {
+ // Check quantization of the position attribute.
+ const int pos_att_id = encoder->point_cloud()->GetNamedAttributeId(
+ GeometryAttribute::POSITION);
+ const int pos_quant =
+ options.GetAttributeInt(pos_att_id, "quantization_bits", -1);
+ // Must be quantized but the quantization is restricted to 21 bits and
+ // 2*|pos_quant|+|att_quant| must be smaller than 64 bits.
+ if (pos_quant > 0 && pos_quant <= 21 &&
+ 2 * pos_quant + att_quant < 64) {
+ is_pos_att_valid = true;
+ }
+ }
+ }
+
+ if (is_pos_att_valid && options.GetSpeed() < 4) {
// Use texture coordinate prediction for speeds 0, 1, 2, 3.
return MESH_PREDICTION_TEX_COORDS_PORTABLE;
}
}
if (att->attribute_type() == GeometryAttribute::NORMAL) {
#ifdef DRACO_NORMAL_ENCODING_SUPPORTED
- if (encoder->options()->GetSpeed() < 4) {
+ if (options.GetSpeed() < 4) {
// Use geometric normal prediction for speeds 0, 1, 2, 3.
// For this prediction, the position attribute needs to be either
// integer or quantized as well.
@@ -43,8 +79,8 @@ PredictionSchemeMethod SelectPredictionMethod(
encoder->point_cloud()->GetNamedAttribute(
GeometryAttribute::POSITION);
if (pos_att && (IsDataTypeIntegral(pos_att->data_type()) ||
- encoder->options()->GetAttributeInt(
- pos_att_id, "quantization_bits", -1) > 0)) {
+ options.GetAttributeInt(pos_att_id, "quantization_bits",
+ -1) > 0)) {
return MESH_PREDICTION_GEOMETRIC_NORMAL;
}
}
@@ -52,11 +88,10 @@ PredictionSchemeMethod SelectPredictionMethod(
return PREDICTION_DIFFERENCE; // default
}
// Handle other attribute types.
- if (encoder->options()->GetSpeed() >= 8) {
+ if (options.GetSpeed() >= 8) {
return PREDICTION_DIFFERENCE;
}
- if (encoder->options()->GetSpeed() >= 2 ||
- encoder->point_cloud()->num_points() < 40) {
+ if (options.GetSpeed() >= 2 || encoder->point_cloud()->num_points() < 40) {
// Parallelogram prediction is used for speeds 2 - 7 or when the overhead
// of using constrained multi-parallelogram would be too high.
return MESH_PREDICTION_PARALLELOGRAM;
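The restriction above (position quantization of at most 21 bits and 2 * pos_quant + uv_quant < 64) comes from the integer arithmetic in the portable texture-coordinate predictor: squared position norms take roughly 2 * pos_quant bits and are then multiplied by UV values, so the whole budget has to fit in a signed 64-bit intermediate. A small helper expressing that check, as a sketch rather than Draco API:

// Back-of-the-envelope bit budget for the portable UV predictor: squared
// position norms ~ 2 * pos_quant bits, multiplied by a uv_quant-bit value.
inline bool TexCoordPredictionFits(int pos_quant, int uv_quant) {
  return pos_quant > 0 && pos_quant <= 21 && 2 * pos_quant + uv_quant < 64;
}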
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h
index 40a7683aa..11db5a62e 100644
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h
@@ -38,6 +38,10 @@ namespace draco {
PredictionSchemeMethod SelectPredictionMethod(int att_id,
const PointCloudEncoder *encoder);
+PredictionSchemeMethod SelectPredictionMethod(int att_id,
+ const EncoderOptions &options,
+ const PointCloudEncoder *encoder);
+
// Factory class for creating mesh prediction schemes.
template <typename DataTypeT>
struct MeshPredictionSchemeEncoderFactory {
@@ -97,10 +101,11 @@ CreatePredictionSchemeForEncoder(PredictionSchemeMethod method, int att_id,
// template nature of the prediction schemes).
const MeshEncoder *const mesh_encoder =
static_cast<const MeshEncoder *>(encoder);
+ const uint16_t bitstream_version = kDracoMeshBitstreamVersion;
auto ret = CreateMeshPredictionScheme<
MeshEncoder, PredictionSchemeEncoder<DataTypeT, TransformT>,
MeshPredictionSchemeEncoderFactory<DataTypeT>>(
- mesh_encoder, method, att_id, transform, kDracoMeshBitstreamVersion);
+ mesh_encoder, method, att_id, transform, bitstream_version);
if (ret) {
return ret;
}
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h
index 5a6c7c2dd..e9e345343 100644
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h
@@ -21,6 +21,7 @@
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h"
#include "draco/core/decoder_buffer.h"
#include "draco/core/macros.h"
+#include "draco/core/math_utils.h"
#include "draco/core/vector_d.h"
namespace draco {
@@ -98,9 +99,8 @@ class PredictionSchemeNormalOctahedronCanonicalizedDecodingTransform
if (!pred_is_in_bottom_left) {
pred = this->RotatePoint(pred, rotation_count);
}
- Point2 orig = pred + corr;
- orig[0] = this->ModMax(orig[0]);
- orig[1] = this->ModMax(orig[1]);
+ Point2 orig(this->ModMax(AddAsUnsigned(pred[0], corr[0])),
+ this->ModMax(AddAsUnsigned(pred[1], corr[1])));
if (!pred_is_in_bottom_left) {
const int32_t reverse_rotation_count = (4 - rotation_count) % 4;
orig = this->RotatePoint(orig, reverse_rotation_count);
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc
index 8c8932f77..298758d8c 100644
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc
@@ -25,10 +25,10 @@ class PredictionSchemeNormalOctahedronCanonicalizedTransformTest
Transform;
typedef Transform::Point2 Point2;
- void TestComputeCorrection(const Transform &transform, const int32_t &ox,
- const int32_t &oy, const int32_t &px,
- const int32_t &py, const int32_t &cx,
- const int32_t &cy) {
+ void TestComputeCorrection(const Transform &transform, const int32_t ox,
+ const int32_t oy, const int32_t px,
+ const int32_t py, const int32_t cx,
+ const int32_t cy) {
const int32_t o[2] = {ox + 7, oy + 7};
const int32_t p[2] = {px + 7, py + 7};
int32_t corr[2] = {500, 500};
@@ -38,7 +38,7 @@ class PredictionSchemeNormalOctahedronCanonicalizedTransformTest
}
void TestGetRotationCount(const Transform &transform, const Point2 &pred,
- const int32_t &rot_dir) {
+ const int32_t rot_dir) {
const int32_t rotation_count = transform.GetRotationCount(pred);
ASSERT_EQ(rot_dir, rotation_count);
}
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h
index a1bc4a327..d3705c8ad 100644
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h
@@ -80,19 +80,31 @@ class PredictionSchemeNormalOctahedronDecodingTransform
private:
Point2 ComputeOriginalValue(Point2 pred, const Point2 &corr) const {
const Point2 t(this->center_value(), this->center_value());
- pred = pred - t;
+ typedef typename std::make_unsigned<DataTypeT>::type UnsignedDataTypeT;
+ typedef VectorD<UnsignedDataTypeT, 2> Point2u;
+
+ // Perform the addition in unsigned type to avoid signed integer overflow.
+ // Note that the result will be the same (for non-overflowing values).
+ pred = Point2(Point2u(pred) - Point2u(t));
const bool pred_is_in_diamond = this->IsInDiamond(pred[0], pred[1]);
if (!pred_is_in_diamond) {
this->InvertDiamond(&pred[0], &pred[1]);
}
- Point2 orig = pred + corr;
+
+ // Perform the addition in unsigned type to avoid signed integer overflow.
+ // Note that the result will be the same (for non-overflowing values).
+ Point2 orig(Point2u(pred) + Point2u(corr));
+
orig[0] = this->ModMax(orig[0]);
orig[1] = this->ModMax(orig[1]);
if (!pred_is_in_diamond) {
this->InvertDiamond(&orig[0], &orig[1]);
}
- orig = orig + t;
+
+ // Perform the addition in unsigned type to avoid signed integer overflow.
+ // Note that the result will be the same (for non-overflowing values).
+ orig = Point2(Point2u(orig) + Point2u(t));
return orig;
}
};
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc
index 1001b19fa..1403973c4 100644
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc
@@ -23,10 +23,10 @@ class PredictionSchemeNormalOctahedronTransformTest : public ::testing::Test {
Transform;
typedef Transform::Point2 Point2;
- void TestComputeCorrection(const Transform &transform, const int32_t &ox,
- const int32_t &oy, const int32_t &px,
- const int32_t &py, const int32_t &cx,
- const int32_t &cy) {
+ void TestComputeCorrection(const Transform &transform, const int32_t ox,
+ const int32_t oy, const int32_t px,
+ const int32_t py, const int32_t cx,
+ const int32_t cy) {
const int32_t o[2] = {ox + 7, oy + 7};
const int32_t p[2] = {px + 7, py + 7};
int32_t corr[2] = {500, 500};
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h
index 26f61fbaf..bba3de09c 100644
--- a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h
@@ -70,10 +70,10 @@ class PredictionSchemeWrapTransformBase {
clamped_value_[i] = predicted_val[i];
}
}
- return &clamped_value_[0];
+ return clamped_value_.data();
}
- // TODO(hemmer): Consider refactoring to avoid this dummy.
+ // TODO(b/199760123): Consider refactoring to avoid this dummy.
int quantization_bits() const {
DRACO_DCHECK(false);
return -1;
diff --git a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc
index 83f42125a..17f32fc16 100644
--- a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc
+++ b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc
@@ -148,8 +148,9 @@ bool SequentialIntegerAttributeDecoder::DecodeIntegerValues(
return false;
}
for (size_t i = 0; i < num_values; ++i) {
- if (!in_buffer->Decode(portable_attribute_data + i, num_bytes))
+ if (!in_buffer->Decode(portable_attribute_data + i, num_bytes)) {
return false;
+ }
}
}
}
@@ -228,12 +229,13 @@ void SequentialIntegerAttributeDecoder::StoreTypedValues(uint32_t num_values) {
void SequentialIntegerAttributeDecoder::PreparePortableAttribute(
int num_entries, int num_components) {
- GeometryAttribute va;
- va.Init(attribute()->attribute_type(), nullptr, num_components, DT_INT32,
+ GeometryAttribute ga;
+ ga.Init(attribute()->attribute_type(), nullptr, num_components, DT_INT32,
false, num_components * DataTypeLength(DT_INT32), 0);
- std::unique_ptr<PointAttribute> port_att(new PointAttribute(va));
+ std::unique_ptr<PointAttribute> port_att(new PointAttribute(ga));
port_att->SetIdentityMapping();
port_att->Reset(num_entries);
+ port_att->set_unique_id(attribute()->unique_id());
SetPortableAttribute(std::move(port_att));
}
diff --git a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc
index e66a0a8a4..5f673be42 100644
--- a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc
+++ b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc
@@ -138,9 +138,11 @@ bool SequentialIntegerAttributeEncoder::EncodeValues(
// All integer values are initialized. Process them using the prediction
// scheme if we have one.
if (prediction_scheme_) {
- prediction_scheme_->ComputeCorrectionValues(
- portable_attribute_data, &encoded_data[0], num_values, num_components,
- point_ids.data());
+ if (!prediction_scheme_->ComputeCorrectionValues(
+ portable_attribute_data, &encoded_data[0], num_values,
+ num_components, point_ids.data())) {
+ return false;
+ }
}
if (prediction_scheme_ == nullptr ||
diff --git a/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc
index 2e20e89e6..3c5ef0ebc 100644
--- a/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc
+++ b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc
@@ -20,8 +20,9 @@ namespace draco {
bool SequentialNormalAttributeEncoder::Init(PointCloudEncoder *encoder,
int attribute_id) {
- if (!SequentialIntegerAttributeEncoder::Init(encoder, attribute_id))
+ if (!SequentialIntegerAttributeEncoder::Init(encoder, attribute_id)) {
return false;
+ }
// Currently this encoder works only for 3-component normal vectors.
if (attribute()->num_components() != 3) {
return false;
diff --git a/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h b/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h
index b9fbc2d6f..6273692a2 100644
--- a/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h
+++ b/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h
@@ -47,14 +47,13 @@ class DirectBitDecoder {
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be
// > 0 and <= 32.
- void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
+ bool DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
DRACO_DCHECK_EQ(true, nbits <= 32);
DRACO_DCHECK_EQ(true, nbits > 0);
const int remaining = 32 - num_used_bits_;
if (nbits <= remaining) {
if (pos_ == bits_.end()) {
- *value = 0;
- return;
+ return false;
}
*value = (*pos_ << num_used_bits_) >> (32 - nbits);
num_used_bits_ += nbits;
@@ -64,8 +63,7 @@ class DirectBitDecoder {
}
} else {
if (pos_ + 1 == bits_.end()) {
- *value = 0;
- return;
+ return false;
}
const uint32_t value_l = ((*pos_) << num_used_bits_);
num_used_bits_ = nbits - remaining;
@@ -73,6 +71,7 @@ class DirectBitDecoder {
const uint32_t value_r = (*pos_) >> (32 - num_used_bits_);
*value = (value_l >> (32 - num_used_bits_ - remaining)) | value_r;
}
+ return true;
}
void EndDecoding() {}
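With the change above, a truncated bit stream is reported through the return value instead of being papered over with zeros, so callers are expected to propagate the failure. A sketch of the intended call pattern (ReadBitsChecked and the template parameter are illustrative, not Draco API):

#include <cstdint>

// BitDecoderT stands in for any decoder exposing the patched
// DecodeLeastSignificantBits32(int, uint32_t *) that returns bool.
template <typename BitDecoderT>
bool ReadBitsChecked(BitDecoderT *decoder, int nbits, uint32_t *out) {
  if (!decoder->DecodeLeastSignificantBits32(nbits, out)) {
    return false;  // The buffer ended early; treat the input as corrupt.
  }
  return true;
}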
diff --git a/contrib/draco/src/draco/compression/config/encoder_options.h b/contrib/draco/src/draco/compression/config/encoder_options.h
index ed1b02068..e8a55bbba 100644
--- a/contrib/draco/src/draco/compression/config/encoder_options.h
+++ b/contrib/draco/src/draco/compression/config/encoder_options.h
@@ -65,6 +65,10 @@ class EncoderOptionsBase : public DracoOptions {
this->SetGlobalInt("encoding_speed", encoding_speed);
this->SetGlobalInt("decoding_speed", decoding_speed);
}
+ bool IsSpeedSet() const {
+ return this->IsGlobalOptionSet("encoding_speed") ||
+ this->IsGlobalOptionSet("decoding_speed");
+ }
// Sets a given feature as supported or unsupported by the target decoder.
// Encoder will always use only supported features when encoding the input
diff --git a/contrib/draco/src/draco/compression/decode_test.cc b/contrib/draco/src/draco/compression/decode_test.cc
index 198714690..8f3e7f4e9 100644
--- a/contrib/draco/src/draco/compression/decode_test.cc
+++ b/contrib/draco/src/draco/compression/decode_test.cc
@@ -17,9 +17,11 @@
#include
#include
+#include "draco/compression/encode.h"
#include "draco/core/draco_test_base.h"
#include "draco/core/draco_test_utils.h"
#include "draco/io/file_utils.h"
+#include "draco/io/obj_encoder.h"
namespace {
@@ -166,4 +168,78 @@ TEST_F(DecodeTest, TestSkipAttributeTransformWithNoQuantization) {
ASSERT_EQ(pos_att->GetAttributeTransformData(), nullptr);
}
+TEST_F(DecodeTest, TestSkipAttributeTransformUniqueId) {
+ // Tests that decoders preserve unique id of attributes even when their
+ // attribute transforms are skipped.
+ const std::string file_name = "cube_att.obj";
+ auto src_mesh = draco::ReadMeshFromTestFile(file_name);
+ ASSERT_NE(src_mesh, nullptr);
+
+ constexpr int kPosUniqueId = 7;
+ constexpr int kNormUniqueId = 42;
+ // Set unique ids for some of the attributes.
+ src_mesh
+ ->attribute(
+ src_mesh->GetNamedAttributeId(draco::GeometryAttribute::POSITION))
+ ->set_unique_id(kPosUniqueId);
+ src_mesh
+ ->attribute(
+ src_mesh->GetNamedAttributeId(draco::GeometryAttribute::NORMAL))
+ ->set_unique_id(kNormUniqueId);
+
+ draco::EncoderBuffer encoder_buffer;
+ draco::Encoder encoder;
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 10);
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 11);
+ encoder.EncodeMeshToBuffer(*src_mesh, &encoder_buffer);
+
+ // Create a draco decoding buffer.
+ draco::DecoderBuffer buffer;
+ buffer.Init(encoder_buffer.data(), encoder_buffer.size());
+
+ // First we decode the mesh without skipping the attribute transforms.
+ draco::Decoder decoder_no_skip;
std::unique_ptr<draco::Mesh> mesh_no_skip =
+ decoder_no_skip.DecodeMeshFromBuffer(&buffer).value();
+ ASSERT_NE(mesh_no_skip, nullptr);
+
+ // Now we decode it again while skipping some attributes.
+ draco::Decoder decoder_skip;
+ // Make sure we skip dequantization for the position and normal attribute.
+ decoder_skip.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION);
+ decoder_skip.SetSkipAttributeTransform(draco::GeometryAttribute::NORMAL);
+
+ // Decode the input data into a geometry.
+ buffer.Init(encoder_buffer.data(), encoder_buffer.size());
+ std::unique_ptr<draco::Mesh> mesh_skip =
+ decoder_skip.DecodeMeshFromBuffer(&buffer).value();
+ ASSERT_NE(mesh_skip, nullptr);
+
+ // Compare the unique ids.
+ const draco::PointAttribute *const pos_att_no_skip =
+ mesh_no_skip->GetNamedAttribute(draco::GeometryAttribute::POSITION);
+ ASSERT_NE(pos_att_no_skip, nullptr);
+ ASSERT_EQ(pos_att_no_skip->data_type(), draco::DataType::DT_FLOAT32);
+
+ const draco::PointAttribute *const pos_att_skip =
+ mesh_skip->GetNamedAttribute(draco::GeometryAttribute::POSITION);
+ ASSERT_NE(pos_att_skip, nullptr);
+ ASSERT_EQ(pos_att_skip->data_type(), draco::DataType::DT_INT32);
+
+ const draco::PointAttribute *const norm_att_no_skip =
+ mesh_no_skip->GetNamedAttribute(draco::GeometryAttribute::NORMAL);
+ ASSERT_NE(norm_att_no_skip, nullptr);
+ ASSERT_EQ(norm_att_no_skip->data_type(), draco::DataType::DT_FLOAT32);
+
+ const draco::PointAttribute *const norm_att_skip =
+ mesh_skip->GetNamedAttribute(draco::GeometryAttribute::NORMAL);
+ ASSERT_NE(norm_att_skip, nullptr);
+ ASSERT_EQ(norm_att_skip->data_type(), draco::DataType::DT_INT32);
+
+ ASSERT_EQ(pos_att_skip->unique_id(), pos_att_no_skip->unique_id());
+ ASSERT_EQ(norm_att_skip->unique_id(), norm_att_no_skip->unique_id());
+ std::cout << pos_att_skip->unique_id() << " " << norm_att_skip->unique_id()
+ << std::endl;
+}
+
} // namespace
diff --git a/contrib/draco/src/draco/compression/draco_compression_options.cc b/contrib/draco/src/draco/compression/draco_compression_options.cc
new file mode 100644
index 000000000..08171c678
--- /dev/null
+++ b/contrib/draco/src/draco/compression/draco_compression_options.cc
@@ -0,0 +1,59 @@
+// Copyright 2022 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/compression/draco_compression_options.h"
+
+#ifdef DRACO_TRANSCODER_SUPPORTED
+
+namespace draco {
+
+SpatialQuantizationOptions::SpatialQuantizationOptions(int quantization_bits) {
+ SetQuantizationBits(quantization_bits);
+}
+
+void SpatialQuantizationOptions::SetQuantizationBits(int quantization_bits) {
+ mode_ = LOCAL_QUANTIZATION_BITS;
+ quantization_bits_ = quantization_bits;
+}
+
+bool SpatialQuantizationOptions::AreQuantizationBitsDefined() const {
+ return mode_ == LOCAL_QUANTIZATION_BITS;
+}
+
+SpatialQuantizationOptions &SpatialQuantizationOptions::SetGrid(float spacing) {
+ mode_ = GLOBAL_GRID;
+ spacing_ = spacing;
+ return *this;
+}
+
+bool SpatialQuantizationOptions::operator==(
+ const SpatialQuantizationOptions &other) const {
+ if (mode_ != other.mode_) {
+ return false;
+ }
+ if (mode_ == LOCAL_QUANTIZATION_BITS) {
+ if (quantization_bits_ != other.quantization_bits_) {
+ return false;
+ }
+ } else if (mode_ == GLOBAL_GRID) {
+ if (spacing_ != other.spacing_) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace draco
+
+#endif // DRACO_TRANSCODER_SUPPORTED
diff --git a/contrib/draco/src/draco/compression/draco_compression_options.h b/contrib/draco/src/draco/compression/draco_compression_options.h
new file mode 100644
index 000000000..31a4418ed
--- /dev/null
+++ b/contrib/draco/src/draco/compression/draco_compression_options.h
@@ -0,0 +1,141 @@
+// Copyright 2019 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_DRACO_COMPRESSION_OPTIONS_H_
+#define DRACO_COMPRESSION_DRACO_COMPRESSION_OPTIONS_H_
+
+#include "draco/draco_features.h"
+
+#ifdef DRACO_TRANSCODER_SUPPORTED
+#include "draco/core/status.h"
+
+namespace draco {
+
+// Quantization options for positions. Currently there are two modes for
+// quantizing positions:
+//
+// 1. Quantization bits:
+// - User defined number of quantization bits that is evenly distributed
+// to cover the compressed geometry.
+// 2. Grid:
+// - Positions are snapped to a global grid defined by grid spacing.
+// - This method is primarily intended to be used when the location of
+// quantized vertices needs to be consistent between multiple
+// geometries.
+class SpatialQuantizationOptions {
+ public:
+ explicit SpatialQuantizationOptions(int quantization_bits);
+
+ // Sets quantization bits that are going to be used for the compressed
+ // geometry. If the geometry is a scene, the same number of quantization bits
+ // is going to be applied to each mesh of the scene. Quantized values are
+ // going to be distributed within the bounds of individual meshes.
+ void SetQuantizationBits(int quantization_bits);
+
+ // If this returns true, quantization_bits() should be used to get the
+ // desired number of quantization bits for compression. Otherwise the grid
+ // mode is selected and spacing() should be used to get the desired grid
+ // spacing.
+ bool AreQuantizationBitsDefined() const;
+ const int quantization_bits() const { return quantization_bits_; }
+
+ // Defines quantization grid used for the compressed geometry. All vertices
+ // are going to be snapped to the nearest grid vertex that corresponds to an
+ // integer quantized position. |spacing| defines the distance between two grid
+ // vertices. E.g. a grid with |spacing| = 10 would have grid vertices at
+ // locations {10 * i, 10 * j, 10 * k} where i, j, k are integer numbers.
+ SpatialQuantizationOptions &SetGrid(float spacing);
+
+ const float spacing() const { return spacing_; }
+
+ bool operator==(const SpatialQuantizationOptions &other) const;
+
+ private:
+ enum Mode { LOCAL_QUANTIZATION_BITS, GLOBAL_GRID };
+ Mode mode_ = LOCAL_QUANTIZATION_BITS;
+ int quantization_bits_; // Default quantization bits for positions.
+ float spacing_ = 0.f;
+};
+
+// TODO(fgalligan): Add support for unified_position_quantization.
+// Struct to hold Draco compression options.
+struct DracoCompressionOptions {
+ int compression_level = 7; // compression level [0-10], most=10, least=0.
+ SpatialQuantizationOptions quantization_position{11};
+ int quantization_bits_normal = 8;
+ int quantization_bits_tex_coord = 10;
+ int quantization_bits_color = 8;
+ int quantization_bits_generic = 8;
+ int quantization_bits_tangent = 8;
+ int quantization_bits_weight = 8;
+ bool find_non_degenerate_texture_quantization = false;
+
+ bool operator==(const DracoCompressionOptions &other) const {
+ return compression_level == other.compression_level &&
+ quantization_position == other.quantization_position &&
+ quantization_bits_normal == other.quantization_bits_normal &&
+ quantization_bits_tex_coord == other.quantization_bits_tex_coord &&
+ quantization_bits_color == other.quantization_bits_color &&
+ quantization_bits_generic == other.quantization_bits_generic &&
+ quantization_bits_tangent == other.quantization_bits_tangent &&
+ quantization_bits_weight == other.quantization_bits_weight &&
+ find_non_degenerate_texture_quantization ==
+ other.find_non_degenerate_texture_quantization;
+ }
+
+ bool operator!=(const DracoCompressionOptions &other) const {
+ return !(*this == other);
+ }
+
+ Status Check() const {
+ DRACO_RETURN_IF_ERROR(
+ Validate("Compression level", compression_level, 0, 10));
+ if (quantization_position.AreQuantizationBitsDefined()) {
+ DRACO_RETURN_IF_ERROR(Validate("Position quantization",
+ quantization_position.quantization_bits(),
+ 0, 30));
+ } else {
+ if (quantization_position.spacing() <= 0.f) {
+ return ErrorStatus("Position quantization spacing is invalid.");
+ }
+ }
+ DRACO_RETURN_IF_ERROR(
+ Validate("Normals quantization", quantization_bits_normal, 0, 30));
+ DRACO_RETURN_IF_ERROR(
+ Validate("Tex coord quantization", quantization_bits_tex_coord, 0, 30));
+ DRACO_RETURN_IF_ERROR(
+ Validate("Color quantization", quantization_bits_color, 0, 30));
+ DRACO_RETURN_IF_ERROR(
+ Validate("Generic quantization", quantization_bits_generic, 0, 30));
+ DRACO_RETURN_IF_ERROR(
+ Validate("Tangent quantization", quantization_bits_tangent, 0, 30));
+ DRACO_RETURN_IF_ERROR(
+ Validate("Weights quantization", quantization_bits_weight, 0, 30));
+ return OkStatus();
+ }
+
+ static Status Validate(const std::string &name, int value, int min, int max) {
+ if (value < min || value > max) {
+ const std::string range =
+ "[" + std::to_string(min) + "-" + std::to_string(max) + "].";
+ return Status(Status::DRACO_ERROR, name + " is out of range " + range);
+ }
+ return OkStatus();
+ }
+};
+
+} // namespace draco
+
+#endif // DRACO_TRANSCODER_SUPPORTED
+#endif // DRACO_COMPRESSION_DRACO_COMPRESSION_OPTIONS_H_
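A short usage sketch for the options added above, assuming a transcoder-enabled build (DRACO_TRANSCODER_SUPPORTED); the field and method names come from the header itself:

#include "draco/compression/draco_compression_options.h"

// Configure grid-based position quantization and validate the settings.
bool ConfigureGridQuantization(draco::DracoCompressionOptions *options) {
  options->compression_level = 7;
  // Snap positions to a global grid with spacing 0.01 instead of using a
  // per-mesh bit budget; the most recent SetGrid/SetQuantizationBits call
  // decides the mode.
  options->quantization_position.SetGrid(0.01f);
  options->quantization_bits_tex_coord = 12;
  // Check() validates the ranges (and, in grid mode, the spacing).
  return options->Check().ok();
}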
diff --git a/contrib/draco/src/draco/compression/draco_compression_options_test.cc b/contrib/draco/src/draco/compression/draco_compression_options_test.cc
new file mode 100644
index 000000000..415295211
--- /dev/null
+++ b/contrib/draco/src/draco/compression/draco_compression_options_test.cc
@@ -0,0 +1,45 @@
+#include "draco/compression/draco_compression_options.h"
+
+#include "draco/core/draco_test_utils.h"
+
+#ifdef DRACO_TRANSCODER_SUPPORTED
+
+namespace {
+
+TEST(DracoCompressionOptionsTest, TestPositionQuantizationBits) {
+ // Test verifies that we can define draco compression options using
+ // quantization bits.
+ draco::SpatialQuantizationOptions options(10);
+
+ // Quantization bits should be used by default.
+ ASSERT_TRUE(options.AreQuantizationBitsDefined());
+ ASSERT_EQ(options.quantization_bits(), 10);
+
+ // Change the quantization bits.
+ options.SetQuantizationBits(9);
+ ASSERT_TRUE(options.AreQuantizationBitsDefined());
+ ASSERT_EQ(options.quantization_bits(), 9);
+
+ // If we select the grid, quantization bits should not be used.
+ options.SetGrid(0.5f);
+ ASSERT_FALSE(options.AreQuantizationBitsDefined());
+}
+
+TEST(DracoCompressionOptionsTest, TestPositionQuantizationGrid) {
+ // Test verifies that we can define draco compression options using
+ // quantization grid.
+ draco::SpatialQuantizationOptions options(10);
+
+ // Quantization bits should be used by default.
+ ASSERT_TRUE(options.AreQuantizationBitsDefined());
+
+ // Set the grid parameters.
+ options.SetGrid(0.25f);
+ ASSERT_FALSE(options.AreQuantizationBitsDefined());
+
+ ASSERT_EQ(options.spacing(), 0.25f);
+}
+
+} // namespace
+
+#endif // DRACO_TRANSCODER_SUPPORTED
diff --git a/contrib/draco/src/draco/compression/encode.h b/contrib/draco/src/draco/compression/encode.h
index bce8b34c2..00ccb9b2e 100644
--- a/contrib/draco/src/draco/compression/encode.h
+++ b/contrib/draco/src/draco/compression/encode.h
@@ -129,7 +129,6 @@ class Encoder
// call of EncodePointCloudToBuffer or EncodeMeshToBuffer is going to fail.
void SetEncodingMethod(int encoding_method);
- protected:
// Creates encoder options for the expert encoder used during the actual
// encoding.
EncoderOptions CreateExpertEncoderOptions(const PointCloud &pc) const;
diff --git a/contrib/draco/src/draco/compression/encode_base.h b/contrib/draco/src/draco/compression/encode_base.h
index c501bc4fa..6211efc22 100644
--- a/contrib/draco/src/draco/compression/encode_base.h
+++ b/contrib/draco/src/draco/compression/encode_base.h
@@ -98,7 +98,7 @@ class EncoderBase {
"Invalid prediction scheme for attribute type.");
}
}
- // TODO(hemmer): Try to enable more prediction schemes for normals.
+ // TODO(b/199760123): Try to enable more prediction schemes for normals.
if (att_type == GeometryAttribute::NORMAL) {
if (!(prediction_scheme == PREDICTION_DIFFERENCE ||
prediction_scheme == MESH_PREDICTION_GEOMETRIC_NORMAL)) {
diff --git a/contrib/draco/src/draco/compression/encode_test.cc b/contrib/draco/src/draco/compression/encode_test.cc
index fde4f6f5b..00d834703 100644
--- a/contrib/draco/src/draco/compression/encode_test.cc
+++ b/contrib/draco/src/draco/compression/encode_test.cc
@@ -26,6 +26,7 @@
#include "draco/core/draco_test_base.h"
#include "draco/core/draco_test_utils.h"
#include "draco/core/vector_d.h"
+#include "draco/io/file_utils.h"
#include "draco/io/obj_decoder.h"
#include "draco/mesh/triangle_soup_mesh_builder.h"
#include "draco/point_cloud/point_cloud_builder.h"
@@ -213,16 +214,14 @@ class EncodeTest : public ::testing::Test {
draco::Decoder decoder;
if (mesh) {
- auto maybe_mesh = decoder.DecodeMeshFromBuffer(&decoder_buffer);
- ASSERT_TRUE(maybe_mesh.ok());
- auto decoded_mesh = std::move(maybe_mesh).value();
+ DRACO_ASSIGN_OR_ASSERT(auto decoded_mesh,
+ decoder.DecodeMeshFromBuffer(&decoder_buffer));
ASSERT_NE(decoded_mesh, nullptr);
ASSERT_EQ(decoded_mesh->num_points(), encoder.num_encoded_points());
ASSERT_EQ(decoded_mesh->num_faces(), encoder.num_encoded_faces());
} else {
- auto maybe_pc = decoder.DecodePointCloudFromBuffer(&decoder_buffer);
- ASSERT_TRUE(maybe_pc.ok());
- auto decoded_pc = std::move(maybe_pc).value();
+ DRACO_ASSIGN_OR_ASSERT(
+ auto decoded_pc, decoder.DecodePointCloudFromBuffer(&decoder_buffer));
ASSERT_EQ(decoded_pc->num_points(), encoder.num_encoded_points());
}
}
@@ -274,7 +273,7 @@ TEST_F(EncodeTest, TestLinesObj) {
encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 16);
draco::EncoderBuffer buffer;
- ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok());
+ DRACO_ASSERT_OK(encoder.EncodePointCloudToBuffer(*pc, &buffer));
}
TEST_F(EncodeTest, TestQuantizedInfinity) {
@@ -315,7 +314,7 @@ TEST_F(EncodeTest, TestUnquantizedInfinity) {
encoder.SetEncodingMethod(draco::POINT_CLOUD_SEQUENTIAL_ENCODING);
draco::EncoderBuffer buffer;
- ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok());
+ DRACO_ASSERT_OK(encoder.EncodePointCloudToBuffer(*pc, &buffer));
}
TEST_F(EncodeTest, TestQuantizedAndUnquantizedAttributes) {
@@ -330,7 +329,7 @@ TEST_F(EncodeTest, TestQuantizedAndUnquantizedAttributes) {
encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 11);
encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 0);
draco::EncoderBuffer buffer;
- ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok());
+ DRACO_ASSERT_OK(encoder.EncodePointCloudToBuffer(*pc, &buffer));
}
TEST_F(EncodeTest, TestKdTreeEncoding) {
@@ -348,7 +347,7 @@ TEST_F(EncodeTest, TestKdTreeEncoding) {
// Now set quantization for the position attribute which should make
// the encoder happy.
encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 16);
- ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok());
+ DRACO_ASSERT_OK(encoder.EncodePointCloudToBuffer(*pc, &buffer));
}
TEST_F(EncodeTest, TestTrackingOfNumberOfEncodedEntries) {
@@ -373,7 +372,7 @@ TEST_F(EncodeTest, TestTrackingOfNumberOfEncodedEntriesNotSet) {
draco::EncoderBuffer buffer;
draco::Encoder encoder;
- ASSERT_TRUE(encoder.EncodeMeshToBuffer(*mesh, &buffer).ok());
+ DRACO_ASSERT_OK(encoder.EncodeMeshToBuffer(*mesh, &buffer));
ASSERT_EQ(encoder.num_encoded_points(), 0);
ASSERT_EQ(encoder.num_encoded_faces(), 0);
}
@@ -404,4 +403,170 @@ TEST_F(EncodeTest, TestNoPosQuantizationNormalCoding) {
ASSERT_NE(decoded_mesh, nullptr);
}
+#ifdef DRACO_TRANSCODER_SUPPORTED
+TEST_F(EncodeTest, TestDracoCompressionOptions) {
+ // This test verifies that we can set the encoder's compression options via
+ // draco::Mesh's compression options.
+ const auto mesh = draco::ReadMeshFromTestFile("test_nm.obj");
+ ASSERT_NE(mesh, nullptr);
+
+ // First set compression level and quantization manually.
+ draco::Encoder encoder_manual;
+ draco::EncoderBuffer buffer_manual;
+ encoder_manual.SetAttributeQuantization(draco::GeometryAttribute::POSITION,
+ 8);
+ encoder_manual.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 7);
+ encoder_manual.SetSpeedOptions(4, 4);
+
+ DRACO_ASSERT_OK(encoder_manual.EncodeMeshToBuffer(*mesh, &buffer_manual));
+
+ // Now do the same with options provided via DracoCompressionOptions.
+ draco::DracoCompressionOptions compression_options;
+ compression_options.compression_level = 6;
+ compression_options.quantization_position.SetQuantizationBits(8);
+ compression_options.quantization_bits_normal = 7;
+ mesh->SetCompressionOptions(compression_options);
+ mesh->SetCompressionEnabled(true);
+
+ draco::Encoder encoder_auto;
+ draco::EncoderBuffer buffer_auto;
+ DRACO_ASSERT_OK(encoder_auto.EncodeMeshToBuffer(*mesh, &buffer_auto));
+
+ // Ensure that both encoders produce the same result.
+ ASSERT_EQ(buffer_manual.size(), buffer_auto.size());
+
+ // Now change some of the mesh's compression settings and ensure the
+ // compression changes as well.
+ compression_options.compression_level = 7;
+ mesh->SetCompressionOptions(compression_options);
+ buffer_auto.Clear();
+ DRACO_ASSERT_OK(encoder_auto.EncodeMeshToBuffer(*mesh, &buffer_auto));
+ ASSERT_NE(buffer_manual.size(), buffer_auto.size());
+
+ // Check that |mesh| compression options do not override the encoder options.
+ mesh->GetCompressionOptions().compression_level = 10;
+ mesh->GetCompressionOptions().quantization_position.SetQuantizationBits(10);
+ mesh->GetCompressionOptions().quantization_bits_normal = 10;
+ draco::EncoderBuffer buffer;
+ DRACO_ASSERT_OK(encoder_manual.EncodeMeshToBuffer(*mesh, &buffer));
+ ASSERT_EQ(buffer.size(), buffer_manual.size());
+}
+
+TEST_F(EncodeTest, TestDracoCompressionOptionsManualOverride) {
+ // This test verifies that we can use encoder's option to override compression
+ // options provided in draco::Mesh's compression options.
+ const auto mesh = draco::ReadMeshFromTestFile("test_nm.obj");
+ ASSERT_NE(mesh, nullptr);
+
+ // Set some compression options.
+ draco::DracoCompressionOptions compression_options;
+ compression_options.compression_level = 6;
+ compression_options.quantization_position.SetQuantizationBits(8);
+ compression_options.quantization_bits_normal = 7;
+ mesh->SetCompressionOptions(compression_options);
+ mesh->SetCompressionEnabled(true);
+
+ draco::Encoder encoder;
+ draco::EncoderBuffer buffer_no_override;
+ DRACO_ASSERT_OK(encoder.EncodeMeshToBuffer(*mesh, &buffer_no_override));
+
+ // Now override some options and ensure the compression is different.
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 5);
+ draco::EncoderBuffer buffer_with_override;
+ DRACO_ASSERT_OK(encoder.EncodeMeshToBuffer(*mesh, &buffer_with_override));
+ ASSERT_LT(buffer_with_override.size(), buffer_no_override.size());
+}
+
+TEST_F(EncodeTest, TestDracoCompressionOptionsGridQuantization) {
+ // This test verifies that we can set position quantization via grid spacing.
+
+ // 1x1x1 cube.
+ const auto mesh = draco::ReadMeshFromTestFile("cube_att.obj");
+ ASSERT_NE(mesh, nullptr);
+ mesh->SetCompressionEnabled(true);
+
+ // Set grid quantization for positions.
+ draco::DracoCompressionOptions compression_options;
+ // This should result in 10x10x10 quantization.
+ compression_options.quantization_position.SetGrid(0.1);
+ mesh->SetCompressionOptions(compression_options);
+
+ draco::ExpertEncoder encoder(*mesh);
+ draco::EncoderBuffer buffer;
+ DRACO_ASSERT_OK(encoder.EncodeToBuffer(&buffer));
+
+ // The grid options should be reflected in the |encoder|. Check that the
+ // computed values are correct.
+ const int pos_att_id =
+ mesh->GetNamedAttributeId(draco::GeometryAttribute::POSITION);
+ draco::Vector3f origin;
+ encoder.options().GetAttributeVector(pos_att_id, "quantization_origin", 3,
+ &origin[0]);
+ ASSERT_EQ(origin, draco::Vector3f(0.f, 0.f, 0.f));
+
+ // We need 4 quantization bits (for the 11 grid values per component).
+ ASSERT_EQ(
+ encoder.options().GetAttributeInt(pos_att_id, "quantization_bits", -1),
+ 4);
+
+ // The quantization range should be ((1 << quantization_bits) - 1) * spacing.
+ ASSERT_NEAR(encoder.options().GetAttributeFloat(pos_att_id,
+ "quantization_range", 0.f),
+ 15.f * 0.1f, 1e-6f);
+}
+
+TEST_F(EncodeTest, TestDracoCompressionOptionsGridQuantizationWithOffset) {
+ // This test verifies that we can set position quantization via grid spacing
+ // when the geometry is not perfectly aligned with the quantization grid.
+
+ // 1x1x1 cube.
+ const auto mesh = draco::ReadMeshFromTestFile("cube_att.obj");
+ ASSERT_NE(mesh, nullptr);
+
+ // Move all positions a bit.
+ auto *pos_att = mesh->attribute(
+ mesh->GetNamedAttributeId(draco::GeometryAttribute::POSITION));
+ for (draco::AttributeValueIndex avi(0); avi < pos_att->size(); ++avi) {
+ draco::Vector3f pos;
+ pos_att->GetValue(avi, &pos[0]);
+ pos = pos + draco::Vector3f(-0.55f, 0.65f, 10.75f);
+ pos_att->SetAttributeValue(avi, &pos[0]);
+ }
+
+ mesh->SetCompressionEnabled(true);
+
+ // Set grid quantization for positions.
+ draco::DracoCompressionOptions compression_options;
+ // This should result in 16x16x16 quantization if the grid were perfectly
+ // aligned, but since it is not, we should expect 17 or 18 values per
+ // component.
+ compression_options.quantization_position.SetGrid(0.0625f);
+ mesh->SetCompressionOptions(compression_options);
+
+ draco::ExpertEncoder encoder(*mesh);
+ draco::EncoderBuffer buffer;
+ DRACO_ASSERT_OK(encoder.EncodeToBuffer(&buffer));
+
+ // The grid options should be reflected in the |encoder|. Check that the
+ // computed values are correct.
+ const int pos_att_id =
+ mesh->GetNamedAttributeId(draco::GeometryAttribute::POSITION);
+ draco::Vector3f origin;
+ encoder.options().GetAttributeVector(pos_att_id, "quantization_origin", 3,
+ &origin[0]);
+ // For each component, the origin is snapped down to the nearest grid value
+ // at or below the mesh minimum.
+ ASSERT_EQ(origin, draco::Vector3f(-0.5625f, 0.625f, 10.75f));
+
+ // We need 5 quantization bits (for 17-18 values).
+ ASSERT_EQ(
+ encoder.options().GetAttributeInt(pos_att_id, "quantization_bits", -1),
+ 5);
+
+ // The quantization range should be ((1 << quantization_bits) - 1) * spacing.
+ ASSERT_NEAR(encoder.options().GetAttributeFloat(pos_att_id,
+ "quantization_range", 0.f),
+ 31.f * 0.0625f, 1e-6f);
+}
+#endif // DRACO_TRANSCODER_SUPPORTED
+
} // namespace
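
For reference, the expectations asserted in TestDracoCompressionOptionsGridQuantization follow from a few lines of arithmetic. A standalone sketch, assuming the test cube spans [0, 1] in every component (illustrative only, not part of the patch):

    // Reproduces the numbers asserted in the grid-quantization test for a
    // [0, 1] cube with grid spacing 0.1.
    #include <cassert>
    #include <cmath>

    int main() {
      const float spacing = 0.1f;
      const float min_grid_pos = std::floor(0.f / spacing);  // 0
      const float max_grid_pos = std::ceil(1.f / spacing);   // 10
      // 11 grid values per component (10 intervals).
      const int num_values =
          static_cast<int>(max_grid_pos) - static_cast<int>(min_grid_pos) + 1;
      int bits = 0;
      while ((1 << bits) < num_values) {
        ++bits;  // Smallest bit count covering all grid values.
      }
      const float range = ((1 << bits) - 1) * spacing;
      assert(num_values == 11);
      assert(bits == 4);                         // Matches quantization_bits.
      assert(std::fabs(range - 1.5f) < 1e-6f);   // Matches 15.f * 0.1f.
      return 0;
    }
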
diff --git a/contrib/draco/src/draco/compression/entropy/ans.h b/contrib/draco/src/draco/compression/entropy/ans.h
index c71d58975..313546fee 100644
--- a/contrib/draco/src/draco/compression/entropy/ans.h
+++ b/contrib/draco/src/draco/compression/entropy/ans.h
@@ -391,7 +391,6 @@ class RAnsEncoder {
ans_.buf[ans_.buf_offset++] = ans_.state % DRACO_ANS_IO_BASE;
ans_.state /= DRACO_ANS_IO_BASE;
}
- // TODO(ostava): The division and multiplication should be optimized.
ans_.state =
(ans_.state / p) * rans_precision + ans_.state % p + sym->cum_prob;
}
diff --git a/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h b/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h
index 10cdc6781..3b408c079 100644
--- a/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h
+++ b/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h
@@ -75,6 +75,13 @@ bool RAnsSymbolDecoder::Create(
return false;
}
}
+ // Check that the decoded number of symbols is not unreasonably high. The
+ // remaining buffer size must be at least |num_symbols| / 64 bytes to contain
+ // the probability table: each |prob_data| entry read below is one byte, and
+ // in the most compact case a single byte can describe up to 64 symbols.
+ if (num_symbols_ / 64 > buffer->remaining_size()) {
+ return false;
+ }
probability_table_.resize(num_symbols_);
if (num_symbols_ == 0) {
return true;
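
The check added above bounds a decoded symbol count by what the remaining input could possibly describe before any memory is allocated. A generic sketch of the same defensive pattern, with hypothetical names:

    // Hypothetical sketch: never size a container from an untrusted count
    // without first bounding it by what the remaining input could encode.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    bool ReadProbabilityTable(uint32_t num_symbols, size_t remaining_bytes,
                              std::vector<uint8_t> *table) {
      // In the most compact encoding one input byte can describe up to 64
      // symbols, so a count above remaining_bytes * 64 cannot be valid.
      if (num_symbols / 64 > remaining_bytes) {
        return false;  // Corrupted or hostile header; refuse to allocate.
      }
      table->resize(num_symbols);
      // ... decode the per-symbol probabilities here ...
      return true;
    }
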
diff --git a/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h b/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h
index 4e07ec871..4b738b50a 100644
--- a/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h
+++ b/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h
@@ -125,8 +125,8 @@ bool RAnsSymbolEncoder::Create(
for (int i = 0; i < num_symbols; ++i) {
sorted_probabilities[i] = i;
}
- std::sort(sorted_probabilities.begin(), sorted_probabilities.end(),
- ProbabilityLess(&probability_table_));
+ std::stable_sort(sorted_probabilities.begin(), sorted_probabilities.end(),
+ ProbabilityLess(&probability_table_));
if (total_rans_prob < rans_precision_) {
// This happens rather infrequently, just add the extra needed precision
// to the most frequent symbol.
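
Replacing std::sort with std::stable_sort makes the ordering of equal-probability symbols deterministic across standard-library implementations, so the encoder produces byte-identical output everywhere. A small illustration with made-up probabilities:

    // Symbols 0 and 2 tie; std::stable_sort keeps their original relative
    // order on every conforming implementation, so the result is reproducible.
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    int main() {
      const std::vector<uint64_t> prob = {5, 7, 5, 9};
      std::vector<uint32_t> order = {0, 1, 2, 3};
      std::stable_sort(order.begin(), order.end(),
                       [&](uint32_t a, uint32_t b) { return prob[a] < prob[b]; });
      // order is now {0, 2, 1, 3}; with std::sort the tie between 0 and 2
      // could be broken either way depending on the standard library.
      return 0;
    }
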
diff --git a/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc b/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc
index 93d29971c..79e811818 100644
--- a/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc
+++ b/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc
@@ -72,7 +72,7 @@ bool DecodeTaggedSymbols(uint32_t num_values, int num_components,
int value_id = 0;
for (uint32_t i = 0; i < num_values; i += num_components) {
// Decode the tag.
- const int bit_length = tag_decoder.DecodeSymbol();
+ const uint32_t bit_length = tag_decoder.DecodeSymbol();
// Decode the actual value.
for (int j = 0; j < num_components; ++j) {
uint32_t val;
diff --git a/contrib/draco/src/draco/compression/expert_encode.cc b/contrib/draco/src/draco/compression/expert_encode.cc
index f9aec15eb..a3e649193 100644
--- a/contrib/draco/src/draco/compression/expert_encode.cc
+++ b/contrib/draco/src/draco/compression/expert_encode.cc
@@ -14,6 +14,12 @@
//
#include "draco/compression/expert_encode.h"
+#include
+#include
+#include
+#include
+#include
+
#include "draco/compression/mesh/mesh_edgebreaker_encoder.h"
#include "draco/compression/mesh/mesh_sequential_encoder.h"
#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED
@@ -21,6 +27,9 @@
#include "draco/compression/point_cloud/point_cloud_sequential_encoder.h"
#endif
+#ifdef DRACO_TRANSCODER_SUPPORTED
+#include "draco/core/bit_utils.h"
+#endif
namespace draco {
ExpertEncoder::ExpertEncoder(const PointCloud &point_cloud)
@@ -101,6 +110,11 @@ Status ExpertEncoder::EncodePointCloudToBuffer(const PointCloud &pc,
Status ExpertEncoder::EncodeMeshToBuffer(const Mesh &m,
EncoderBuffer *out_buffer) {
+#ifdef DRACO_TRANSCODER_SUPPORTED
+ // Apply DracoCompressionOptions associated with the mesh.
+ DRACO_RETURN_IF_ERROR(ApplyCompressionOptions(m));
+#endif // DRACO_TRANSCODER_SUPPORTED
+
std::unique_ptr encoder;
// Select the encoding method only based on the provided options.
int encoding_method = options().GetGlobalInt("encoding_method", -1);
@@ -118,6 +132,7 @@ Status ExpertEncoder::EncodeMeshToBuffer(const Mesh &m,
encoder = std::unique_ptr(new MeshSequentialEncoder());
}
encoder->SetMesh(m);
+
DRACO_RETURN_IF_ERROR(encoder->Encode(options(), out_buffer));
set_num_encoded_points(encoder->num_encoded_points());
@@ -179,4 +194,107 @@ Status ExpertEncoder::SetAttributePredictionScheme(
return status;
}
+#ifdef DRACO_TRANSCODER_SUPPORTED
+Status ExpertEncoder::ApplyCompressionOptions(const Mesh &mesh) {
+ if (!mesh.IsCompressionEnabled()) {
+ return OkStatus();
+ }
+ const auto &compression_options = mesh.GetCompressionOptions();
+
+ // Set any encoder options that haven't been explicitly set by users (don't
+ // override existing options).
+ if (!options().IsSpeedSet()) {
+ options().SetSpeed(10 - compression_options.compression_level,
+ 10 - compression_options.compression_level);
+ }
+
+ for (int ai = 0; ai < mesh.num_attributes(); ++ai) {
+ if (options().IsAttributeOptionSet(ai, "quantization_bits")) {
+ continue; // Don't override options that have been set.
+ }
+ int quantization_bits = 0;
+ const auto type = mesh.attribute(ai)->attribute_type();
+ switch (type) {
+ case GeometryAttribute::POSITION:
+ if (compression_options.quantization_position
+ .AreQuantizationBitsDefined()) {
+ quantization_bits =
+ compression_options.quantization_position.quantization_bits();
+ } else {
+ DRACO_RETURN_IF_ERROR(ApplyGridQuantization(mesh, ai));
+ }
+ break;
+ case GeometryAttribute::TEX_COORD:
+ quantization_bits = compression_options.quantization_bits_tex_coord;
+ break;
+ case GeometryAttribute::NORMAL:
+ quantization_bits = compression_options.quantization_bits_normal;
+ break;
+ case GeometryAttribute::COLOR:
+ quantization_bits = compression_options.quantization_bits_color;
+ break;
+ case GeometryAttribute::TANGENT:
+ quantization_bits = compression_options.quantization_bits_tangent;
+ break;
+ case GeometryAttribute::WEIGHTS:
+ quantization_bits = compression_options.quantization_bits_weight;
+ break;
+ case GeometryAttribute::GENERIC:
+ quantization_bits = compression_options.quantization_bits_generic;
+ break;
+ default:
+ break;
+ }
+ if (quantization_bits > 0) {
+ options().SetAttributeInt(ai, "quantization_bits", quantization_bits);
+ }
+ }
+ return OkStatus();
+}
+
+Status ExpertEncoder::ApplyGridQuantization(const Mesh &mesh,
+ int attribute_index) {
+ const auto compression_options = mesh.GetCompressionOptions();
+ if (mesh.attribute(attribute_index)->num_components() != 3) {
+ return ErrorStatus(
+ "Invalid number of components: Grid quantization is currently "
+ "supported only for 3D positions.");
+ }
+ const float spacing = compression_options.quantization_position.spacing();
+ // Compute quantization properties based on the grid spacing.
+ const auto &bbox = mesh.ComputeBoundingBox();
+ // Snap min and max points of the |bbox| to the quantization grid vertices.
+ Vector3f min_pos;
+ int num_values = 0; // Number of values that we need to encode.
+ for (int c = 0; c < 3; ++c) {
+ // Min / max position on grid vertices in grid coordinates.
+ const float min_grid_pos = floor(bbox.GetMinPoint()[c] / spacing);
+ const float max_grid_pos = ceil(bbox.GetMaxPoint()[c] / spacing);
+
+ // Min pos on grid vertex in mesh coordinates.
+ min_pos[c] = min_grid_pos * spacing;
+
+ const float component_num_values =
+ static_cast<int>(max_grid_pos) - static_cast<int>(min_grid_pos) + 1;
+ if (component_num_values > num_values) {
+ num_values = component_num_values;
+ }
+ }
+ // Now compute the number of bits needed to encode |num_values|.
+ int bits = MostSignificantBit(num_values);
+ if ((1 << bits) < num_values) {
+ // If |num_values| is larger than the number of values representable by
+ // |bits| bits, we need one more bit. This is almost always the case
+ // unless |num_values| is exactly 1 << |bits|.
+ bits++;
+ }
+ // Compute the range in mesh coordinates that matches the quantization bits.
+ // Note there are n-1 intervals between the |n| quantization values.
+ const float range = ((1 << bits) - 1) * spacing;
+ SetAttributeExplicitQuantization(attribute_index, bits, 3, min_pos.data(),
+ range);
+ return OkStatus();
+}
+#endif // DRACO_TRANSCODER_SUPPORTED
+
} // namespace draco
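
ApplyGridQuantization snaps the quantization origin to floor(min / spacing) * spacing per component, which is what the offset test earlier in this patch expects. A standalone recomputation, assuming the unshifted test cube spans [0, 1] per component (illustrative only):

    // Recomputes origin, value count, bits and range for a [0, 1] cube moved
    // by (-0.55, 0.65, 10.75) with grid spacing 0.0625.
    #include <cmath>
    #include <cstdio>

    int main() {
      const float spacing = 0.0625f;
      const float mins[3] = {-0.55f, 0.65f, 10.75f};
      int num_values = 0;
      for (int c = 0; c < 3; ++c) {
        const float min_grid = std::floor(mins[c] / spacing);
        const float max_grid = std::ceil((mins[c] + 1.f) / spacing);
        const float origin = min_grid * spacing;  // -0.5625, 0.625, 10.75
        const int values = static_cast<int>(max_grid - min_grid) + 1;  // 18, 18, 17
        if (values > num_values) {
          num_values = values;
        }
        std::printf("component %d: origin %g, %d grid values\n", c, origin, values);
      }
      int bits = 0;
      while ((1 << bits) < num_values) {
        ++bits;  // 18 values -> 5 bits.
      }
      // Prints bits = 5, range = 31 * 0.0625 = 1.9375.
      std::printf("bits = %d, range = %g\n", bits, ((1 << bits) - 1) * spacing);
      return 0;
    }
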
diff --git a/contrib/draco/src/draco/compression/expert_encode.h b/contrib/draco/src/draco/compression/expert_encode.h
index ea59393d3..5c1485e1e 100644
--- a/contrib/draco/src/draco/compression/expert_encode.h
+++ b/contrib/draco/src/draco/compression/expert_encode.h
@@ -138,6 +138,12 @@ class ExpertEncoder : public EncoderBase {
Status EncodeMeshToBuffer(const Mesh &m, EncoderBuffer *out_buffer);
+#ifdef DRACO_TRANSCODER_SUPPORTED
+ // Applies compression options stored in |mesh|.
+ Status ApplyCompressionOptions(const Mesh &mesh);
+ Status ApplyGridQuantization(const Mesh &mesh, int attribute_index);
+#endif // DRACO_TRANSCODER_SUPPORTED
+
const PointCloud *point_cloud_;
const Mesh *mesh_;
};
diff --git a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.cc b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.cc
index 0bbbea4af..21ad9959c 100644
--- a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.cc
+++ b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_decoder_impl.cc
@@ -454,7 +454,7 @@ bool MeshEdgebreakerDecoderImpl::DecodeConnectivity() {
#endif
// Decode connectivity of non-position attributes.
- if (attribute_data_.size() > 0) {
+ if (!attribute_data_.empty()) {
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 1)) {
for (CornerIndex ci(0); ci < corner_table_->num_corners(); ci += 3) {
@@ -484,7 +484,10 @@ bool MeshEdgebreakerDecoderImpl::DecodeConnectivity() {
attribute_data_[i].connectivity_data.AddSeamEdge(CornerIndex(c));
}
// Recompute vertices from the newly added seam edges.
- attribute_data_[i].connectivity_data.RecomputeVertices(nullptr, nullptr);
+ if (!attribute_data_[i].connectivity_data.RecomputeVertices(nullptr,
+ nullptr)) {
+ return false;
+ }
}
pos_encoding_data_.Init(corner_table_->num_vertices());
@@ -574,6 +577,17 @@ int MeshEdgebreakerDecoderImpl::DecodeConnectivity(
const CornerIndex corner_b =
corner_table_->Next(corner_table_->LeftMostCorner(vertex_x));
+ if (corner_a == corner_b) {
+ // All matched corners must be different.
+ return -1;
+ }
+ if (corner_table_->Opposite(corner_a) != kInvalidCornerIndex ||
+ corner_table_->Opposite(corner_b) != kInvalidCornerIndex) {
+ // One of the corners is already opposite to an existing face, which
+ // should not happen unless the input was tampered with.
+ return -1;
+ }
+
// New tip corner.
const CornerIndex corner(3 * face.value());
// Update opposite corner mappings.
@@ -616,6 +630,11 @@ int MeshEdgebreakerDecoderImpl::DecodeConnectivity(
return -1;
}
const CornerIndex corner_a = active_corner_stack.back();
+ if (corner_table_->Opposite(corner_a) != kInvalidCornerIndex) {
+ // Active corner is already opposite to an existing face, which should
+ // not happen unless the input was tampered with.
+ return -1;
+ }
// First corner on the new face is either corner "l" or "r".
const CornerIndex corner(3 * face.value());
@@ -681,10 +700,14 @@ int MeshEdgebreakerDecoderImpl::DecodeConnectivity(
}
const CornerIndex corner_a = active_corner_stack.back();
+ if (corner_a == corner_b) {
+ // All matched corners must be different.
+ return -1;
+ }
if (corner_table_->Opposite(corner_a) != kInvalidCornerIndex ||
corner_table_->Opposite(corner_b) != kInvalidCornerIndex) {
// One of the corners is already opposite to an existing face, which
- // should not happen unless the input was tempered with.
+ // should not happen unless the input was tampered with.
return -1;
}
@@ -713,9 +736,15 @@ int MeshEdgebreakerDecoderImpl::DecodeConnectivity(
// Also update the vertex id at corner "n" and all corners that are
// connected to it in the CCW direction.
+ const CornerIndex first_corner = corner_n;
while (corner_n != kInvalidCornerIndex) {
corner_table_->MapCornerToVertex(corner_n, vertex_p);
corner_n = corner_table_->SwingLeft(corner_n);
+ if (corner_n == first_corner) {
+ // We reached the start again which should not happen for split
+ // symbols.
+ return -1;
+ }
}
// Make sure the old vertex n is now mapped to an invalid corner (make it
// isolated).
@@ -800,7 +829,7 @@ int MeshEdgebreakerDecoderImpl::DecodeConnectivity(
return -1; // Unexpected number of decoded vertices.
}
// Decode start faces and connect them to the faces from the active stack.
- while (active_corner_stack.size() > 0) {
+ while (!active_corner_stack.empty()) {
const CornerIndex corner = active_corner_stack.back();
active_corner_stack.pop_back();
const bool interior_face =
@@ -842,6 +871,18 @@ int MeshEdgebreakerDecoderImpl::DecodeConnectivity(
const CornerIndex corner_c =
corner_table_->Next(corner_table_->LeftMostCorner(vert_x));
+ if (corner == corner_b || corner == corner_c || corner_b == corner_c) {
+ // All matched corners must be different.
+ return -1;
+ }
+ if (corner_table_->Opposite(corner) != kInvalidCornerIndex ||
+ corner_table_->Opposite(corner_b) != kInvalidCornerIndex ||
+ corner_table_->Opposite(corner_c) != kInvalidCornerIndex) {
+ // One of the corners is already opposite to an existing face, which
+ // should not happen unless the input was tampered with.
+ return -1;
+ }
+
const VertexIndex vert_p =
corner_table_->Vertex(corner_table_->Next(corner_c));
@@ -894,6 +935,11 @@ int MeshEdgebreakerDecoderImpl::DecodeConnectivity(
VertexCornersIterator vcit(corner_table_.get(), src_vert);
for (; !vcit.End(); ++vcit) {
const CornerIndex cid = vcit.Corner();
+ if (corner_table_->Vertex(cid) != src_vert) {
+ // Vertex mapped to |cid| was not |src_vert|. This indicates corrupted
+ // data and we should terminate the decoding.
+ return -1;
+ }
corner_table_->MapCornerToVertex(cid, invalid_vert);
}
corner_table_->SetLeftMostCorner(invalid_vert,
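
The checks added throughout DecodeConnectivity enforce one invariant before two corners are glued together: the corners must be distinct and neither may already have an opposite face. A hypothetical helper expressing that invariant (illustrative, not part of the patch):

    // Hypothetical helper (illustrative names) capturing the invariant the
    // new checks enforce before two corners are connected.
    #include "draco/mesh/corner_table.h"

    namespace draco {

    bool CanConnectCorners(const CornerTable &table, CornerIndex a,
                           CornerIndex b) {
      if (a == b) {
        return false;  // A corner cannot be glued to itself.
      }
      if (table.Opposite(a) != kInvalidCornerIndex ||
          table.Opposite(b) != kInvalidCornerIndex) {
        return false;  // One of the edges already borders two faces.
      }
      return true;
    }

    }  // namespace draco
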
diff --git a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.cc b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.cc
index 5aff5d8cc..a7f381480 100644
--- a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.cc
+++ b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder.cc
@@ -31,7 +31,6 @@ bool MeshEdgebreakerEncoder::InitializeEncoder() {
impl_ = nullptr;
// For tiny meshes it's usually better to use the basic edgebreaker as the
// overhead of the predictive one may turn out to be too big.
- // TODO(b/111065939): Check if this can be improved.
const bool is_tiny_mesh = mesh()->num_faces() < 1000;
int selected_edgebreaker_method =
diff --git a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.cc b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.cc
index 0791dc670..4bf6aa920 100644
--- a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.cc
+++ b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.cc
@@ -408,7 +408,7 @@ Status MeshEdgebreakerEncoderImpl::EncodeConnectivity() {
init_face_connectivity_corners.begin(),
init_face_connectivity_corners.end());
// Encode connectivity for all non-position attributes.
- if (attribute_data_.size() > 0) {
+ if (!attribute_data_.empty()) {
// Use the same order of corner that will be used by the decoder.
visited_faces_.assign(mesh_->num_faces(), false);
for (CornerIndex ci : processed_connectivity_corners_) {
diff --git a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.h b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.h
index fb3377163..979e1d373 100644
--- a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.h
+++ b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.h
@@ -177,7 +177,6 @@ class MeshEdgebreakerEncoderImpl : public MeshEdgebreakerEncoderImplInterface {
uint32_t num_split_symbols_;
// Struct holding data used for encoding each non-position attribute.
- // TODO(ostava): This should be probably renamed to something better.
struct AttributeData {
AttributeData() : attribute_index(-1), is_connectivity_used(true) {}
int attribute_index;
diff --git a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoding_test.cc b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoding_test.cc
index 831388245..523303b09 100644
--- a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoding_test.cc
+++ b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_encoding_test.cc
@@ -44,7 +44,7 @@ class MeshEdgebreakerEncodingTest : public ::testing::Test {
EncoderOptions encoder_options = EncoderOptions::CreateDefaultOptions();
encoder_options.SetSpeed(10 - compression_level, 10 - compression_level);
encoder.SetMesh(*mesh);
- ASSERT_TRUE(encoder.Encode(encoder_options, &buffer).ok());
+ DRACO_ASSERT_OK(encoder.Encode(encoder_options, &buffer));
DecoderBuffer dec_buffer;
dec_buffer.Init(buffer.data(), buffer.size());
@@ -52,15 +52,14 @@ class MeshEdgebreakerEncodingTest : public ::testing::Test {
std::unique_ptr decoded_mesh(new Mesh());
DecoderOptions dec_options;
- ASSERT_TRUE(
- decoder.Decode(dec_options, &dec_buffer, decoded_mesh.get()).ok());
+ DRACO_ASSERT_OK(
+ decoder.Decode(dec_options, &dec_buffer, decoded_mesh.get()));
// Cleanup the input mesh to make sure that input and output can be
// compared (edgebreaker method discards degenerated triangles and isolated
// vertices).
const MeshCleanupOptions options;
- MeshCleanup cleanup;
- ASSERT_TRUE(cleanup(mesh, options)) << "Failed to clean the input mesh.";
+ DRACO_ASSERT_OK(MeshCleanup::Cleanup(mesh, options));
MeshAreEquivalent eq;
ASSERT_TRUE(eq(*mesh, *decoded_mesh.get()))
@@ -102,8 +101,8 @@ TEST_F(MeshEdgebreakerEncodingTest, TestEncoderReuse) {
EncoderOptions encoder_options = EncoderOptions::CreateDefaultOptions();
encoder.SetMesh(*mesh);
EncoderBuffer buffer_0, buffer_1;
- ASSERT_TRUE(encoder.Encode(encoder_options, &buffer_0).ok());
- ASSERT_TRUE(encoder.Encode(encoder_options, &buffer_1).ok());
+ DRACO_ASSERT_OK(encoder.Encode(encoder_options, &buffer_0));
+ DRACO_ASSERT_OK(encoder.Encode(encoder_options, &buffer_1));
// Make sure both buffers are identical.
ASSERT_EQ(buffer_0.size(), buffer_1.size());
@@ -123,7 +122,7 @@ TEST_F(MeshEdgebreakerEncodingTest, TestDecoderReuse) {
EncoderOptions encoder_options = EncoderOptions::CreateDefaultOptions();
encoder.SetMesh(*mesh);
EncoderBuffer buffer;
- ASSERT_TRUE(encoder.Encode(encoder_options, &buffer).ok());
+ DRACO_ASSERT_OK(encoder.Encode(encoder_options, &buffer));
DecoderBuffer dec_buffer;
dec_buffer.Init(buffer.data(), buffer.size());
@@ -133,13 +132,13 @@ TEST_F(MeshEdgebreakerEncodingTest, TestDecoderReuse) {
// Decode the mesh two times.
std::unique_ptr decoded_mesh_0(new Mesh());
DecoderOptions dec_options;
- ASSERT_TRUE(
- decoder.Decode(dec_options, &dec_buffer, decoded_mesh_0.get()).ok());
+ DRACO_ASSERT_OK(
+ decoder.Decode(dec_options, &dec_buffer, decoded_mesh_0.get()));
dec_buffer.Init(buffer.data(), buffer.size());
std::unique_ptr decoded_mesh_1(new Mesh());
- ASSERT_TRUE(
- decoder.Decode(dec_options, &dec_buffer, decoded_mesh_1.get()).ok());
+ DRACO_ASSERT_OK(
+ decoder.Decode(dec_options, &dec_buffer, decoded_mesh_1.get()));
// Make sure both of the meshes are identical.
MeshAreEquivalent eq;
@@ -169,7 +168,7 @@ TEST_F(MeshEdgebreakerEncodingTest, TestSingleConnectivityEncoding) {
encoder.SetAttributeQuantization(GeometryAttribute::TEX_COORD, 8);
encoder.SetAttributeQuantization(GeometryAttribute::NORMAL, 8);
encoder.SetEncodingMethod(MESH_EDGEBREAKER_ENCODING);
- ASSERT_TRUE(encoder.EncodeMeshToBuffer(*mesh, &buffer).ok());
+ DRACO_ASSERT_OK(encoder.EncodeMeshToBuffer(*mesh, &buffer));
DecoderBuffer dec_buffer;
dec_buffer.Init(buffer.data(), buffer.size());
@@ -216,7 +215,7 @@ TEST_F(MeshEdgebreakerEncodingTest, TestWrongAttributeOrder) {
encoder.SetAttributeQuantization(GeometryAttribute::POSITION, 8);
encoder.SetAttributeQuantization(GeometryAttribute::NORMAL, 8);
encoder.SetEncodingMethod(MESH_EDGEBREAKER_ENCODING);
- ASSERT_TRUE(encoder.EncodeMeshToBuffer(*mesh, &buffer).ok());
+ DRACO_ASSERT_OK(encoder.EncodeMeshToBuffer(*mesh, &buffer));
DecoderBuffer dec_buffer;
dec_buffer.Init(buffer.data(), buffer.size());
diff --git a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_shared.h b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_shared.h
index cb3c29dd6..c650bc352 100644
--- a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_shared.h
+++ b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_shared.h
@@ -50,8 +50,6 @@ namespace draco {
// \ / S \ / / E \
// *-------* *-------*
//
-// TODO(ostava): Get rid of the topology bit pattern. It's important only for
-// encoding but the algorithms should use EdgebreakerSymbol instead.
enum EdgebreakerTopologyBitPattern {
TOPOLOGY_C = 0x0, // 0
TOPOLOGY_S = 0x1, // 1 0 0
diff --git a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h
index c00373727..89553e909 100644
--- a/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h
+++ b/contrib/draco/src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h
@@ -129,7 +129,11 @@ class MeshEdgebreakerTraversalValenceDecoder
if (context_counter < 0) {
return TOPOLOGY_INVALID;
}
- const int symbol_id = context_symbols_[active_context_][context_counter];
+ const uint32_t symbol_id =
+ context_symbols_[active_context_][context_counter];
+ if (symbol_id > 4) {
+ return TOPOLOGY_INVALID;
+ }
last_symbol_ = edge_breaker_symbol_to_topology_id[symbol_id];
} else {
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
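
The valence decoder now rejects decoded symbol ids greater than 4 before they index the five-entry symbol-to-topology table. A generic sketch of this bounds-check-before-lookup pattern (table values are illustrative, not the library's actual bit patterns):

    // Validate an untrusted index against the table size before using it.
    #include <cstdint>

    constexpr int kInvalidTopology = -1;
    constexpr int kSymbolToTopology[5] = {0, 1, 3, 5, 7};

    int SymbolToTopology(uint32_t symbol_id) {
      if (symbol_id > 4) {
        return kInvalidTopology;  // Out-of-range id from corrupt input.
      }
      return kSymbolToTopology[symbol_id];
    }
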
diff --git a/contrib/draco/src/draco/compression/mesh/mesh_encoder_test.cc b/contrib/draco/src/draco/compression/mesh/mesh_encoder_test.cc
index 55f683696..2dfdb58ef 100644
--- a/contrib/draco/src/draco/compression/mesh/mesh_encoder_test.cc
+++ b/contrib/draco/src/draco/compression/mesh/mesh_encoder_test.cc
@@ -78,9 +78,10 @@ class MeshEncoderTest : public ::testing::TestWithParam {
encoder.SetAttributeQuantization(i, 12);
}
EncoderBuffer buffer;
- ASSERT_TRUE(encoder.EncodeToBuffer(&buffer).ok())
- << "Failed encoding test mesh " << file_name << " with method "
- << GetParam().encoding_method;
+ const Status status = encoder.EncodeToBuffer(&buffer);
+ EXPECT_TRUE(status.ok()) << "Failed encoding test mesh " << file_name
+ << " with method " << GetParam().encoding_method;
+ DRACO_ASSERT_OK(status);
// Check that the encoded mesh was really encoded with the selected method.
DecoderBuffer decoder_buffer;
decoder_buffer.Init(buffer.data(), buffer.size());
@@ -88,6 +89,7 @@ class MeshEncoderTest : public ::testing::TestWithParam {
uint8_t encoded_method;
ASSERT_TRUE(decoder_buffer.Decode(&encoded_method));
ASSERT_EQ(encoded_method, method);
+
if (!FLAGS_update_golden_files) {
EXPECT_TRUE(
CompareGoldenFile(golden_file_name, buffer.data(), buffer.size()))
diff --git a/contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.cc b/contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.cc
index be349f543..595a487a4 100644
--- a/contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.cc
+++ b/contrib/draco/src/draco/compression/mesh/mesh_sequential_decoder.cc
@@ -96,7 +96,7 @@ bool MeshSequentialDecoder::DecodeConnectivity() {
}
mesh()->AddFace(face);
}
- } else if (mesh()->num_points() < (1 << 21) &&
+ } else if (num_points < (1 << 21) &&
bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 2)) {
// Decode indices as uint32_t.
for (uint32_t i = 0; i < num_faces; ++i) {
@@ -158,6 +158,10 @@ bool MeshSequentialDecoder::DecodeAndDecompressIndices(uint32_t num_faces) {
index_diff = -index_diff;
}
const int32_t index_value = index_diff + last_index_value;
+ if (index_value < 0) {
+ // Negative indices are not allowed.
+ return false;
+ }
face[j] = index_value;
last_index_value = index_value;
}
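
DecodeAndDecompressIndices rebuilds vertex indices from signed deltas, and the new guard rejects any reconstruction that goes negative. A self-contained sketch of the same idea (hypothetical function, not draco's API):

    // Rebuild indices from signed deltas, rejecting negative results.
    #include <cstdint>
    #include <vector>

    bool DecodeDeltaIndices(const std::vector<int32_t> &signed_diffs,
                            std::vector<int32_t> *indices) {
      int32_t last = 0;
      for (const int32_t diff : signed_diffs) {
        const int32_t value = last + diff;
        if (value < 0) {
          return false;  // Negative indices are not valid vertex references.
        }
        indices->push_back(value);
        last = value;
      }
      return true;
    }
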
diff --git a/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.cc b/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.cc
index 02ac7779e..fd8b11392 100644
--- a/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.cc
+++ b/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.cc
@@ -32,8 +32,6 @@ Status MeshSequentialEncoder::EncodeConnectivity() {
EncodeVarint(static_cast(mesh()->num_points()), buffer());
// We encode all attributes in the original (possibly duplicated) format.
- // TODO(ostava): This may not be optimal if we have only one attribute or if
- // all attributes share the same index mapping.
if (options()->GetGlobalBool("compress_connectivity", false)) {
// 0 = Encode compressed indices.
buffer()->Encode(static_cast(0));
@@ -44,8 +42,6 @@ Status MeshSequentialEncoder::EncodeConnectivity() {
// 1 = Encode indices directly.
buffer()->Encode(static_cast(1));
// Store vertex indices using the smallest data type that fits their range.
- // TODO(ostava): This can be potentially improved by using a tighter
- // fit that is not bound by a bit-length of any particular data type.
if (mesh()->num_points() < 256) {
// Serialize indices as uint8_t.
for (FaceIndex i(0); i < num_faces; ++i) {
diff --git a/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.h b/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.h
index 672609642..6e2b05877 100644
--- a/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.h
+++ b/contrib/draco/src/draco/compression/mesh/mesh_sequential_encoder.h
@@ -33,7 +33,6 @@ namespace draco {
// Class that encodes mesh data using a simple binary representation of mesh's
// connectivity and geometry.
-// TODO(ostava): Use a better name.
class MeshSequentialEncoder : public MeshEncoder {
public:
MeshSequentialEncoder();
diff --git a/contrib/draco/src/draco/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h b/contrib/draco/src/draco/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h
index e66dd14b2..dd9738ba2 100644
--- a/contrib/draco/src/draco/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h
+++ b/contrib/draco/src/draco/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h
@@ -25,7 +25,7 @@ namespace draco {
// values based on the traversal of the encoded mesh. The class should be used
// as the TraversalObserverT member of a Traverser class such as the
// DepthFirstTraverser (depth_first_traverser.h).
-// TODO(hemmer): rename to AttributeIndicesCodingTraverserObserver
+// TODO(b/199760123): Rename to AttributeIndicesCodingTraverserObserver.
template
class MeshAttributeIndicesEncodingObserver {
public:
diff --git a/contrib/draco/src/draco/compression/mesh/traverser/mesh_traversal_sequencer.h b/contrib/draco/src/draco/compression/mesh/traverser/mesh_traversal_sequencer.h
index ebe1d5f7a..e55c93a79 100644
--- a/contrib/draco/src/draco/compression/mesh/traverser/mesh_traversal_sequencer.h
+++ b/contrib/draco/src/draco/compression/mesh/traverser/mesh_traversal_sequencer.h
@@ -25,7 +25,7 @@ namespace draco {
// Sequencer that generates point sequence in an order given by a deterministic
// traversal on the mesh surface. Note that all attributes encoded with this
// sequence must share the same connectivity.
-// TODO(hemmer): Consider refactoring such that this is an observer.
+// TODO(b/199760123): Consider refactoring such that this is an observer.
template
class MeshTraversalSequencer : public PointsSequencer {
public:
diff --git a/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h b/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h
index 87bc2b7ef..55bafe7c4 100644
--- a/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h
+++ b/contrib/draco/src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h
@@ -18,8 +18,10 @@
#define DRACO_COMPRESSION_POINT_CLOUD_ALGORITHMS_DYNAMIC_INTEGER_POINTS_KD_TREE_DECODER_H_
#include
+#include
#include
#include
+#include
#include "draco/compression/bit_coders/adaptive_rans_bit_decoder.h"
#include "draco/compression/bit_coders/direct_bit_decoder.h"
@@ -92,17 +94,29 @@ class DynamicIntegerPointsKdTreeDecoder {
base_stack_(32 * dimension + 1, VectorUint32(dimension, 0)),
levels_stack_(32 * dimension + 1, VectorUint32(dimension, 0)) {}
- // Decodes a integer point cloud from |buffer|.
+ // Decodes an integer point cloud from |buffer|. Optional |oit_max_points| can
+ // be used to tell the decoder the maximum number of points accepted by the
+ // iterator.
template
bool DecodePoints(DecoderBuffer *buffer, OutputIteratorT &oit);
+ template
+ bool DecodePoints(DecoderBuffer *buffer, OutputIteratorT &oit,
+ uint32_t oit_max_points);
+
#ifndef DRACO_OLD_GCC
template
bool DecodePoints(DecoderBuffer *buffer, OutputIteratorT &&oit);
+ template
+ bool DecodePoints(DecoderBuffer *buffer, OutputIteratorT &&oit,
+ uint32_t oit_max_points);
#endif // DRACO_OLD_GCC
const uint32_t dimension() const { return dimension_; }
+ // Returns the number of decoded points. Must be called after DecodePoints().
+ uint32_t num_decoded_points() const { return num_decoded_points_; }
+
private:
uint32_t GetAxis(uint32_t num_remaining_points, const VectorUint32 &levels,
uint32_t last_axis);
@@ -146,8 +160,15 @@ template
template
bool DynamicIntegerPointsKdTreeDecoder::DecodePoints(
DecoderBuffer *buffer, OutputIteratorT &&oit) {
+ return DecodePoints(buffer, oit, std::numeric_limits<uint32_t>::max());
+}
+
+template
+template
+bool DynamicIntegerPointsKdTreeDecoder::DecodePoints(
+ DecoderBuffer *buffer, OutputIteratorT &&oit, uint32_t oit_max_points) {
OutputIteratorT local = std::forward(oit);
- return DecodePoints(buffer, local);
+ return DecodePoints(buffer, local, oit_max_points);
}
#endif // DRACO_OLD_GCC
@@ -155,6 +176,13 @@ template
template