Merge branch 'master' into master

pull/3650/head
Kim Kulling 2021-02-20 19:35:31 +01:00 committed by GitHub
commit 616d924d29
432 changed files with 56994 additions and 197 deletions


@@ -69,6 +69,12 @@ jobs:
repository: cpp-pm/polly
path: cmake/polly
- name: Remove contrib directory for Hunter builds
if: contains(matrix.name, 'hunter')
uses: JesseTG/rm@v1.0.2
with:
path: contrib
- name: Cache DX SDK
id: dxcache
if: contains(matrix.name, 'windows')


@@ -46,8 +46,8 @@ option(ASSIMP_HUNTER_ENABLED "Enable Hunter package manager support" OFF)
IF(ASSIMP_HUNTER_ENABLED)
include("cmake/HunterGate.cmake")
HunterGate(
URL "https://github.com/cpp-pm/hunter/archive/v0.23.293.tar.gz"
SHA1 "e8e5470652db77149d9b38656db2a6c0b7642693"
)
add_definitions(-DASSIMP_USE_HUNTER)
@@ -452,6 +452,12 @@ IF(ASSIMP_HUNTER_ENABLED)
set(ZLIB_LIBRARIES ZLIB::zlib)
set(ASSIMP_BUILD_MINIZIP TRUE)
ELSE()
# If the zlib is already found outside, add an export in case assimpTargets can't find it.
IF( ZLIB_FOUND )
INSTALL( TARGETS zlib
EXPORT "${TARGETS_EXPORT_NAME}")
ENDIF()
IF ( NOT ASSIMP_BUILD_ZLIB )
FIND_PACKAGE(ZLIB)
ENDIF()
@@ -567,6 +573,94 @@ ELSE ()
ADD_DEFINITIONS( -DASSIMP_BUILD_NO_C4D_IMPORTER )
ENDIF ()
# Draco requires cmake 3.12
IF (DEFINED CMAKE_VERSION AND "${CMAKE_VERSION}" VERSION_LESS "3.12")
message(NOTICE "draco requires cmake 3.12 or newer, cmake is ${CMAKE_VERSION} . Draco is disabled")
SET ( ASSIMP_BUILD_DRACO OFF CACHE BOOL "Disabled: Draco requires newer cmake" FORCE )
ELSE()
OPTION ( ASSIMP_BUILD_DRACO "If the Draco libraries are to be built. Primarily for glTF" ON )
IF ( ASSIMP_BUILD_DRACO )
# Primarily for glTF v2
# Enable Draco glTF feature set
set(DRACO_GLTF ON CACHE BOOL "" FORCE)
# Disable unnecessary or omitted components
set(DRACO_JS_GLUE OFF CACHE BOOL "" FORCE)
set(DRACO_WASM OFF CACHE BOOL "" FORCE)
set(DRACO_MAYA_PLUGIN OFF CACHE BOOL "" FORCE)
set(DRACO_UNITY_PLUGIN OFF CACHE BOOL "" FORCE)
set(DRACO_TESTS OFF CACHE BOOL "" FORCE)
IF(ASSIMP_HUNTER_ENABLED)
hunter_add_package(draco)
find_package(draco CONFIG REQUIRED)
set(draco_LIBRARIES draco::draco)
ELSE()
# Draco 1.4.1 has many warnings and will not build with /WX or -Werror
# See https://github.com/google/draco/issues/672
# and https://github.com/google/draco/issues/673
IF(MSVC)
set(DRACO_CXX_FLAGS "/W0")
ELSE()
list(APPEND DRACO_CXX_FLAGS
"-Wno-bool-compare"
"-Wno-comment"
"-Wno-maybe-uninitialized"
"-Wno-sign-compare"
"-Wno-unused-local-typedefs"
)
# Draco 1.4.1 does not explicitly export any symbols under GCC/clang
list(APPEND DRACO_CXX_FLAGS
"-fvisibility=default"
)
ENDIF()
# Don't build or install all of Draco by default
ADD_SUBDIRECTORY( "contrib/draco" EXCLUDE_FROM_ALL )
if(MSVC OR WIN32)
set(draco_LIBRARIES "draco")
else()
if(BUILD_SHARED_LIBS)
set(draco_LIBRARIES "draco_shared")
else()
set(draco_LIBRARIES "draco_static")
endif()
endif()
# Don't build the draco command-line tools by default
set_target_properties(draco_encoder draco_decoder PROPERTIES
EXCLUDE_FROM_ALL TRUE
EXCLUDE_FROM_DEFAULT_BUILD TRUE
)
# Do build the draco shared library
set_target_properties(${draco_LIBRARIES} PROPERTIES
EXCLUDE_FROM_ALL FALSE
EXCLUDE_FROM_DEFAULT_BUILD FALSE
)
TARGET_USE_COMMON_OUTPUT_DIRECTORY(${draco_LIBRARIES})
TARGET_USE_COMMON_OUTPUT_DIRECTORY(draco_encoder)
TARGET_USE_COMMON_OUTPUT_DIRECTORY(draco_decoder)
set(draco_INCLUDE_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/contrib/draco/src")
# This is probably wrong
INSTALL( TARGETS ${draco_LIBRARIES}
EXPORT "${TARGETS_EXPORT_NAME}"
LIBRARY DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
ARCHIVE DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
RUNTIME DESTINATION ${ASSIMP_BIN_INSTALL_DIR}
FRAMEWORK DESTINATION ${ASSIMP_LIB_INSTALL_DIR}
COMPONENT ${LIBASSIMP_COMPONENT}
INCLUDES DESTINATION include
)
ENDIF()
ENDIF()
ENDIF()
# Main assimp code
ADD_SUBDIRECTORY( code/ )
IF ( ASSIMP_BUILD_ASSIMP_TOOLS )
# The viewer for windows only
@@ -580,7 +674,7 @@ IF ( ASSIMP_BUILD_ASSIMP_TOOLS )
ADD_SUBDIRECTORY( tools/assimp_cmd/ )
ENDIF ()
IF ( ASSIMP_BUILD_SAMPLES )
SET( SAMPLES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/samples )
SET( SAMPLES_SHARED_CODE_DIR ${SAMPLES_DIR}/SharedCode )
IF ( WIN32 )


@@ -45,6 +45,7 @@ Take a look into the https://github.com/assimp/assimp/blob/master/Build.md file.
* [Unity 3d Plugin](https://www.assetstore.unity3d.com/en/#!/content/91777)
* [JVM](https://github.com/kotlin-graphics/assimp) Full jvm port (current [status](https://github.com/kotlin-graphics/assimp/wiki/Status))
* [HAXE-Port](https://github.com/longde123/assimp-haxe) The Assimp-HAXE-port.
* [Rust](https://github.com/jkvargas/russimp)
### Other tools ###
[open3mod](https://github.com/acgessler/open3mod) is a powerful 3D model viewer based on Assimp's import and export abilities.


@@ -10,5 +10,9 @@ find_package(polyclipping CONFIG REQUIRED)
find_package(zip CONFIG REQUIRED)
find_package(pugixml CONFIG REQUIRED)
if(@ASSIMP_BUILD_DRACO@)
find_package(draco CONFIG REQUIRED)
endif()
include("${CMAKE_CURRENT_LIST_DIR}/@TARGETS_EXPORT_NAME@.cmake") include("${CMAKE_CURRENT_LIST_DIR}/@TARGETS_EXPORT_NAME@.cmake")
check_required_components("@PROJECT_NAME@") check_required_components("@PROJECT_NAME@")


@@ -122,8 +122,8 @@ void ObjFileMtlImporter::load() {
{
++m_DataIt;
getColorRGBA(&m_pModel->m_pCurrentMaterial->ambient);
} else if (*m_DataIt == 'd') {
// Diffuse color
++m_DataIt;
getColorRGBA(&m_pModel->m_pCurrentMaterial->diffuse);
} else if (*m_DataIt == 's') {
@@ -144,7 +144,9 @@ void ObjFileMtlImporter::load() {
} else if (*m_DataIt == 'r') {
// Material transmission alpha value
++m_DataIt;
ai_real d;
getFloatValue(d);
m_pModel->m_pCurrentMaterial->alpha = static_cast<ai_real>(1.0) - d;
}
m_DataIt = skipLine<DataArrayIt>(m_DataIt, m_DataItEnd, m_uiLine);
} break;


@@ -54,7 +54,7 @@ size_t DecodeBase64(const char *in, size_t inLength, uint8_t *&out) {
}
if (inLength < 4) {
out = nullptr;
return 0;
}


@@ -107,7 +107,6 @@ public:
f(file) {}
~IOStream() {
fclose(f);
f = 0;
}
size_t Read(void *b, size_t sz, size_t n) { return fread(b, sz, n, f); }


@@ -376,87 +376,6 @@ struct Object {
// Classes for each glTF top-level object type
//
//! A typed view into a BufferView. A BufferView contains raw binary data.
//! An accessor provides a typed view into a BufferView or a subset of a BufferView
//! similar to how WebGL's vertexAttribPointer() defines an attribute in a buffer.
struct Accessor : public Object {
struct Sparse;
Ref<BufferView> bufferView; //!< The ID of the bufferView. (required)
size_t byteOffset; //!< The offset relative to the start of the bufferView in bytes. (required)
ComponentType componentType; //!< The datatype of components in the attribute. (required)
size_t count; //!< The number of attributes referenced by this accessor. (required)
AttribType::Value type; //!< Specifies if the attribute is a scalar, vector, or matrix. (required)
std::vector<double> max; //!< Maximum value of each component in this attribute.
std::vector<double> min; //!< Minimum value of each component in this attribute.
std::unique_ptr<Sparse> sparse;
unsigned int GetNumComponents();
unsigned int GetBytesPerComponent();
unsigned int GetElementSize();
inline uint8_t *GetPointer();
template <class T>
void ExtractData(T *&outData);
void WriteData(size_t count, const void *src_buffer, size_t src_stride);
void WriteSparseValues(size_t count, const void *src_data, size_t src_dataStride);
void WriteSparseIndices(size_t count, const void *src_idx, size_t src_idxStride);
//! Helper class to iterate the data
class Indexer {
friend struct Accessor;
// This field is reported as not used, making it protectd is the easiest way to work around it without going to the bottom of what the problem is:
// ../code/glTF2/glTF2Asset.h:392:19: error: private field 'accessor' is not used [-Werror,-Wunused-private-field]
protected:
Accessor &accessor;
private:
uint8_t *data;
size_t elemSize, stride;
Indexer(Accessor &acc);
public:
//! Accesses the i-th value as defined by the accessor
template <class T>
T GetValue(int i);
//! Accesses the i-th value as defined by the accessor
inline unsigned int GetUInt(int i) {
return GetValue<unsigned int>(i);
}
inline bool IsValid() const {
return data != 0;
}
};
inline Indexer GetIndexer() {
return Indexer(*this);
}
Accessor() {}
void Read(Value &obj, Asset &r);
//sparse
struct Sparse {
size_t count;
ComponentType indicesType;
Ref<BufferView> indices;
size_t indicesByteOffset;
Ref<BufferView> values;
size_t valuesByteOffset;
std::vector<uint8_t> data; //!< Actual data, which may be defaulted to an array of zeros or the original data, with the sparse buffer view applied on top of it.
void PopulateData(size_t numBytes, uint8_t *bytes);
void PatchData(unsigned int elementSize);
};
};
//! A buffer points to binary geometry, animation, or skins.
struct Buffer : public Object {
/********************* Types *********************/
@@ -594,6 +513,90 @@ struct BufferView : public Object {
uint8_t *GetPointer(size_t accOffset);
};
//! A typed view into a BufferView. A BufferView contains raw binary data.
//! An accessor provides a typed view into a BufferView or a subset of a BufferView
//! similar to how WebGL's vertexAttribPointer() defines an attribute in a buffer.
struct Accessor : public Object {
struct Sparse;
Ref<BufferView> bufferView; //!< The ID of the bufferView. (required)
size_t byteOffset; //!< The offset relative to the start of the bufferView in bytes. (required)
ComponentType componentType; //!< The datatype of components in the attribute. (required)
size_t count; //!< The number of attributes referenced by this accessor. (required)
AttribType::Value type; //!< Specifies if the attribute is a scalar, vector, or matrix. (required)
std::vector<double> max; //!< Maximum value of each component in this attribute.
std::vector<double> min; //!< Minimum value of each component in this attribute.
std::unique_ptr<Sparse> sparse;
std::unique_ptr<Buffer> decodedBuffer; // Packed decoded data, returned instead of original bufferView if present
unsigned int GetNumComponents();
unsigned int GetBytesPerComponent();
unsigned int GetElementSize();
inline uint8_t *GetPointer();
inline size_t GetStride();
inline size_t GetMaxByteSize();
template <class T>
void ExtractData(T *&outData);
void WriteData(size_t count, const void *src_buffer, size_t src_stride);
void WriteSparseValues(size_t count, const void *src_data, size_t src_dataStride);
void WriteSparseIndices(size_t count, const void *src_idx, size_t src_idxStride);
//! Helper class to iterate the data
class Indexer {
friend struct Accessor;
// This field is reported as not used, making it protectd is the easiest way to work around it without going to the bottom of what the problem is:
// ../code/glTF2/glTF2Asset.h:392:19: error: private field 'accessor' is not used [-Werror,-Wunused-private-field]
protected:
Accessor &accessor;
private:
uint8_t *data;
size_t elemSize, stride;
Indexer(Accessor &acc);
public:
//! Accesses the i-th value as defined by the accessor
template <class T>
T GetValue(int i);
//! Accesses the i-th value as defined by the accessor
inline unsigned int GetUInt(int i) {
return GetValue<unsigned int>(i);
}
inline bool IsValid() const {
return data != nullptr;
}
};
inline Indexer GetIndexer() {
return Indexer(*this);
}
Accessor() {}
void Read(Value &obj, Asset &r);
//sparse
struct Sparse {
size_t count;
ComponentType indicesType;
Ref<BufferView> indices;
size_t indicesByteOffset;
Ref<BufferView> values;
size_t valuesByteOffset;
std::vector<uint8_t> data; //!< Actual data, which may be defaulted to an array of zeros or the original data, with the sparse buffer view applied on top of it.
void PopulateData(size_t numBytes, uint8_t *bytes);
void PatchData(unsigned int elementSize);
};
};
struct Camera : public Object {
enum Type {
Perspective,
@@ -846,7 +849,7 @@ struct CustomExtension : public Object {
CustomExtension() = default;
CustomExtension(const CustomExtension &other)
: Object(other)
, mStringValue(other.mStringValue)
, mDoubleValue(other.mDoubleValue)
@@ -1092,6 +1095,7 @@ public:
bool KHR_materials_sheen;
bool KHR_materials_clearcoat;
bool KHR_materials_transmission;
bool KHR_draco_mesh_compression;
} extensionsUsed;
//! Keeps info about the required extensions
@@ -1100,7 +1104,7 @@ public:
} extensionsRequired;
AssetMetadata asset;
Value *extras = nullptr;
// Dictionaries for each type of object
@@ -1122,7 +1126,7 @@ public:
Ref<Scene> scene;
public:
Asset(IOSystem *io = nullptr) :
mIOSystem(io),
asset(),
accessors(*this, "accessors"),


@@ -42,9 +42,40 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "AssetLib/glTF/glTFCommon.h"
#include <assimp/MemoryIOWrapper.h>
#include <assimp/StringUtils.h>
#include <assimp/DefaultLogger.hpp>
#include <assimp/MemoryIOWrapper.h>
#ifdef ASSIMP_ENABLE_DRACO
// Google draco library headers spew many warnings. Bad Google, no cookie
#if _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4018) // Signed/unsigned mismatch
#pragma warning(disable : 4804) // Unsafe use of type 'bool'
#elif defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-compare"
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wbool-compare"
#pragma GCC diagnostic ignored "-Wsign-compare"
#endif
#include "draco/compression/decode.h"
#include "draco/core/decoder_buffer.h"
#if _MSC_VER
#pragma warning(pop)
#elif defined(__clang__)
#pragma clang diagnostic pop
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
#ifndef DRACO_MESH_COMPRESSION_SUPPORTED
#error glTF: KHR_draco_mesh_compression: draco library must have DRACO_MESH_COMPRESSION_SUPPORTED
#endif
#endif
using namespace Assimp;
@@ -146,35 +177,155 @@ inline static T MemberOrDefault(Value &obj, const char *id, T defaultValue) {
inline Value *FindMember(Value &val, const char *id) {
Value::MemberIterator it = val.FindMember(id);
return (it != val.MemberEnd()) ? &it->value : nullptr;
}
inline Value *FindString(Value &val, const char *id) {
Value::MemberIterator it = val.FindMember(id);
return (it != val.MemberEnd() && it->value.IsString()) ? &it->value : nullptr;
}
inline Value *FindNumber(Value &val, const char *id) {
Value::MemberIterator it = val.FindMember(id);
return (it != val.MemberEnd() && it->value.IsNumber()) ? &it->value : nullptr;
}
inline Value *FindUInt(Value &val, const char *id) {
Value::MemberIterator it = val.FindMember(id);
return (it != val.MemberEnd() && it->value.IsUint()) ? &it->value : nullptr;
}
inline Value *FindArray(Value &val, const char *id) {
Value::MemberIterator it = val.FindMember(id);
return (it != val.MemberEnd() && it->value.IsArray()) ? &it->value : nullptr;
}
inline Value *FindObject(Value &val, const char *id) {
Value::MemberIterator it = val.FindMember(id);
return (it != val.MemberEnd() && it->value.IsObject()) ? &it->value : nullptr;
}
inline Value *FindExtension(Value &val, const char *extensionId) {
if (Value *extensionList = FindObject(val, "extensions")) {
if (Value *extension = FindObject(*extensionList, extensionId)) {
return extension;
}
}
return nullptr;
} }
} // namespace
#ifdef ASSIMP_ENABLE_DRACO
template <typename T>
inline void CopyFaceIndex_Draco(Buffer &decodedIndexBuffer, const draco::Mesh &draco_mesh) {
const size_t faceStride = sizeof(T) * 3;
for (draco::FaceIndex f(0); f < draco_mesh.num_faces(); ++f) {
const draco::Mesh::Face &face = draco_mesh.face(f);
T indices[3] = { static_cast<T>(face[0].value()), static_cast<T>(face[1].value()), static_cast<T>(face[2].value()) };
memcpy(decodedIndexBuffer.GetPointer() + (f.value() * faceStride), &indices[0], faceStride);
}
}
inline void SetDecodedIndexBuffer_Draco(const draco::Mesh &dracoMesh, Mesh::Primitive &prim) {
if (!prim.indices || dracoMesh.num_faces() == 0)
return;
// Create a decoded Index buffer (if there is one)
size_t componentBytes = prim.indices->GetBytesPerComponent();
std::unique_ptr<Buffer> decodedIndexBuffer(new Buffer());
decodedIndexBuffer->Grow(dracoMesh.num_faces() * 3 * componentBytes);
// If accessor uses the same size as draco implementation, copy the draco buffer directly
// Usually uint32_t but shouldn't assume
if (sizeof(dracoMesh.face(draco::FaceIndex(0))[0]) == componentBytes) {
memcpy(decodedIndexBuffer->GetPointer(), &dracoMesh.face(draco::FaceIndex(0))[0], decodedIndexBuffer->byteLength);
return;
}
// Not same size, convert
switch (componentBytes) {
case sizeof(uint32_t):
CopyFaceIndex_Draco<uint32_t>(*decodedIndexBuffer, dracoMesh);
break;
case sizeof(uint16_t):
CopyFaceIndex_Draco<uint16_t>(*decodedIndexBuffer, dracoMesh);
break;
case sizeof(uint8_t):
CopyFaceIndex_Draco<uint8_t>(*decodedIndexBuffer, dracoMesh);
break;
default:
ai_assert(false);
break;
}
// Assign this alternate data buffer to the accessor
prim.indices->decodedBuffer.swap(decodedIndexBuffer);
}
template <typename T>
static bool GetAttributeForAllPoints_Draco(const draco::Mesh &dracoMesh,
const draco::PointAttribute &dracoAttribute,
Buffer &outBuffer) {
size_t byteOffset = 0;
T values[4] = { 0, 0, 0, 0 };
for (draco::PointIndex i(0); i < dracoMesh.num_points(); ++i) {
const draco::AttributeValueIndex val_index = dracoAttribute.mapped_index(i);
if (!dracoAttribute.ConvertValue<T>(val_index, dracoAttribute.num_components(), values)) {
return false;
}
memcpy(outBuffer.GetPointer() + byteOffset, &values[0], sizeof(T) * dracoAttribute.num_components());
byteOffset += sizeof(T) * dracoAttribute.num_components();
}
return true;
}
inline void SetDecodedAttributeBuffer_Draco(const draco::Mesh &dracoMesh, uint32_t dracoAttribId, Accessor &accessor) {
// Create decoded buffer
const draco::PointAttribute *pDracoAttribute = dracoMesh.GetAttributeByUniqueId(dracoAttribId);
if (pDracoAttribute == nullptr) {
throw DeadlyImportError("GLTF: Invalid draco attribute id: ", dracoAttribId);
}
size_t componentBytes = accessor.GetBytesPerComponent();
std::unique_ptr<Buffer> decodedAttribBuffer(new Buffer());
decodedAttribBuffer->Grow(dracoMesh.num_points() * pDracoAttribute->num_components() * componentBytes);
switch (accessor.componentType) {
case ComponentType_BYTE:
GetAttributeForAllPoints_Draco<int8_t>(dracoMesh, *pDracoAttribute, *decodedAttribBuffer);
break;
case ComponentType_UNSIGNED_BYTE:
GetAttributeForAllPoints_Draco<uint8_t>(dracoMesh, *pDracoAttribute, *decodedAttribBuffer);
break;
case ComponentType_SHORT:
GetAttributeForAllPoints_Draco<int16_t>(dracoMesh, *pDracoAttribute, *decodedAttribBuffer);
break;
case ComponentType_UNSIGNED_SHORT:
GetAttributeForAllPoints_Draco<uint16_t>(dracoMesh, *pDracoAttribute, *decodedAttribBuffer);
break;
case ComponentType_UNSIGNED_INT:
GetAttributeForAllPoints_Draco<uint32_t>(dracoMesh, *pDracoAttribute, *decodedAttribBuffer);
break;
case ComponentType_FLOAT:
GetAttributeForAllPoints_Draco<float>(dracoMesh, *pDracoAttribute, *decodedAttribBuffer);
break;
default:
ai_assert(false);
break;
}
// Assign this alternate data buffer to the accessor
accessor.decodedBuffer.swap(decodedAttribBuffer);
}
#endif // ASSIMP_ENABLE_DRACO
//
// LazyDict methods
//
@@ -197,7 +348,7 @@ inline LazyDict<T>::~LazyDict() {
template <class T>
inline void LazyDict<T>::AttachToDocument(Document &doc) {
Value *container = nullptr;
if (mExtId) {
if (Value *exts = FindObject(doc, "extensions")) {
@@ -214,7 +365,7 @@ inline void LazyDict<T>::AttachToDocument(Document &doc) {
template <class T>
inline void LazyDict<T>::DetachFromDocument() {
mDict = nullptr;
}
template <class T>
@@ -382,7 +533,7 @@ inline void Buffer::Read(Value &obj, Asset &r) {
glTFCommon::Util::DataURI dataURI;
if (ParseDataURI(uri, it->GetStringLength(), dataURI)) {
if (dataURI.base64) {
uint8_t *data = nullptr;
this->byteLength = glTFCommon::Util::DecodeBase64(dataURI.data, dataURI.dataLength, data);
this->mData.reset(data, std::default_delete<uint8_t[]>());
@@ -401,10 +552,7 @@ inline void Buffer::Read(Value &obj, Asset &r) {
}
} else { // Local file
if (byteLength > 0) {
std::string dir = !r.mCurrentAssetDir.empty() ? (r.mCurrentAssetDir.back() == '/' ? r.mCurrentAssetDir : r.mCurrentAssetDir + '/') : "";
r.mCurrentAssetDir.back() == '/' ?
r.mCurrentAssetDir : r.mCurrentAssetDir + '/'
) : "";
IOStream *file = r.OpenFile(dir + uri, "rb");
if (file) {
@@ -575,9 +723,9 @@ inline void BufferView::Read(Value &obj, Asset &r) {
}
inline uint8_t *BufferView::GetPointer(size_t accOffset) {
if (!buffer) return nullptr;
uint8_t *basePtr = buffer->GetPointer();
if (!basePtr) return nullptr;
size_t offset = accOffset + byteOffset;
if (buffer->EncodedRegion_Current != nullptr) {
@@ -709,12 +857,15 @@ inline unsigned int Accessor::GetElementSize() {
}
inline uint8_t *Accessor::GetPointer() {
if (decodedBuffer)
return decodedBuffer->GetPointer();
if (sparse)
return sparse->data.data();
if (!bufferView || !bufferView->buffer) return nullptr;
uint8_t *basePtr = bufferView->buffer->GetPointer();
if (!basePtr) return nullptr;
size_t offset = byteOffset + bufferView->byteOffset;
@@ -730,6 +881,22 @@ inline uint8_t *Accessor::GetPointer() {
return basePtr + offset;
}
inline size_t Accessor::GetStride() {
// Decoded buffer is always packed
if (decodedBuffer)
return GetElementSize();
// Sparse and normal bufferView
return (bufferView && bufferView->byteStride ? bufferView->byteStride : GetElementSize());
}
inline size_t Accessor::GetMaxByteSize() {
if (decodedBuffer)
return decodedBuffer->byteLength;
return (bufferView ? bufferView->byteLength : sparse->data.size());
}
namespace {
inline void CopyData(size_t count,
const uint8_t *src, size_t src_stride,
@@ -761,7 +928,7 @@ void Accessor::ExtractData(T *&outData) {
const size_t elemSize = GetElementSize();
const size_t totalSize = elemSize * count;
const size_t stride = GetStride();
const size_t targetElemSize = sizeof(T);
@@ -769,8 +936,8 @@ void Accessor::ExtractData(T *&outData) {
throw DeadlyImportError("GLTF: elemSize ", elemSize, " > targetElemSize ", targetElemSize, " in ", getContextForErrorMessages(id, name));
}
const size_t maxSize = GetMaxByteSize();
if (count * stride > maxSize) {
throw DeadlyImportError("GLTF: count*stride ", (count * stride), " > maxSize ", maxSize, " in ", getContextForErrorMessages(id, name));
}
@@ -828,14 +995,14 @@ inline Accessor::Indexer::Indexer(Accessor &acc) :
accessor(acc),
data(acc.GetPointer()),
elemSize(acc.GetElementSize()),
stride(acc.GetStride()) {
}
//! Accesses the i-th value as defined by the accessor
template <class T>
T Accessor::Indexer::GetValue(int i) {
ai_assert(data);
ai_assert(i * stride < accessor.GetMaxByteSize());
// Ensure that the memcpy doesn't overwrite the local.
const size_t sizeToCopy = std::min(elemSize, sizeof(T));
T value = T();
@@ -872,8 +1039,7 @@ inline void Image::Read(Value &obj, Asset &r) {
if (Value *mtype = FindString(obj, "mimeType")) {
this->mimeType = mtype->GetString();
}
if (!this->bufferView || this->mimeType.empty()) {
throw DeadlyImportError("GLTF2: ", getContextForErrorMessages(id, name), " does not have a URI, so it must have a valid bufferView and mimetype");
}
@@ -884,10 +1050,8 @@ inline void Image::Read(Value &obj, Asset &r) {
this->mData.reset(new uint8_t[this->mDataLength]);
memcpy(this->mData.get(), buffer->GetPointer() + this->bufferView->byteOffset, this->mDataLength);
} else {
throw DeadlyImportError("GLTF2: ", getContextForErrorMessages(id, name), " should have either a URI of a bufferView and mimetype");
}
}
}
@@ -946,9 +1110,8 @@ inline void Texture::Read(Value &obj, Asset &r) {
namespace {
inline void SetTextureProperties(Asset &r, Value *prop, TextureInfo &out) {
if (r.extensionsUsed.KHR_texture_transform) {
if (Value *pKHR_texture_transform = FindExtension(*prop, "KHR_texture_transform")) {
out.textureTransformSupported = true;
if (Value *array = FindArray(*pKHR_texture_transform, "offset")) {
out.TextureTransformExt_t.offset[0] = (*array)[0].GetFloat();
out.TextureTransformExt_t.offset[1] = (*array)[1].GetFloat();
@@ -970,7 +1133,6 @@ inline void SetTextureProperties(Asset &r, Value *prop, TextureInfo &out) {
}
}
}
if (Value *index = FindUInt(*prop, "index")) {
out.texture = r.textures.Retrieve(index->GetUint());
@@ -1043,8 +1205,7 @@ inline void Material::Read(Value &material, Asset &r) {
}
}
// Extension KHR_texture_transform is handled in ReadTextureProperty
if (r.extensionsUsed.KHR_materials_sheen) {
if (Value *curMaterialSheen = FindObject(*extensions, "KHR_materials_sheen")) {
@@ -1106,12 +1267,12 @@ void SetVector(vec3 &v, const float (&in)[3]) {
inline void Material::SetDefaults() {
//pbr materials
SetVector(pbrMetallicRoughness.baseColorFactor, defaultBaseColor);
pbrMetallicRoughness.metallicFactor = 1.0f;
pbrMetallicRoughness.roughnessFactor = 1.0f;
SetVector(emissiveFactor, defaultEmissiveFactor);
alphaMode = "OPAQUE";
alphaCutoff = 0.5f;
doubleSided = false;
unlit = false;
}
@@ -1120,7 +1281,7 @@ inline void PbrSpecularGlossiness::SetDefaults() {
//pbrSpecularGlossiness properties
SetVector(diffuseFactor, defaultDiffuseFactor);
SetVector(specularFactor, defaultSpecularFactor);
glossinessFactor = 1.0f;
}
inline void MaterialSheen::SetDefaults() {
@@ -1192,6 +1353,14 @@ inline void Mesh::Read(Value &pJSON_Object, Asset &pAsset_Root) {
Primitive &prim = this->primitives[i];
prim.mode = MemberOrDefault(primitive, "mode", PrimitiveMode_TRIANGLES);
if (Value *indices = FindUInt(primitive, "indices")) {
prim.indices = pAsset_Root.accessors.Retrieve(indices->GetUint());
}
if (Value *material = FindUInt(primitive, "material")) {
prim.material = pAsset_Root.materials.Retrieve(material->GetUint());
}
if (Value *attrs = FindObject(primitive, "attributes")) {
for (Value::MemberIterator it = attrs->MemberBegin(); it != attrs->MemberEnd(); ++it) {
if (!it->value.IsUint()) continue;
@@ -1200,11 +1369,12 @@ inline void Mesh::Read(Value &pJSON_Object, Asset &pAsset_Root) {
// and WEIGHT.Attribute semantics can be of the form[semantic]_[set_index], e.g., TEXCOORD_0, TEXCOORD_1, etc.
int undPos = 0;
Mesh::AccessorList *vec = nullptr;
if (GetAttribVector(prim, attr, vec, undPos)) {
size_t idx = (attr[undPos] == '_') ? atoi(attr + undPos + 1) : 0;
if ((*vec).size() != idx) {
throw DeadlyImportError("GLTF: Invalid attribute in mesh: ", name, " primitive: ", i, "attrib: ", attr,
". All indices for indexed attribute semantics must start with 0 and be continuous positive integers: TEXCOORD_0, TEXCOORD_1, etc.");
}
(*vec).resize(idx + 1);
(*vec)[idx] = pAsset_Root.accessors.Retrieve(it->value.GetUint());
@@ -1212,6 +1382,69 @@ inline void Mesh::Read(Value &pJSON_Object, Asset &pAsset_Root) {
}
}
#ifdef ASSIMP_ENABLE_DRACO
// KHR_draco_mesh_compression spec: Draco can only be used for glTF Triangles or Triangle Strips
if (pAsset_Root.extensionsUsed.KHR_draco_mesh_compression && (prim.mode == PrimitiveMode_TRIANGLES || prim.mode == PrimitiveMode_TRIANGLE_STRIP)) {
// Look for draco mesh compression extension and bufferView
// Skip if any missing
if (Value *dracoExt = FindExtension(primitive, "KHR_draco_mesh_compression")) {
if (Value *bufView = FindUInt(*dracoExt, "bufferView")) {
// Attempt to load indices and attributes using draco compression
auto bufferView = pAsset_Root.bufferViews.Retrieve(bufView->GetUint());
// Attempt to perform the draco decode on the buffer data
const char *bufferViewData = reinterpret_cast<const char *>(bufferView->buffer->GetPointer() + bufferView->byteOffset);
draco::DecoderBuffer decoderBuffer;
decoderBuffer.Init(bufferViewData, bufferView->byteLength);
draco::Decoder decoder;
auto decodeResult = decoder.DecodeMeshFromBuffer(&decoderBuffer);
if (!decodeResult.ok()) {
// A corrupt Draco isn't actually fatal if the primitive data is also provided in a standard buffer, but does anyone do that?
throw DeadlyImportError("GLTF: Invalid Draco mesh compression in mesh: ", name, " primitive: ", i, ": ", decodeResult.status().error_msg_string());
}
// Now we have a draco mesh
const std::unique_ptr<draco::Mesh> &pDracoMesh = decodeResult.value();
// Redirect the accessors to the decoded data
// Indices
SetDecodedIndexBuffer_Draco(*pDracoMesh, prim);
// Vertex attributes
if (Value *attrs = FindObject(*dracoExt, "attributes")) {
for (Value::MemberIterator it = attrs->MemberBegin(); it != attrs->MemberEnd(); ++it) {
if (!it->value.IsUint()) continue;
const char *attr = it->name.GetString();
int undPos = 0;
Mesh::AccessorList *vec = nullptr;
if (GetAttribVector(prim, attr, vec, undPos)) {
size_t idx = (attr[undPos] == '_') ? atoi(attr + undPos + 1) : 0;
if (idx >= (*vec).size()) {
throw DeadlyImportError("GLTF: Invalid draco attribute in mesh: ", name, " primitive: ", i, " attrib: ", attr,
". All indices for indexed attribute semantics must start with 0 and be continuous positive integers: TEXCOORD_0, TEXCOORD_1, etc.");
}
if (!(*vec)[idx]) {
throw DeadlyImportError("GLTF: Invalid draco attribute in mesh: ", name, " primitive: ", i, " attrib: ", attr,
". All draco-encoded attributes must also define an accessor.");
}
Accessor &attribAccessor = *(*vec)[idx];
if (attribAccessor.count == 0)
throw DeadlyImportError("GLTF: Invalid draco attribute in mesh: ", name, " primitive: ", i, " attrib: ", attr);
// Redirect this accessor to the appropriate Draco vertex attribute data
const uint32_t dracoAttribId = it->value.GetUint();
SetDecodedAttributeBuffer_Draco(*pDracoMesh, dracoAttribId, attribAccessor);
}
}
}
}
}
}
#endif
Value *targetsArray = FindArray(primitive, "targets");
if (nullptr != targetsArray) {
prim.targets.resize(targetsArray->Size());
@@ -1227,7 +1460,7 @@ inline void Mesh::Read(Value &pJSON_Object, Asset &pAsset_Root) {
const char *attr = it->name.GetString();
// Valid attribute semantics include POSITION, NORMAL, TANGENT
int undPos = 0;
Mesh::AccessorList *vec = nullptr;
if (GetAttribTargetVector(prim, j, attr, vec, undPos)) {
size_t idx = (attr[undPos] == '_') ? atoi(attr + undPos + 1) : 0;
if ((*vec).size() <= idx) {
@@ -1238,14 +1471,6 @@ inline void Mesh::Read(Value &pJSON_Object, Asset &pAsset_Root) {
}
}
}
if (Value *indices = FindUInt(primitive, "indices")) {
prim.indices = pAsset_Root.accessors.Retrieve(indices->GetUint());
}
if (Value *material = FindUInt(primitive, "material")) {
prim.material = pAsset_Root.materials.Retrieve(material->GetUint());
}
}
}
@@ -1331,25 +1556,22 @@ inline void Light::Read(Value &obj, Asset & /*r*/) {
}
}
inline CustomExtension ReadExtensions(const char *name, Value &obj) {
CustomExtension ret;
ret.name = name;
if (obj.IsObject()) {
ret.mValues.isPresent = true;
for (auto it = obj.MemberBegin(); it != obj.MemberEnd(); ++it) {
auto &val = it->value;
ret.mValues.value.push_back(ReadExtensions(it->name.GetString(), val));
}
} else if (obj.IsArray()) {
ret.mValues.value.reserve(obj.Size());
ret.mValues.isPresent = true;
for (unsigned int i = 0; i < obj.Size(); ++i) {
ret.mValues.value.push_back(ReadExtensions(name, obj[i]));
}
} else if (obj.IsNumber()) {
if (obj.IsUint64()) {
ret.mUint64Value.value = obj.GetUint64();
ret.mUint64Value.isPresent = true;
@@ -1360,12 +1582,10 @@ inline CustomExtension ReadExtensions(const char *name, Value& obj) {
ret.mDoubleValue.value = obj.GetDouble();
ret.mDoubleValue.isPresent = true;
}
} else if (obj.IsString()) {
ReadValue(obj, ret.mStringValue);
ret.mStringValue.isPresent = true;
} else if (obj.IsBool()) {
ret.mBoolValue.value = obj.GetBool();
ret.mBoolValue.isPresent = true;
}
@@ -1693,10 +1913,12 @@ inline void Asset::Load(const std::string &pFile, bool isBinary) {
ReadExtensionsUsed(doc);
ReadExtensionsRequired(doc);
#ifndef ASSIMP_ENABLE_DRACO
// Is Draco required?
if (extensionsRequired.KHR_draco_mesh_compression) {
throw DeadlyImportError("GLTF: Draco mesh compression not supported.");
}
#endif
// Prepare the dictionaries
for (size_t i = 0; i < mDicts.size(); ++i) {
@@ -1784,6 +2006,7 @@ inline void Asset::ReadExtensionsUsed(Document &doc) {
CHECK_EXT(KHR_materials_sheen);
CHECK_EXT(KHR_materials_clearcoat);
CHECK_EXT(KHR_materials_transmission);
CHECK_EXT(KHR_draco_mesh_compression);
#undef CHECK_EXT
}
@@ -1792,12 +2015,12 @@ inline IOStream *Asset::OpenFile(std::string path, const char *mode, bool /*abso
#ifdef ASSIMP_API
return mIOSystem->Open(path, mode);
#else
if (path.size() < 2) return nullptr;
if (!absolute && path[1] != ':' && path[0] != '/') { // relative?
path = mCurrentAssetDir + path;
}
FILE *f = fopen(path.c_str(), mode);
return f ? new IOStream(f) : nullptr;
#endif
}
@@ -1831,7 +2054,7 @@ inline std::string Asset::FindUniqueID(const std::string &str, const char *suffi
}
#if _MSC_VER
#pragma warning(pop)
#endif // _MSC_VER
} // namespace glTF2


@@ -1121,6 +1121,11 @@ IF (ASSIMP_BUILD_NONFREE_C4D_IMPORTER)
INCLUDE_DIRECTORIES(${C4D_INCLUDES})
ENDIF ()
IF (ASSIMP_BUILD_DRACO)
INCLUDE_DIRECTORIES(${draco_INCLUDE_DIRS})
ADD_DEFINITIONS( -DASSIMP_ENABLE_DRACO )
ENDIF()
ADD_LIBRARY( assimp ${assimp_src} )
ADD_LIBRARY(assimp::assimp ALIAS assimp)
@@ -1152,8 +1157,15 @@ IF(ASSIMP_HUNTER_ENABLED)
zip::zip
pugixml
)
if (ASSIMP_BUILD_DRACO)
target_link_libraries(assimp PUBLIC ${draco_LIBRARIES})
endif()
ELSE()
TARGET_LINK_LIBRARIES(assimp ${ZLIB_LIBRARIES} ${OPENDDL_PARSER_LIBRARIES})
if (ASSIMP_BUILD_DRACO)
target_link_libraries(assimp ${draco_LIBRARIES})
endif()
ENDIF()
if(ASSIMP_ANDROID_JNIIOSYSTEM)


@@ -0,0 +1,5 @@
---
Language: Cpp
BasedOnStyle: Google
PointerAlignment: Right
...


@@ -0,0 +1,102 @@
# Generated with cmake-format 0.5.1
# How wide to allow formatted cmake files
line_width = 80
# How many spaces to tab for indent
tab_size = 2
# If arglists are longer than this, break them always
max_subargs_per_line = 10
# If true, separate flow control names from their parentheses with a space
separate_ctrl_name_with_space = False
# If true, separate function names from parentheses with a space
separate_fn_name_with_space = False
# If a statement is wrapped to more than one line, than dangle the closing
# parenthesis on its own line
dangle_parens = False
# What character to use for bulleted lists
bullet_char = '*'
# What character to use as punctuation after numerals in an enumerated list
enum_char = '.'
# What style line endings to use in the output.
line_ending = u'unix'
# Format command names consistently as 'lower' or 'upper' case
command_case = u'lower'
# Format keywords consistently as 'lower' or 'upper' case
keyword_case = u'unchanged'
# Specify structure for custom cmake functions
additional_commands = {
"foo": {
"flags": [
"BAR",
"BAZ"
],
"kwargs": {
"HEADERS": "*",
"DEPENDS": "*",
"SOURCES": "*"
}
}
}
# A list of command names which should always be wrapped
always_wrap = []
# Specify the order of wrapping algorithms during successive reflow attempts
algorithm_order = [0, 1, 2, 3, 4]
# If true, the argument lists which are known to be sortable will be sorted
# lexicographicall
autosort = False
# enable comment markup parsing and reflow
enable_markup = True
# If comment markup is enabled, don't reflow the first comment block in
# eachlistfile. Use this to preserve formatting of your
# copyright/licensestatements.
first_comment_is_literal = False
# If comment markup is enabled, don't reflow any comment block which matchesthis
# (regex) pattern. Default is `None` (disabled).
literal_comment_pattern = None
# Regular expression to match preformat fences in comments
# default=r'^\s*([`~]{3}[`~]*)(.*)$'
fence_pattern = u'^\\s*([`~]{3}[`~]*)(.*)$'
# Regular expression to match rulers in comments
# default=r'^\s*[^\w\s]{3}.*[^\w\s]{3}$'
ruler_pattern = u'^\\s*[^\\w\\s]{3}.*[^\\w\\s]{3}$'
# If true, emit the unicode byte-order mark (BOM) at the start of the file
emit_byteorder_mark = False
# If a comment line starts with at least this many consecutive hash characters,
# then don't lstrip() them off. This allows for lazy hash rulers where the first
# hash char is not separated by space
hashruler_min_length = 10
# If true, then insert a space between the first hash char and remaining hash
# chars in a hash ruler, and normalize its length to fill the column
canonicalize_hashrulers = True
# Specify the encoding of the input file. Defaults to utf-8.
input_encoding = u'utf-8'
# Specify the encoding of the output file. Defaults to utf-8. Note that cmake
# only claims to support utf-8 so be careful when using anything else
output_encoding = u'utf-8'
# A dictionary containing any per-command configuration overrides. Currently
# only `command_case` is supported.
per_command = {}

contrib/draco/.gitignore vendored 100644

@@ -0,0 +1 @@
docs/_site


@@ -0,0 +1 @@
2.3.0


@@ -0,0 +1,31 @@
cache: ccache
language: cpp
matrix:
include:
- os: linux
dist: xenial
compiler: clang
- os: linux
dist: xenial
compiler: gcc
- os: osx
compiler: clang
addons:
apt:
packages:
- cmake
script:
# Output version info for compilers, cmake, and make
- ${CC} -v
- ${CXX} -v
- cmake --version
- make --version
# Clone googletest
- pushd .. && git clone https://github.com/google/googletest.git && popd
# Configure and build
- mkdir _travis_build && cd _travis_build
- cmake -G "Unix Makefiles" -DENABLE_TESTS=ON ..
- make -j10
- ./draco_tests


@@ -0,0 +1,7 @@
# This is the list of Draco authors for copyright purposes.
#
# This does not necessarily list everyone who has contributed code, since in
# some cases, their employer may be the copyright holder. To see the full list
# of contributors, see the revision history in source control.
Google Inc.
and other contributors


@@ -0,0 +1,301 @@
_**Contents**_
* [CMake Basics](#cmake-basics)
* [Mac OS X](#mac-os-x)
* [Windows](#windows)
* [CMake Build Configuration](#cmake-build-configuration)
* [Debugging and Optimization](#debugging-and-optimization)
* [Googletest Integration](#googletest-integration)
* [Javascript Encoder/Decoder](#javascript-encoderdecoder)
* [WebAssembly Decoder](#webassembly-decoder)
* [WebAssembly Mesh Only Decoder](#webassembly-mesh-only-decoder)
* [WebAssembly Point Cloud Only Decoder](#webassembly-point-cloud-only-decoder)
* [iOS Builds](#ios-builds)
* [Android Studio Project Integration](#android-studio-project-integration)
* [Native Android Builds](#native-android-builds)
* [vcpkg](#vcpkg)
Building
========
For all platforms, you must first generate the project/make files and then
compile the examples.
CMake Basics
------------
To generate project/make files for the default toolchain on your system, run
`cmake` from a directory where you would like to generate build files, and pass
it the path to your Draco repository.
E.g. Starting from Draco root.
~~~~~ bash
$ mkdir build_dir && cd build_dir
$ cmake ../
~~~~~
On Windows, the above command will produce Visual Studio project files for the
newest Visual Studio detected on the system. On Mac OS X and Linux systems,
the above command will produce a `makefile`.
To control what types of projects are generated, add the `-G` parameter to the
`cmake` command. This argument must be followed by the name of a generator.
Running `cmake` with the `--help` argument will list the available
generators for your system.
Mac OS X
---------
On Mac OS X, run the following command to generate Xcode projects:
~~~~~ bash
$ cmake ../ -G Xcode
~~~~~
Windows
-------
On a Windows box you would run the following command to generate Visual Studio
2019 projects:
~~~~~ bash
C:\Users\nobody> cmake ../ -G "Visual Studio 16 2019" -A Win32
~~~~~
To generate 64-bit Windows Visual Studio 2019 projects:
~~~~~ bash
C:\Users\nobody> cmake ../ -G "Visual Studio 16 2019" -A x64
~~~~~
CMake Build Configuration
-------------------------
Debugging and Optimization
--------------------------
Unlike Visual Studio and Xcode projects, the build configuration for make
builds is controlled when you run `cmake`. The following examples demonstrate
various build configurations.
Omitting the build type produces makefiles that use release build flags
by default:
~~~~~ bash
$ cmake ../
~~~~~
A makefile using release (optimized) flags is produced like this:
~~~~~ bash
$ cmake ../ -DCMAKE_BUILD_TYPE=Release
~~~~~
A release build with debug info can be produced as well:
~~~~~ bash
$ cmake ../ -DCMAKE_BUILD_TYPE=RelWithDebInfo
~~~~~
And your standard debug build will be produced using:
~~~~~ bash
$ cmake ../ -DCMAKE_BUILD_TYPE=Debug
~~~~~
To enable the use of sanitizers when the compiler in use supports them, set the
sanitizer type when running CMake:
~~~~~ bash
$ cmake ../ -DDRACO_SANITIZE=address
~~~~~
Googletest Integration
----------------------
Draco includes testing support built using Googletest. To enable Googletest unit
test support the DRACO_TESTS cmake variable must be turned on at cmake
generation time:
~~~~~ bash
$ cmake ../ -DDRACO_TESTS=ON
~~~~~
When cmake is used as shown in the above example the googletest directory must
be a sibling of the Draco repository root directory. To run the tests execute
`draco_tests` from your build output directory.
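A minimal sketch of that layout and test run (an editor's illustration; the directory names are assumptions, not part of the Draco docs):
~~~~~ bash
# From the Draco repository root: clone googletest as a sibling of the repo.
$ pushd .. && git clone https://github.com/google/googletest.git && popd
# Configure a build directory with tests enabled, then build and run the tests.
$ mkdir build_dir && cd build_dir
$ cmake ../ -DDRACO_TESTS=ON
$ make
$ ./draco_tests
~~~~~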
WebAssembly Decoder
-------------------
The WebAssembly decoder can be built using the existing cmake build file by
passing the path the Emscripten's cmake toolchain file at cmake generation time
in the CMAKE_TOOLCHAIN_FILE variable and enabling the WASM build option.
In addition, the EMSCRIPTEN environment variable must be set to the local path
of the parent directory of the Emscripten tools directory.
~~~~~ bash
# Make the path to emscripten available to cmake.
$ export EMSCRIPTEN=/path/to/emscripten/tools/parent
# Emscripten.cmake can be found within your Emscripten installation directory,
# it should be the subdir: cmake/Modules/Platform/Emscripten.cmake
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DDRACO_WASM=ON
# Build the WebAssembly decoder.
$ make
# Run the Javascript wrapper through Closure.
$ java -jar closure.jar --compilation_level SIMPLE --js draco_decoder.js --js_output_file draco_wasm_wrapper.js
~~~~~
WebAssembly Mesh Only Decoder
-----------------------------
~~~~~ bash
# cmake command line for mesh only WebAssembly decoder.
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DDRACO_WASM=ON -DDRACO_POINT_CLOUD_COMPRESSION=OFF
~~~~~
WebAssembly Point Cloud Only Decoder
-----------------------------
~~~~~ bash
# cmake command line for point cloud only WebAssembly decoder.
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DDRACO_WASM=ON -DDRACO_MESH_COMPRESSION=OFF
~~~~~
Javascript Encoder/Decoder
------------------
The javascript encoder and decoder can be built using the existing cmake build
file by passing the path the Emscripten's cmake toolchain file at cmake
generation time in the CMAKE_TOOLCHAIN_FILE variable.
In addition, the EMSCRIPTEN environment variable must be set to the local path
of the parent directory of the Emscripten tools directory.
*Note* The WebAssembly decoder should be favored over the JavaScript decoder.
~~~~~ bash
# Make the path to emscripten available to cmake.
$ export EMSCRIPTEN=/path/to/emscripten/tools/parent
# Emscripten.cmake can be found within your Emscripten installation directory,
# it should be the subdir: cmake/Modules/Platform/Emscripten.cmake
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake
# Build the Javascript encoder and decoder.
$ make
~~~~~
iOS Builds
---------------------
These are the basic commands needed to build Draco for iOS targets.
~~~~~ bash
#arm64
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/arm64-ios.cmake
$ make
#x86_64
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/x86_64-ios.cmake
$ make
#armv7
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/armv7-ios.cmake
$ make
#i386
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/i386-ios.cmake
$ make
~~~~~~
After building for each target the libraries can be merged into a single
universal/fat library using lipo, and then used in iOS applications.
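As a rough sketch of that merge step (the per-architecture build directories and library names below are assumptions, not fixed by Draco):
~~~~~ bash
# Merge the per-architecture static libraries into one universal/fat library.
$ lipo -create build_arm64/libdraco.a build_x86_64/libdraco.a \
    -output libdraco_universal.a
# Verify which architectures the fat library contains.
$ lipo -info libdraco_universal.a
~~~~~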
Native Android Builds
---------------------
It's sometimes useful to build Draco command line tools and run them directly on
Android devices via adb.
~~~~~ bash
# This example is for armeabi-v7a.
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/android.cmake \
-DDRACO_ANDROID_NDK_PATH=path/to/ndk -DANDROID_ABI=armeabi-v7a
$ make
# See the android.cmake toolchain file for additional ANDROID_ABI options and
# other configurable Android variables.
~~~~~
After building the tools they can be moved to an android device via the use of
`adb push`, and then run within an `adb shell` instance.
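For example (a hedged sketch; the device path and sample input file are illustrative):
~~~~~ bash
# Push a freshly built tool to the device and run it in a shell.
$ adb push draco_encoder /data/local/tmp/
$ adb shell /data/local/tmp/draco_encoder -i in_mesh.ply -o out.drc
~~~~~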
Android Studio Project Integration
----------------------------------
Tested on Android Studio 3.5.3.
Draco - Static Library
----------------------
To include Draco in an existing or new Android Studio project, reference it
from the `cmake` file of an existing native project that has a minimum SDK
version of 18 or higher. The project must support C++11.
To add Draco to your project:
1. Create a new "Native C++" project.
2. Add the following somewhere within the `CMakeLists.txt` for your project
before the `add_library()` for your project's native-lib:
~~~~~ cmake
# Note "/path/to/draco" must be changed to the path where you have cloned
# the Draco sources.
add_subdirectory(/path/to/draco
${CMAKE_BINARY_DIR}/draco_build)
include_directories("${CMAKE_BINARY_DIR}" /path/to/draco)
~~~~~
3. Add the library target "draco" to the `target_link_libraries()` call for
your project's native-lib. The `target_link_libraries()` call for an
empty activity native project looks like this after the addition of
Draco:
~~~~~ cmake
target_link_libraries( # Specifies the target library.
                       native-lib
                       # Tells cmake this build depends on libdraco.
                       draco
                       # Links the target library to the log library
                       # included in the NDK.
                       ${log-lib} )
~~~~~
vcpkg
---------------------
You can download and install Draco using the
[vcpkg](https://github.com/Microsoft/vcpkg/) dependency manager:
~~~~~ bash
git clone https://github.com/Microsoft/vcpkg.git
cd vcpkg
./bootstrap-vcpkg.sh
./vcpkg integrate install
vcpkg install draco
~~~~~
The Draco port in vcpkg is kept up to date by Microsoft team members and
community contributors. If the version is out of date, please
[create an issue or pull request](https://github.com/Microsoft/vcpkg) on the
vcpkg repository.
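Once installed through vcpkg, one common way to make the package visible to a
CMake-based consumer project is to pass vcpkg's toolchain file at configure
time; this is a sketch, and `/path/to/vcpkg` is a placeholder for your vcpkg
checkout:
~~~~~ bash
# Configure against the vcpkg-provided dependencies.
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/vcpkg/scripts/buildsystems/vcpkg.cmake
$ make
~~~~~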

View File

@ -0,0 +1,106 @@
# CMake Build System Overview
[TOC]
This document provides a general layout of the Draco CMake build system.
## Core Build System Files
These files are listed in order of interest to maintainers of the build system.
- `CMakeLists.txt` is the main driver of the build system. It's responsible
for defining targets and source lists, surfacing build system options, and
tying the components of the build system together.
- `cmake/draco_build_definitions.cmake` defines the macro
`draco_set_build_definitions()`, which is called from `CMakeLists.txt` to
configure include paths, compiler and linker flags, library settings,
platform specific configuration, and other build system settings that
depend on optional build configurations.
- `cmake/draco_targets.cmake` defines the macros `draco_add_library()` and
`draco_add_executable()` which are used to create all targets in the CMake
build. These macros attempt to behave in a manner that loosely mirrors the
blaze `cc_library()` and `cc_binary()` commands. Note that
`draco_add_executable()` is also used for tests.
- `cmake/draco_emscripten.cmake` handles Emscripten SDK integration. It
defines several Emscripten specific macros that are required to build the
Emscripten specific targets defined in `CMakeLists.txt`.
- `cmake/draco_flags.cmake` defines macros related to compiler and linker
flags. Testing macros, macros for isolating flags to specific source files,
and the main flag configuration function for the library are defined here.
- `cmake/draco_options.cmake` defines macros that control optional features
of draco, and help track draco library and build system options.
- `cmake/draco_install.cmake` defines the draco install target.
- `cmake/draco_cpu_detection.cmake` determines the optimization types to
enable based on target system processor as reported by CMake.
- `cmake/draco_intrinsics.cmake` manages flags for source files that use
intrinsics. It handles detection of whether flags are necessary, and the
application of the flags to the sources that need them when they are
required.
## Helper and Utility Files
- `.cmake-format.py` Defines coding style for cmake-format.
- `cmake/draco_helpers.cmake` defines utility macros.
- `cmake/draco_sanitizer.cmake` defines the `draco_configure_sanitizer()`
  macro, which implements support for `DRACO_SANITIZE`. It handles the
  compiler and linker flags necessary for using sanitizers like asan and msan
  (see the sketch after this list).
- `cmake/draco_variables.cmake` defines macros for tracking and control of
draco build system variables.
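As a sketch of how the sanitizer support is typically driven from the command
line (the value `address` is the usual AddressSanitizer name, but
`draco_sanitizer.cmake` is the authority on accepted values):
~~~~~ bash
# Configure a sanitizer-instrumented build; the variable value is an assumption
# based on common sanitizer names such as address, memory, and thread.
$ cmake ../ -DDRACO_SANITIZE=address
$ make
~~~~~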
## Toolchain Files
These files help facilitate cross compiling of draco for various targets; a usage sketch follows the list below.
- `cmake/toolchains/aarch64-linux-gnu.cmake` provides cross compilation
support for arm64 targets.
- `cmake/toolchains/android.cmake` provides cross compilation support for
Android targets.
- `cmake/toolchains/arm-linux-gnueabihf.cmake` provides cross compilation
support for armv7 targets.
- `cmake/toolchains/arm64-ios.cmake`, `cmake/toolchains/armv7-ios.cmake`,
and `cmake/toolchains/armv7s-ios.cmake` provide support for iOS.
- `cmake/toolchains/arm64-linux-gcc.cmake` and
`cmake/toolchains/armv7-linux-gcc.cmake` are deprecated, but remain for
compatibility. `cmake/toolchains/android.cmake` should be used instead.
- `cmake/toolchains/arm64-android-ndk-libcpp.cmake`,
`cmake/toolchains/armv7-android-ndk-libcpp.cmake`,
`cmake/toolchains/x86-android-ndk-libcpp.cmake`, and
`cmake/toolchains/x86_64-android-ndk-libcpp.cmake` are deprecated, but
remain for compatibility. `cmake/toolchains/android.cmake` should be used
instead.
- `cmake/toolchains/i386-ios.cmake` and `cmake/toolchains/x86_64-ios.cmake`
provide support for the iOS simulator.
- `cmake/toolchains/android-ndk-common.cmake` and
`cmake/toolchains/arm-ios-common.cmake` are support files used by other
toolchain files.
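Cross compiling with one of these toolchain files mirrors the iOS and Android
examples earlier in this document; for instance, targeting arm64 Linux might
look like the following sketch:
~~~~~ bash
# Run from an empty build directory that is a sibling of the draco source tree.
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/aarch64-linux-gnu.cmake
$ make
~~~~~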
## Template Files
These files are inputs to the CMake build; CMake processes them to generate
files for the build system that it outputs.
- `cmake/draco-config.cmake.template` is used to produce
draco-config.cmake. draco-config.cmake can be used by CMake to find draco
when another CMake project depends on draco.
- `cmake/draco.pc.template` is used to produce draco's pkg-config file.
  Some build systems use pkg-config to configure include and library paths
  when they depend upon third party libraries like draco (see the sketch
  after this list).
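A consumer relying on pkg-config would typically query the generated `draco.pc`
like this (a sketch; it assumes draco has been installed to a prefix that
pkg-config already searches, or that `PKG_CONFIG_PATH` points at it):
~~~~~ bash
# Print the compiler and linker flags needed to build against an installed draco.
$ pkg-config --cflags --libs draco
~~~~~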

View File

@ -0,0 +1,958 @@
cmake_minimum_required(VERSION 3.12 FATAL_ERROR)
# Draco requires C++11.
set(CMAKE_CXX_STANDARD 11)
project(draco C CXX)
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif()
set(draco_root "${CMAKE_CURRENT_SOURCE_DIR}")
set(draco_src_root "${draco_root}/src/draco")
set(draco_build "${CMAKE_BINARY_DIR}")
if("${draco_root}" STREQUAL "${draco_build}")
message(
FATAL_ERROR "Building from within the Draco source tree is not supported.\n"
"Hint: Run these commands\n"
"$ rm -rf CMakeCache.txt CMakeFiles\n"
"$ mkdir -p ../draco_build\n" "$ cd ../draco_build\n"
"And re-run CMake from the draco_build directory.")
endif()
include(CMakePackageConfigHelpers)
include(FindPythonInterp)
include("${draco_root}/cmake/draco_build_definitions.cmake")
include("${draco_root}/cmake/draco_cpu_detection.cmake")
include("${draco_root}/cmake/draco_emscripten.cmake")
include("${draco_root}/cmake/draco_flags.cmake")
include("${draco_root}/cmake/draco_helpers.cmake")
include("${draco_root}/cmake/draco_install.cmake")
include("${draco_root}/cmake/draco_intrinsics.cmake")
include("${draco_root}/cmake/draco_options.cmake")
include("${draco_root}/cmake/draco_sanitizer.cmake")
include("${draco_root}/cmake/draco_targets.cmake")
include("${draco_root}/cmake/draco_tests.cmake")
include("${draco_root}/cmake/draco_variables.cmake")
# C++ and linker flags.
draco_track_configuration_variable(DRACO_CXX_FLAGS)
draco_track_configuration_variable(DRACO_EXE_LINKER_FLAGS)
# Sanitizer integration.
draco_track_configuration_variable(DRACO_SANITIZE)
# Generated source file directory.
draco_track_configuration_variable(DRACO_GENERATED_SOURCES_DIRECTORY)
# Controls use of std::mutex and absl::Mutex in ThreadPool.
draco_track_configuration_variable(DRACO_THREADPOOL_USE_STD_MUTEX)
if(DRACO_VERBOSE)
draco_dump_cmake_flag_variables()
draco_dump_tracked_configuration_variables()
draco_dump_options()
endif()
# Compiler/linker flags must be lists, but come in from the environment as
# strings. Break them up:
if(NOT "${DRACO_CXX_FLAGS}" STREQUAL "")
separate_arguments(DRACO_CXX_FLAGS)
endif()
if(NOT "${DRACO_EXE_LINKER_FLAGS}" STREQUAL "")
separate_arguments(DRACO_EXE_LINKER_FLAGS)
endif()
draco_reset_target_lists()
draco_setup_options()
draco_set_build_definitions()
draco_set_cxx_flags()
draco_generate_features_h()
# Draco source file listing variables.
list(APPEND draco_attributes_sources
"${draco_src_root}/attributes/attribute_octahedron_transform.cc"
"${draco_src_root}/attributes/attribute_octahedron_transform.h"
"${draco_src_root}/attributes/attribute_quantization_transform.cc"
"${draco_src_root}/attributes/attribute_quantization_transform.h"
"${draco_src_root}/attributes/attribute_transform.cc"
"${draco_src_root}/attributes/attribute_transform.h"
"${draco_src_root}/attributes/attribute_transform_data.h"
"${draco_src_root}/attributes/attribute_transform_type.h"
"${draco_src_root}/attributes/geometry_attribute.cc"
"${draco_src_root}/attributes/geometry_attribute.h"
"${draco_src_root}/attributes/geometry_indices.h"
"${draco_src_root}/attributes/point_attribute.cc"
"${draco_src_root}/attributes/point_attribute.h")
list(
APPEND
draco_compression_attributes_dec_sources
"${draco_src_root}/compression/attributes/attributes_decoder.cc"
"${draco_src_root}/compression/attributes/attributes_decoder.h"
"${draco_src_root}/compression/attributes/kd_tree_attributes_decoder.cc"
"${draco_src_root}/compression/attributes/kd_tree_attributes_decoder.h"
"${draco_src_root}/compression/attributes/kd_tree_attributes_shared.h"
"${draco_src_root}/compression/attributes/mesh_attribute_indices_encoding_data.h"
"${draco_src_root}/compression/attributes/normal_compression_utils.h"
"${draco_src_root}/compression/attributes/point_d_vector.h"
"${draco_src_root}/compression/attributes/sequential_attribute_decoder.cc"
"${draco_src_root}/compression/attributes/sequential_attribute_decoder.h"
"${draco_src_root}/compression/attributes/sequential_attribute_decoders_controller.cc"
"${draco_src_root}/compression/attributes/sequential_attribute_decoders_controller.h"
"${draco_src_root}/compression/attributes/sequential_integer_attribute_decoder.cc"
"${draco_src_root}/compression/attributes/sequential_integer_attribute_decoder.h"
"${draco_src_root}/compression/attributes/sequential_normal_attribute_decoder.cc"
"${draco_src_root}/compression/attributes/sequential_normal_attribute_decoder.h"
"${draco_src_root}/compression/attributes/sequential_quantization_attribute_decoder.cc"
"${draco_src_root}/compression/attributes/sequential_quantization_attribute_decoder.h"
)
list(
APPEND
draco_compression_attributes_enc_sources
"${draco_src_root}/compression/attributes/attributes_encoder.cc"
"${draco_src_root}/compression/attributes/attributes_encoder.h"
"${draco_src_root}/compression/attributes/kd_tree_attributes_encoder.cc"
"${draco_src_root}/compression/attributes/kd_tree_attributes_encoder.h"
"${draco_src_root}/compression/attributes/linear_sequencer.h"
"${draco_src_root}/compression/attributes/points_sequencer.h"
"${draco_src_root}/compression/attributes/sequential_attribute_encoder.cc"
"${draco_src_root}/compression/attributes/sequential_attribute_encoder.h"
"${draco_src_root}/compression/attributes/sequential_attribute_encoders_controller.cc"
"${draco_src_root}/compression/attributes/sequential_attribute_encoders_controller.h"
"${draco_src_root}/compression/attributes/sequential_integer_attribute_encoder.cc"
"${draco_src_root}/compression/attributes/sequential_integer_attribute_encoder.h"
"${draco_src_root}/compression/attributes/sequential_normal_attribute_encoder.cc"
"${draco_src_root}/compression/attributes/sequential_normal_attribute_encoder.h"
"${draco_src_root}/compression/attributes/sequential_quantization_attribute_encoder.cc"
"${draco_src_root}/compression/attributes/sequential_quantization_attribute_encoder.h"
)
list(
APPEND
draco_compression_attributes_pred_schemes_dec_sources
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_decoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_factory.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_interface.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h"
)
list(
APPEND
draco_compression_attributes_pred_schemes_enc_sources
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_factory.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_interface.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h"
)
list(
APPEND
draco_compression_bit_coders_sources
"${draco_src_root}/compression/bit_coders/adaptive_rans_bit_coding_shared.h"
"${draco_src_root}/compression/bit_coders/adaptive_rans_bit_decoder.cc"
"${draco_src_root}/compression/bit_coders/adaptive_rans_bit_decoder.h"
"${draco_src_root}/compression/bit_coders/adaptive_rans_bit_encoder.cc"
"${draco_src_root}/compression/bit_coders/adaptive_rans_bit_encoder.h"
"${draco_src_root}/compression/bit_coders/direct_bit_decoder.cc"
"${draco_src_root}/compression/bit_coders/direct_bit_decoder.h"
"${draco_src_root}/compression/bit_coders/direct_bit_encoder.cc"
"${draco_src_root}/compression/bit_coders/direct_bit_encoder.h"
"${draco_src_root}/compression/bit_coders/folded_integer_bit_decoder.h"
"${draco_src_root}/compression/bit_coders/folded_integer_bit_encoder.h"
"${draco_src_root}/compression/bit_coders/rans_bit_decoder.cc"
"${draco_src_root}/compression/bit_coders/rans_bit_decoder.h"
"${draco_src_root}/compression/bit_coders/rans_bit_encoder.cc"
"${draco_src_root}/compression/bit_coders/rans_bit_encoder.h"
"${draco_src_root}/compression/bit_coders/symbol_bit_decoder.cc"
"${draco_src_root}/compression/bit_coders/symbol_bit_decoder.h"
"${draco_src_root}/compression/bit_coders/symbol_bit_encoder.cc"
"${draco_src_root}/compression/bit_coders/symbol_bit_encoder.h")
list(APPEND draco_enc_config_sources
"${draco_src_root}/compression/config/compression_shared.h"
"${draco_src_root}/compression/config/draco_options.h"
"${draco_src_root}/compression/config/encoder_options.h"
"${draco_src_root}/compression/config/encoding_features.h")
list(APPEND draco_dec_config_sources
"${draco_src_root}/compression/config/compression_shared.h"
"${draco_src_root}/compression/config/decoder_options.h"
"${draco_src_root}/compression/config/draco_options.h")
list(APPEND draco_compression_decode_sources
"${draco_src_root}/compression/decode.cc"
"${draco_src_root}/compression/decode.h")
list(APPEND draco_compression_encode_sources
"${draco_src_root}/compression/encode.cc"
"${draco_src_root}/compression/encode.h"
"${draco_src_root}/compression/encode_base.h"
"${draco_src_root}/compression/expert_encode.cc"
"${draco_src_root}/compression/expert_encode.h")
list(
APPEND
draco_compression_mesh_traverser_sources
"${draco_src_root}/compression/mesh/traverser/depth_first_traverser.h"
"${draco_src_root}/compression/mesh/traverser/max_prediction_degree_traverser.h"
"${draco_src_root}/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h"
"${draco_src_root}/compression/mesh/traverser/mesh_traversal_sequencer.h"
"${draco_src_root}/compression/mesh/traverser/traverser_base.h")
list(
APPEND
draco_compression_mesh_dec_sources
"${draco_src_root}/compression/mesh/mesh_decoder.cc"
"${draco_src_root}/compression/mesh/mesh_decoder.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder.cc"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder_impl.cc"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder_impl.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder_impl_interface.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_shared.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_decoder.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h"
"${draco_src_root}/compression/mesh/mesh_sequential_decoder.cc"
"${draco_src_root}/compression/mesh/mesh_sequential_decoder.h")
list(
APPEND
draco_compression_mesh_enc_sources
"${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder.cc"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder_impl.cc"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder_impl.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_shared.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_encoder.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h"
"${draco_src_root}/compression/mesh/mesh_encoder.cc"
"${draco_src_root}/compression/mesh/mesh_encoder.h"
"${draco_src_root}/compression/mesh/mesh_sequential_encoder.cc"
"${draco_src_root}/compression/mesh/mesh_sequential_encoder.h")
list(
APPEND
draco_compression_point_cloud_dec_sources
"${draco_src_root}/compression/point_cloud/point_cloud_decoder.cc"
"${draco_src_root}/compression/point_cloud/point_cloud_decoder.h"
"${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_decoder.cc"
"${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_decoder.h"
"${draco_src_root}/compression/point_cloud/point_cloud_sequential_decoder.cc"
"${draco_src_root}/compression/point_cloud/point_cloud_sequential_decoder.h"
)
list(
APPEND
draco_compression_point_cloud_enc_sources
"${draco_src_root}/compression/point_cloud/point_cloud_encoder.cc"
"${draco_src_root}/compression/point_cloud/point_cloud_encoder.h"
"${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_encoder.cc"
"${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_encoder.h"
"${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoder.cc"
"${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoder.h"
)
list(APPEND draco_compression_entropy_sources
"${draco_src_root}/compression/entropy/ans.h"
"${draco_src_root}/compression/entropy/rans_symbol_coding.h"
"${draco_src_root}/compression/entropy/rans_symbol_decoder.h"
"${draco_src_root}/compression/entropy/rans_symbol_encoder.h"
"${draco_src_root}/compression/entropy/shannon_entropy.cc"
"${draco_src_root}/compression/entropy/shannon_entropy.h"
"${draco_src_root}/compression/entropy/symbol_decoding.cc"
"${draco_src_root}/compression/entropy/symbol_decoding.h"
"${draco_src_root}/compression/entropy/symbol_encoding.cc"
"${draco_src_root}/compression/entropy/symbol_encoding.h")
list(APPEND draco_core_sources
"${draco_src_root}/core/bit_utils.cc"
"${draco_src_root}/core/bit_utils.h"
"${draco_src_root}/core/bounding_box.cc"
"${draco_src_root}/core/bounding_box.h"
"${draco_src_root}/core/cycle_timer.cc"
"${draco_src_root}/core/cycle_timer.h"
"${draco_src_root}/core/data_buffer.cc"
"${draco_src_root}/core/data_buffer.h"
"${draco_src_root}/core/decoder_buffer.cc"
"${draco_src_root}/core/decoder_buffer.h"
"${draco_src_root}/core/divide.cc"
"${draco_src_root}/core/divide.h"
"${draco_src_root}/core/draco_index_type.h"
"${draco_src_root}/core/draco_index_type_vector.h"
"${draco_src_root}/core/draco_types.cc"
"${draco_src_root}/core/draco_types.h"
"${draco_src_root}/core/encoder_buffer.cc"
"${draco_src_root}/core/encoder_buffer.h"
"${draco_src_root}/core/hash_utils.cc"
"${draco_src_root}/core/hash_utils.h"
"${draco_src_root}/core/macros.h"
"${draco_src_root}/core/math_utils.h"
"${draco_src_root}/core/options.cc"
"${draco_src_root}/core/options.h"
"${draco_src_root}/core/quantization_utils.cc"
"${draco_src_root}/core/quantization_utils.h"
"${draco_src_root}/core/status.h"
"${draco_src_root}/core/status_or.h"
"${draco_src_root}/core/varint_decoding.h"
"${draco_src_root}/core/varint_encoding.h"
"${draco_src_root}/core/vector_d.h")
list(APPEND draco_io_sources
"${draco_src_root}/io/file_reader_factory.cc"
"${draco_src_root}/io/file_reader_factory.h"
"${draco_src_root}/io/file_reader_interface.h"
"${draco_src_root}/io/file_utils.cc"
"${draco_src_root}/io/file_utils.h"
"${draco_src_root}/io/file_writer_factory.cc"
"${draco_src_root}/io/file_writer_factory.h"
"${draco_src_root}/io/file_writer_interface.h"
"${draco_src_root}/io/file_writer_utils.h"
"${draco_src_root}/io/file_writer_utils.cc"
"${draco_src_root}/io/mesh_io.cc"
"${draco_src_root}/io/mesh_io.h"
"${draco_src_root}/io/obj_decoder.cc"
"${draco_src_root}/io/obj_decoder.h"
"${draco_src_root}/io/obj_encoder.cc"
"${draco_src_root}/io/obj_encoder.h"
"${draco_src_root}/io/parser_utils.cc"
"${draco_src_root}/io/parser_utils.h"
"${draco_src_root}/io/ply_decoder.cc"
"${draco_src_root}/io/ply_decoder.h"
"${draco_src_root}/io/ply_encoder.cc"
"${draco_src_root}/io/ply_encoder.h"
"${draco_src_root}/io/ply_property_reader.h"
"${draco_src_root}/io/ply_property_writer.h"
"${draco_src_root}/io/ply_reader.cc"
"${draco_src_root}/io/ply_reader.h"
"${draco_src_root}/io/point_cloud_io.cc"
"${draco_src_root}/io/point_cloud_io.h"
"${draco_src_root}/io/stdio_file_reader.cc"
"${draco_src_root}/io/stdio_file_reader.h"
"${draco_src_root}/io/stdio_file_writer.cc"
"${draco_src_root}/io/stdio_file_writer.h")
list(APPEND draco_mesh_sources
"${draco_src_root}/mesh/corner_table.cc"
"${draco_src_root}/mesh/corner_table.h"
"${draco_src_root}/mesh/corner_table_iterators.h"
"${draco_src_root}/mesh/mesh.cc"
"${draco_src_root}/mesh/mesh.h"
"${draco_src_root}/mesh/mesh_are_equivalent.cc"
"${draco_src_root}/mesh/mesh_are_equivalent.h"
"${draco_src_root}/mesh/mesh_attribute_corner_table.cc"
"${draco_src_root}/mesh/mesh_attribute_corner_table.h"
"${draco_src_root}/mesh/mesh_cleanup.cc"
"${draco_src_root}/mesh/mesh_cleanup.h"
"${draco_src_root}/mesh/mesh_misc_functions.cc"
"${draco_src_root}/mesh/mesh_misc_functions.h"
"${draco_src_root}/mesh/mesh_stripifier.cc"
"${draco_src_root}/mesh/mesh_stripifier.h"
"${draco_src_root}/mesh/triangle_soup_mesh_builder.cc"
"${draco_src_root}/mesh/triangle_soup_mesh_builder.h"
"${draco_src_root}/mesh/valence_cache.h")
list(APPEND draco_point_cloud_sources
"${draco_src_root}/point_cloud/point_cloud.cc"
"${draco_src_root}/point_cloud/point_cloud.h"
"${draco_src_root}/point_cloud/point_cloud_builder.cc"
"${draco_src_root}/point_cloud/point_cloud_builder.h")
list(
APPEND
draco_points_common_sources
"${draco_src_root}/compression/point_cloud/algorithms/point_cloud_compression_method.h"
"${draco_src_root}/compression/point_cloud/algorithms/point_cloud_types.h"
"${draco_src_root}/compression/point_cloud/algorithms/quantize_points_3.h"
"${draco_src_root}/compression/point_cloud/algorithms/queuing_policy.h")
list(
APPEND
draco_points_dec_sources
"${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.cc"
"${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h"
"${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_decoder.cc"
"${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_decoder.h"
)
list(
APPEND
draco_points_enc_sources
"${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.cc"
"${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h"
"${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_encoder.cc"
"${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_encoder.h"
)
list(APPEND draco_metadata_sources
"${draco_src_root}/metadata/geometry_metadata.cc"
"${draco_src_root}/metadata/geometry_metadata.h"
"${draco_src_root}/metadata/metadata.cc"
"${draco_src_root}/metadata/metadata.h")
list(APPEND draco_metadata_enc_sources
"${draco_src_root}/metadata/metadata_encoder.cc"
"${draco_src_root}/metadata/metadata_encoder.h")
list(APPEND draco_metadata_dec_sources
"${draco_src_root}/metadata/metadata_decoder.cc"
"${draco_src_root}/metadata/metadata_decoder.h")
list(APPEND draco_animation_sources
"${draco_src_root}/animation/keyframe_animation.cc"
"${draco_src_root}/animation/keyframe_animation.h")
list(APPEND draco_animation_enc_sources
"${draco_src_root}/animation/keyframe_animation_encoder.cc"
"${draco_src_root}/animation/keyframe_animation_encoder.h")
list(APPEND draco_animation_dec_sources
"${draco_src_root}/animation/keyframe_animation_decoder.cc"
"${draco_src_root}/animation/keyframe_animation_decoder.h")
list(
APPEND draco_js_dec_sources
"${draco_src_root}/javascript/emscripten/decoder_webidl_wrapper.cc"
"${draco_src_root}/javascript/emscripten/draco_decoder_glue_wrapper.cc"
)
list(
APPEND draco_js_enc_sources
"${draco_src_root}/javascript/emscripten/draco_encoder_glue_wrapper.cc"
"${draco_src_root}/javascript/emscripten/encoder_webidl_wrapper.cc")
list(
APPEND
draco_animation_js_dec_sources
"${draco_src_root}/javascript/emscripten/animation_decoder_webidl_wrapper.cc"
"${draco_src_root}/javascript/emscripten/draco_animation_decoder_glue_wrapper.cc"
)
list(
APPEND
draco_animation_js_enc_sources
"${draco_src_root}/javascript/emscripten/animation_encoder_webidl_wrapper.cc"
"${draco_src_root}/javascript/emscripten/draco_animation_encoder_glue_wrapper.cc"
)
list(APPEND draco_unity_plug_sources
"${draco_src_root}/unity/draco_unity_plugin.cc"
"${draco_src_root}/unity/draco_unity_plugin.h")
list(APPEND draco_maya_plug_sources
"${draco_src_root}/maya/draco_maya_plugin.cc"
"${draco_src_root}/maya/draco_maya_plugin.h")
#
# Draco targets.
#
if(EMSCRIPTEN AND DRACO_JS_GLUE)
# Draco decoder and encoder "executable" targets in various flavors for
# Emscripten.
list(APPEND draco_decoder_src
${draco_attributes_sources}
${draco_compression_attributes_dec_sources}
${draco_compression_attributes_pred_schemes_dec_sources}
${draco_compression_bit_coders_sources}
${draco_compression_decode_sources}
${draco_compression_entropy_sources}
${draco_compression_mesh_traverser_sources}
${draco_compression_mesh_dec_sources}
${draco_compression_point_cloud_dec_sources}
${draco_core_sources}
${draco_dec_config_sources}
${draco_js_dec_sources}
${draco_mesh_sources}
${draco_metadata_dec_sources}
${draco_metadata_sources}
${draco_point_cloud_sources}
${draco_points_dec_sources})
list(APPEND draco_encoder_src
${draco_attributes_sources}
${draco_compression_attributes_enc_sources}
${draco_compression_attributes_pred_schemes_enc_sources}
${draco_compression_bit_coders_sources}
${draco_compression_encode_sources}
${draco_compression_entropy_sources}
${draco_compression_mesh_traverser_sources}
${draco_compression_mesh_enc_sources}
${draco_compression_point_cloud_enc_sources}
${draco_core_sources}
${draco_enc_config_sources}
${draco_js_enc_sources}
${draco_mesh_sources}
${draco_metadata_enc_sources}
${draco_metadata_sources}
${draco_point_cloud_sources}
${draco_points_enc_sources})
list(APPEND draco_js_dec_idl
"${draco_src_root}/javascript/emscripten/draco_web_decoder.idl")
list(APPEND draco_js_enc_idl
"${draco_src_root}/javascript/emscripten/draco_web_encoder.idl")
list(
APPEND
draco_animation_js_dec_idl
"${draco_src_root}/javascript/emscripten/draco_animation_web_decoder.idl")
list(
APPEND
draco_animation_js_enc_idl
"${draco_src_root}/javascript/emscripten/draco_animation_web_encoder.idl")
list(APPEND draco_pre_link_js_sources
"${draco_src_root}/javascript/emscripten/prepareCallbacks.js"
"${draco_src_root}/javascript/emscripten/version.js")
list(APPEND draco_post_link_js_sources
"${draco_src_root}/javascript/emscripten/finalize.js")
list(APPEND draco_post_link_js_decoder_sources ${draco_post_link_js_sources}
"${draco_src_root}/javascript/emscripten/decoder_functions.js")
set(draco_decoder_glue_path "${draco_build}/glue_decoder")
set(draco_encoder_glue_path "${draco_build}/glue_encoder")
draco_generate_emscripten_glue(INPUT_IDL ${draco_js_dec_idl} OUTPUT_PATH
${draco_decoder_glue_path})
draco_generate_emscripten_glue(INPUT_IDL ${draco_js_enc_idl} OUTPUT_PATH
${draco_encoder_glue_path})
if(DRACO_DECODER_ATTRIBUTE_DEDUPLICATION)
list(APPEND draco_decoder_features
"DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED"
"DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED")
endif()
draco_add_emscripten_executable(NAME
draco_decoder
SOURCES
${draco_decoder_src}
DEFINES
${draco_defines}
FEATURES
${draco_decoder_features}
INCLUDES
${draco_include_paths}
LINK_FLAGS
"-sEXPORT_NAME=\"DracoDecoderModule\""
GLUE_PATH
${draco_decoder_glue_path}
PRE_LINK_JS_SOURCES
${draco_pre_link_js_sources}
POST_LINK_JS_SOURCES
${draco_post_link_js_decoder_sources})
draco_add_emscripten_executable(
NAME
draco_encoder
SOURCES
${draco_encoder_src}
DEFINES
${draco_defines}
FEATURES
DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED
DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
INCLUDES
${draco_include_paths}
LINK_FLAGS
"-sEXPORT_NAME=\"DracoEncoderModule\""
GLUE_PATH
${draco_encoder_glue_path}
PRE_LINK_JS_SOURCES
${draco_pre_link_js_sources}
POST_LINK_JS_SOURCES
${draco_post_link_js_sources})
if(DRACO_ANIMATION_ENCODING)
set(draco_anim_decoder_glue_path "${draco_build}/glue_animation_decoder")
set(draco_anim_encoder_glue_path "${draco_build}/glue_animation_encoder")
draco_generate_emscripten_glue(INPUT_IDL ${draco_animation_js_dec_idl}
OUTPUT_PATH ${draco_anim_decoder_glue_path})
draco_generate_emscripten_glue(INPUT_IDL ${draco_animation_js_enc_idl}
OUTPUT_PATH ${draco_anim_encoder_glue_path})
draco_add_emscripten_executable(
NAME
draco_animation_decoder
SOURCES
${draco_animation_dec_sources}
${draco_animation_js_dec_sources}
${draco_animation_sources}
${draco_decoder_src}
DEFINES
${draco_defines}
INCLUDES
${draco_include_paths}
LINK_FLAGS
"-sEXPORT_NAME=\"DracoAnimationDecoderModule\""
GLUE_PATH
${draco_anim_decoder_glue_path}
PRE_LINK_JS_SOURCES
${draco_pre_link_js_sources}
POST_LINK_JS_SOURCES
${draco_post_link_js_decoder_sources})
draco_add_emscripten_executable(
NAME
draco_animation_encoder
SOURCES
${draco_animation_enc_sources}
${draco_animation_js_enc_sources}
${draco_animation_sources}
${draco_encoder_src}
DEFINES
${draco_defines}
INCLUDES
${draco_include_paths}
LINK_FLAGS
"-sEXPORT_NAME=\"DracoAnimationEncoderModule\""
GLUE_PATH
${draco_anim_encoder_glue_path}
PRE_LINK_JS_SOURCES
${draco_pre_link_js_sources}
POST_LINK_JS_SOURCES
${draco_post_link_js_sources})
endif()
else()
# Standard Draco libs, encoder and decoder. Object collections that mirror the
# Draco directory structure.
draco_add_library(NAME draco_attributes TYPE OBJECT SOURCES
${draco_attributes_sources} DEFINES ${draco_defines}
INCLUDES ${draco_include_paths})
draco_add_library(NAME
draco_compression_attributes_dec
TYPE
OBJECT
SOURCES
${draco_compression_attributes_dec_sources}
DEFINES
${draco_defines}
INCLUDES
${draco_include_paths})
draco_add_library(NAME draco_compression_attributes_enc TYPE OBJECT SOURCES
${draco_compression_attributes_enc_sources} DEFINES
${draco_defines} INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_compression_attributes_pred_schemes_dec TYPE
OBJECT SOURCES
${draco_compression_attributes_pred_schemes_dec_sources})
draco_add_library(NAME draco_compression_attributes_pred_schemes_enc TYPE
OBJECT SOURCES
${draco_compression_attributes_pred_schemes_enc_sources}
DEFINES ${draco_defines} INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_compression_bit_coders TYPE OBJECT SOURCES
${draco_compression_bit_coders_sources} DEFINES
${draco_defines} INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_enc_config TYPE OBJECT SOURCES
${draco_enc_config_sources} DEFINES ${draco_defines}
INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_dec_config TYPE OBJECT SOURCES
${draco_dec_config_sources} DEFINES ${draco_defines}
INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_compression_decode TYPE OBJECT SOURCES
${draco_compression_decode_sources} DEFINES ${draco_defines}
INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_compression_encode TYPE OBJECT SOURCES
${draco_compression_encode_sources} DEFINES ${draco_defines}
INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_compression_entropy TYPE OBJECT SOURCES
${draco_compression_entropy_sources} DEFINES
${draco_defines} INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_compression_mesh_traverser TYPE OBJECT SOURCES
${draco_compression_mesh_traverser_sources} DEFINES
${draco_defines} INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_compression_mesh_dec TYPE OBJECT SOURCES
${draco_compression_mesh_dec_sources} DEFINES
${draco_defines} INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_compression_mesh_enc TYPE OBJECT SOURCES
${draco_compression_mesh_enc_sources} DEFINES
${draco_defines} INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_compression_point_cloud_dec TYPE OBJECT SOURCES
${draco_compression_point_cloud_dec_sources} DEFINES
${draco_defines} INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_compression_point_cloud_enc TYPE OBJECT SOURCES
${draco_compression_point_cloud_enc_sources} DEFINES
${draco_defines} INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_core TYPE OBJECT SOURCES ${draco_core_sources}
DEFINES ${draco_defines} INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_io TYPE OBJECT SOURCES ${draco_io_sources}
DEFINES ${draco_defines} INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_mesh TYPE OBJECT SOURCES ${draco_mesh_sources}
DEFINES ${draco_defines} INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_metadata_dec TYPE OBJECT SOURCES
${draco_metadata_dec_sources} DEFINES ${draco_defines}
INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_metadata_enc TYPE OBJECT SOURCES
${draco_metadata_enc_sources} DEFINES ${draco_defines}
INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_metadata TYPE OBJECT SOURCES
${draco_metadata_sources} DEFINES ${draco_defines} INCLUDES
${draco_include_paths})
draco_add_library(NAME draco_animation_dec TYPE OBJECT SOURCES
${draco_animation_dec_sources} DEFINES ${draco_defines}
INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_animation_enc TYPE OBJECT SOURCES
${draco_animation_enc_sources} DEFINES ${draco_defines}
INCLUDES ${draco_include_paths})
draco_add_library(NAME draco_animation TYPE OBJECT SOURCES
${draco_animation_sources} DEFINES ${draco_defines} INCLUDES
${draco_include_paths})
draco_add_library(NAME draco_point_cloud TYPE OBJECT SOURCES
${draco_point_cloud_sources} DEFINES ${draco_defines}
INCLUDES ${draco_include_paths})
draco_add_library(NAME
draco_points_dec
TYPE
OBJECT
SOURCES
${draco_points_common_sources}
${draco_points_dec_sources}
DEFINES
${draco_defines}
INCLUDES
${draco_include_paths})
draco_add_library(NAME
draco_points_enc
TYPE
OBJECT
SOURCES
${draco_points_common_sources}
${draco_points_enc_sources}
DEFINES
${draco_defines}
INCLUDES
${draco_include_paths})
set(draco_object_library_deps
draco_attributes
draco_compression_attributes_dec
draco_compression_attributes_enc
draco_compression_attributes_pred_schemes_dec
draco_compression_attributes_pred_schemes_enc
draco_compression_bit_coders
draco_compression_decode
draco_compression_encode
draco_compression_entropy
draco_compression_mesh_dec
draco_compression_mesh_enc
draco_compression_point_cloud_dec
draco_compression_point_cloud_enc
draco_core
draco_dec_config
draco_enc_config
draco_io
draco_mesh
draco_metadata
draco_metadata_dec
draco_metadata_enc
draco_animation
draco_animation_dec
draco_animation_enc
draco_point_cloud
draco_points_dec
draco_points_enc)
# Library targets that consume the object collections.
if(MSVC OR WIN32)
# In order to produce a DLL and import library the Windows tools require
# that the exported symbols are part of the DLL target. The unfortunate side
# effect of this is that a single configuration cannot output both the
# static library and the DLL: This results in an either/or situation.
# Windows users of the draco build can have a DLL and an import library,
# or they can have a static library; they cannot have both from a single
# configuration of the build.
if(BUILD_SHARED_LIBS)
set(draco_lib_type SHARED)
else()
set(draco_lib_type STATIC)
endif()
draco_add_library(NAME
draco
OUTPUT_NAME
draco
TYPE
${draco_lib_type}
DEFINES
${draco_defines}
INCLUDES
${draco_include_paths}
OBJLIB_DEPS
${draco_object_library_deps})
else()
draco_add_library(NAME
draco_static
OUTPUT_NAME
draco
TYPE
STATIC
DEFINES
${draco_defines}
INCLUDES
${draco_include_paths}
OBJLIB_DEPS
${draco_object_library_deps})
if(BUILD_SHARED_LIBS)
draco_add_library(NAME
draco_shared
SOURCES
"${draco_src_root}/core/draco_version.h"
OUTPUT_NAME
draco
TYPE
SHARED
DEFINES
${draco_defines}
INCLUDES
${draco_include_paths}
LIB_DEPS
draco_static)
endif()
endif()
if(DRACO_UNITY_PLUGIN)
if(IOS)
set(unity_decoder_lib_type STATIC)
else()
set(unity_decoder_lib_type MODULE)
endif()
draco_add_library(NAME draco_unity_plugin TYPE OBJECT SOURCES
${draco_unity_plug_sources} DEFINES ${draco_defines}
INCLUDES ${draco_include_paths})
draco_add_library(NAME
dracodec_unity
TYPE
${unity_decoder_lib_type}
DEFINES
${draco_defines}
INCLUDES
${draco_include_paths}
OBJLIB_DEPS
draco_unity_plugin
LIB_DEPS
${draco_plugin_dependency})
# For Mac, we need to build a .bundle for the unity plugin.
if(APPLE)
set_target_properties(dracodec_unity PROPERTIES BUNDLE true)
elseif(NOT unity_decoder_lib_type STREQUAL STATIC)
set_target_properties(dracodec_unity
PROPERTIES SOVERSION ${DRACO_SOVERSION})
endif()
endif()
if(DRACO_MAYA_PLUGIN)
draco_add_library(NAME draco_maya_plugin TYPE OBJECT SOURCES
${draco_maya_plug_sources} DEFINES ${draco_defines}
INCLUDES ${draco_include_paths})
draco_add_library(NAME
draco_maya_wrapper
TYPE
MODULE
DEFINES
${draco_defines}
INCLUDES
${draco_include_paths}
OBJLIB_DEPS
draco_maya_plugin
LIB_DEPS
${draco_plugin_dependency})
# For Mac, we need to build a .bundle for the plugin.
if(APPLE)
set_target_properties(draco_maya_wrapper PROPERTIES BUNDLE true)
else()
set_target_properties(draco_maya_wrapper
PROPERTIES SOVERSION ${DRACO_SOVERSION})
endif()
endif()
# Draco app targets.
draco_add_executable(NAME
draco_decoder
SOURCES
"${draco_src_root}/tools/draco_decoder.cc"
${draco_io_sources}
DEFINES
${draco_defines}
INCLUDES
${draco_include_paths}
LIB_DEPS
${draco_dependency})
draco_add_executable(NAME
draco_encoder
SOURCES
"${draco_src_root}/tools/draco_encoder.cc"
${draco_io_sources}
DEFINES
${draco_defines}
INCLUDES
${draco_include_paths}
LIB_DEPS
${draco_dependency})
draco_setup_install_target()
draco_setup_test_targets()
endif()
if(DRACO_VERBOSE)
draco_dump_cmake_flag_variables()
draco_dump_tracked_configuration_variables()
draco_dump_options()
endif()

View File

@ -0,0 +1,27 @@
Want to contribute? Great! First, read this page (including the small print at the end).
### Before you contribute
Before we can use your code, you must sign the
[Google Individual Contributor License Agreement](https://cla.developers.google.com/about/google-individual)
(CLA), which you can do online. The CLA is necessary mainly because you own the
copyright to your changes, even after your contribution becomes part of our
codebase, so we need your permission to use and distribute your code. We also
need to be sure of various other things—for instance that you'll tell us if you
know that your code infringes on other people's patents. You don't have to sign
the CLA until after you've submitted your code for review and a member has
approved it, but you must do it before we can put your code into our codebase.
Before you start working on a larger contribution, you should get in touch with
us first through the issue tracker with your idea so that we can help out and
possibly guide you. Coordinating up front makes it much easier to avoid
frustration later on.
### Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose.
Please make sure that your code conforms with our
[coding style guidelines](https://google.github.io/styleguide/cppguide.html).
### The small print
Contributions made by corporations are covered by a different agreement than
the one above, the
[Software Grant and Corporate Contributor License Agreement](https://cla.developers.google.com/about/google-corporate).

View File

@ -0,0 +1,252 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
Files: docs/assets/js/ASCIIMathML.js
Copyright (c) 2014 Peter Jipsen and other ASCIIMathML.js contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
--------------------------------------------------------------------------------
Files: docs/assets/css/pygments/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org>

View File

@ -0,0 +1,478 @@
<p align="center">
<img width="350px" src="docs/artwork/draco3d-vert.svg" />
</p>
![Build Status: master](https://travis-ci.org/google/draco.svg?branch=master)
News
=======
### Version 1.4.1 release
* Using the versioned gstatic.com WASM and Javascript decoders is now
recommended. To use v1.4.1, use this URL:
* https://www.gstatic.com/draco/versioned/decoders/1.4.1/*
* Replace the * with the files to load. E.g.
* https://www.gstatic.com/draco/versioned/decoders/1.4.1/draco_decoder.js
* This works with the v1.3.6 and v1.4.0 releases, and will work with future
Draco releases.
* Bug fixes
### Version 1.4.0 release
* WASM and JavaScript decoders are hosted from a static URL.
* It is recommended to always pull your Draco WASM and JavaScript decoders from this URL:
* https://www.gstatic.com/draco/v1/decoders/*
* Replace * with the files to load. E.g.
* https://www.gstatic.com/draco/v1/decoders/draco_decoder_gltf.wasm
* Users will benefit from having the Draco decoder in cache as more sites start using the static URL
* Changed npm modules to use WASM, which increased performance by ~200%.
* Updated Emscripten to 2.0.
* This causes the Draco codec modules to return a promise instead of the module directly.
* Please see the example code on how to handle the promise.
* Changed NORMAL quantization default to 8.
* Added new array API to decoder and deprecated DecoderBuffer.
* See PR https://github.com/google/draco/issues/513 for more information.
* Changed WASM/JavaScript behavior of catching exceptions.
* See issue https://github.com/google/draco/issues/629 for more information.
* Code cleanup.
* Emscripten builds now disable NODEJS_CATCH_EXIT and NODEJS_CATCH_REJECTION.
* Authors of a CLI tool might want to add their own error handlers.
* Added Maya plugin builds.
* Unity plugin builds updated.
* Builds are now stored as archives.
* Added iOS build.
* Unity users may want to look into https://github.com/atteneder/DracoUnity.
* Bug fixes.
### Version 1.3.6 release
* WASM and JavaScript decoders are now hosted from a static URL
* It is recommended to always pull your Draco WASM and JavaScript decoders from this URL:
* https://www.gstatic.com/draco/v1/decoders/*
* Replace * with the files to load. E.g.
* https://www.gstatic.com/draco/v1/decoders/draco_decoder_gltf.wasm
* Users will benefit from having the Draco decoder in cache as more sites start using the static URL
* Changed web examples to pull Draco decoders from static URL
* Added new API to Draco WASM decoder, which increased performance by ~15%
* Decreased Draco WASM decoder size by ~20%
* Added support for generic and multiple attributes to Draco Unity plug-ins
* Added new API to Draco Unity, which increased decoder performance by ~15%
* Changed quantization defaults:
* POSITION: 11
* NORMAL: 7
* TEX_COORD: 10
* COLOR: 8
* GENERIC: 8
* Code cleanup
* Bug fixes
### Version 1.3.5 release
* Added option to build Draco for Universal Scene Description
* Code cleanup
* Bug fixes
### Version 1.3.4 release
* Released Draco Animation code
* Fixes for Unity
* Various file location and name changes
### Version 1.3.3 release
* Added ExpertEncoder to the Javascript API
* Allows developers to set quantization options per attribute id
* Bug fixes
### Version 1.3.2 release
* Bug fixes
### Version 1.3.1 release
* Fix issue with multiple attributes when skipping an attribute transform
### Version 1.3.0 release
* Improved kD-tree based point cloud encoding
* Now applicable to point clouds with any number of attributes
* Support for all integer attribute types and quantized floating point types
* Improved mesh compression up to 10% (on average ~2%)
* For meshes, the 1.3.0 bitstream is fully compatible with 1.2.x decoders
* Improved Javascript API
* Added support for all signed and unsigned integer types
* Added support for point clouds to our Javascript encoder API
* Added support for integer properties to the PLY decoder
* Bug fixes
### Previous releases
https://github.com/google/draco/releases
Description
===========
Draco is a library for compressing and decompressing 3D geometric [meshes] and
[point clouds]. It is intended to improve the storage and transmission of 3D
graphics.
Draco was designed and built for compression efficiency and speed. The code
supports compressing points, connectivity information, texture coordinates,
color information, normals, and any other generic attributes associated with
geometry. With Draco, applications using 3D graphics can be significantly
smaller without compromising visual fidelity. For users, this means apps can
now be downloaded faster, 3D graphics in the browser can load quicker, and VR
and AR scenes can now be transmitted with a fraction of the bandwidth and
rendered quickly.
Draco is released as C++ source code that can be used to compress 3D graphics
as well as C++ and Javascript decoders for the encoded data.
_**Contents**_
* [Building](#building)
* [Usage](#usage)
* [Unity](#unity)
* [WASM and JavaScript Decoders](#WASM-and-JavaScript-Decoders)
* [Command Line Applications](#command-line-applications)
* [Encoding Tool](#encoding-tool)
* [Encoding Point Clouds](#encoding-point-clouds)
* [Decoding Tool](#decoding-tool)
* [C++ Decoder API](#c-decoder-api)
* [Javascript Encoder API](#javascript-encoder-api)
* [Javascript Decoder API](#javascript-decoder-api)
* [Javascript Decoder Performance](#javascript-decoder-performance)
* [Metadata API](#metadata-api)
* [NPM Package](#npm-package)
* [three.js Renderer Example](#threejs-renderer-example)
* [Support](#support)
* [License](#license)
* [References](#references)
Building
========
See [BUILDING](BUILDING.md) for building instructions.
Usage
======
Unity
-----
For the best information about using Unity with Draco please visit https://github.com/atteneder/DracoUnity
For a simple example of using Unity with Draco see [README](unity/README.md) in the unity folder.
WASM and JavaScript Decoders
----------------------------
It is recommended to always pull your Draco WASM and JavaScript decoders from:
~~~~~ bash
https://www.gstatic.com/draco/v1/decoders/
~~~~~
Users will benefit from having the Draco decoder in cache as more sites start using the static URL.
Command Line Applications
------------------------
The default target created from the build files will be the `draco_encoder`
and `draco_decoder` command line applications. For both applications, if you
run them without any arguments or `-h`, the applications will output usage and
options.
Encoding Tool
-------------
`draco_encoder` will read OBJ or PLY files as input, and output Draco-encoded
files. We have included Stanford's [Bunny] mesh for testing. The basic command
line looks like this:
~~~~~ bash
./draco_encoder -i testdata/bun_zipper.ply -o out.drc
~~~~~
A value of `0` for the quantization parameter will not perform any quantization
on the specified attribute. Any value other than `0` will quantize the input
values for the specified attribute to that number of bits. For example:
~~~~~ bash
./draco_encoder -i testdata/bun_zipper.ply -o out.drc -qp 14
~~~~~
will quantize the positions to 14 bits (default is 11 for the position
coordinates).
In general, the more you quantize your attributes, the better compression rate
you will get. It is up to your project to decide how much deviation it will
tolerate. Most projects can set quantization values of about `11`
without any noticeable difference in quality.
The compression level (`-cl`) parameter turns on/off different compression
features.
~~~~~ bash
./draco_encoder -i testdata/bun_zipper.ply -o out.drc -cl 8
~~~~~
In general, the highest setting, `10`, will have the most compression but
worst decompression speed. `0` will have the least compression, but best
decompression speed. The default setting is `7`.
Encoding Point Clouds
---------------------
You can encode point cloud data with `draco_encoder` by specifying the
`-point_cloud` parameter. If you specify the `-point_cloud` parameter with a
mesh input file, `draco_encoder` will ignore the connectivity data and encode
the positions from the mesh file.
~~~~~ bash
./draco_encoder -point_cloud -i testdata/bun_zipper.ply -o out.drc
~~~~~
This command line will encode the mesh input as a point cloud, even though the
input might not produce compression that is representative of other point
clouds. Specifically, one can expect much better compression rates for larger
and denser point clouds.
Decoding Tool
-------------
`draco_decoder` will read Draco files as input, and output OBJ or PLY files.
The basic command line looks like this:
~~~~~ bash
./draco_decoder -i in.drc -o out.obj
~~~~~
C++ Decoder API
-------------
If you'd like to add decoding to your applications you will need to include
the `draco_dec` library. In order to use the Draco decoder you need to
initialize a `DecoderBuffer` with the compressed data. Then call
`DecodeMeshFromBuffer()` to return a decoded mesh object or call
`DecodePointCloudFromBuffer()` to return a decoded `PointCloud` object. For
example:
~~~~~ cpp
draco::DecoderBuffer buffer;
buffer.Init(data.data(), data.size());
const draco::EncodedGeometryType geom_type =
draco::GetEncodedGeometryType(&buffer);
if (geom_type == draco::TRIANGULAR_MESH) {
unique_ptr<draco::Mesh> mesh = draco::DecodeMeshFromBuffer(&buffer);
} else if (geom_type == draco::POINT_CLOUD) {
unique_ptr<draco::PointCloud> pc = draco::DecodePointCloudFromBuffer(&buffer);
}
~~~~~
Please see [src/draco/mesh/mesh.h](src/draco/mesh/mesh.h) for the full `Mesh` class interface and
[src/draco/point_cloud/point_cloud.h](src/draco/point_cloud/point_cloud.h) for the full `PointCloud` class interface.
Javascript Encoder API
----------------------
The Javascript encoder is located in `javascript/draco_encoder.js`. The encoder
API can be used to compress meshes and point clouds. In order to use the encoder,
you first need to create an instance of `DracoEncoderModule`, then use this
instance to create `MeshBuilder` and `Encoder` objects. `MeshBuilder` is used
to construct a mesh from geometry data that can later be compressed by
`Encoder`. First create a mesh object using `new encoderModule.Mesh()`. Then
use `AddFacesToMesh()` to add indices to the mesh and use
`AddFloatAttributeToMesh()` to add attribute data to the mesh, e.g. position,
normal, color, and texture coordinates. After the mesh is constructed, you can
use `EncodeMeshToDracoBuffer()` to compress it. For example:
~~~~~ js
const mesh = {
indices : new Uint32Array(indices),
vertices : new Float32Array(vertices),
normals : new Float32Array(normals)
};
const encoderModule = DracoEncoderModule();
const encoder = new encoderModule.Encoder();
const meshBuilder = new encoderModule.MeshBuilder();
const dracoMesh = new encoderModule.Mesh();
const numFaces = mesh.indices.length / 3;
const numPoints = mesh.vertices.length;
meshBuilder.AddFacesToMesh(dracoMesh, numFaces, mesh.indices);
meshBuilder.AddFloatAttributeToMesh(dracoMesh, encoderModule.POSITION,
numPoints, 3, mesh.vertices);
if (mesh.hasOwnProperty('normals')) {
meshBuilder.AddFloatAttributeToMesh(
dracoMesh, encoderModule.NORMAL, numPoints, 3, mesh.normals);
}
if (mesh.hasOwnProperty('colors')) {
meshBuilder.AddFloatAttributeToMesh(
dracoMesh, encoderModule.COLOR, numPoints, 3, mesh.colors);
}
if (mesh.hasOwnProperty('texcoords')) {
meshBuilder.AddFloatAttributeToMesh(
dracoMesh, encoderModule.TEX_COORD, numPoints, 3, mesh.texcoords);
}
if (method === "edgebreaker") {
encoder.SetEncodingMethod(encoderModule.MESH_EDGEBREAKER_ENCODING);
} else if (method === "sequential") {
encoder.SetEncodingMethod(encoderModule.MESH_SEQUENTIAL_ENCODING);
}
const encodedData = new encoderModule.DracoInt8Array();
// Use default encoding setting.
const encodedLen = encoder.EncodeMeshToDracoBuffer(dracoMesh,
encodedData);
encoderModule.destroy(dracoMesh);
encoderModule.destroy(encoder);
encoderModule.destroy(meshBuilder);
~~~~~
Please see [src/draco/javascript/emscripten/draco_web_encoder.idl](src/draco/javascript/emscripten/draco_web_encoder.idl) for the full API.
Javascript Decoder API
----------------------
The Javascript decoder is located in [javascript/draco_decoder.js](javascript/draco_decoder.js). The
Javascript decoder can decode meshes and point clouds. In order to use the
decoder, you must first create an instance of `DracoDecoderModule`. The
instance is then used to create `DecoderBuffer` and `Decoder` objects. Set
the encoded data in the `DecoderBuffer`, then call `GetEncodedGeometryType()`
to identify the type of geometry, e.g. mesh or point cloud. Finally, call either
`DecodeBufferToMesh()` or `DecodeBufferToPointCloud()`, which will return
a `Mesh` object or a `PointCloud` object. For example:
~~~~~ js
// Create the Draco decoder module.
const decoderModule = DracoDecoderModule();
// Create a buffer to hold the encoded data.
const buffer = new decoderModule.DecoderBuffer();
buffer.Init(byteArray, byteArray.length);
// Create the decoder.
const decoder = new decoderModule.Decoder();
const geometryType = decoder.GetEncodedGeometryType(buffer);
// Decode the encoded geometry.
let outputGeometry;
let status;
if (geometryType == decoderModule.TRIANGULAR_MESH) {
outputGeometry = new decoderModule.Mesh();
status = decoder.DecodeBufferToMesh(buffer, outputGeometry);
} else {
outputGeometry = new decoderModule.PointCloud();
status = decoder.DecodeBufferToPointCloud(buffer, outputGeometry);
}
// You must explicitly delete objects created from the DracoDecoderModule
// or Decoder.
decoderModule.destroy(outputGeometry);
decoderModule.destroy(decoder);
decoderModule.destroy(buffer);
~~~~~
Please see [src/draco/javascript/emscripten/draco_web_decoder.idl](src/draco/javascript/emscripten/draco_web_decoder.idl) for the full API.
Javascript Decoder Performance
------------------------------
The Javascript decoder is built with dynamic memory by default. This lets the
decoder work with compressed data of any size, but it is not the fastest option:
pre-allocating the memory yields roughly a 2x decoder speed improvement. If you
know all of your project's memory requirements, you can turn on static memory
by changing `CMakeLists.txt` accordingly.
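As a rough, hypothetical sketch (not the project's official configuration): the Emscripten linker flags appended by `cmake/draco_emscripten.cmake` (shown later in this diff) are one place such a change could be made. The option names below are standard Emscripten settings; the 64 MB figure is a placeholder you would size to your own data.
~~~~~ cmake
# Hypothetical: trade the growable heap for a fixed pre-allocated one.
# draco_get_required_emscripten_flags() normally appends
#   "-sALLOW_MEMORY_GROWTH=1"
# Replacing it with a fixed allocation is one way to get static memory:
list(APPEND ${em_FLAG_LIST_VAR} "-sALLOW_MEMORY_GROWTH=0")
list(APPEND ${em_FLAG_LIST_VAR} "-sTOTAL_MEMORY=67108864") # 64 MB placeholder
~~~~~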
Metadata API
------------
Starting from v1.0, Draco provides metadata functionality for encoding data
other than geometry. It can be used to encode any custom data along with the
geometry, for example the names of attributes, the names of sub-objects, and
other custom information.
Each mesh or point cloud can have one top-level geometry metadata class. The
top-level metadata can in turn contain hierarchical metadata, as well as
metadata for each attribute, called attribute metadata. The attribute metadata
should be initialized with the corresponding attribute id within the mesh. The
metadata API is provided in both C++ and Javascript.
For example, to add metadata in C++:
~~~~~ cpp
draco::PointCloud pc;
// Add metadata for the geometry.
std::unique_ptr<draco::GeometryMetadata> metadata =
std::unique_ptr<draco::GeometryMetadata>(new draco::GeometryMetadata());
metadata->AddEntryString("description", "This is an example.");
pc.AddMetadata(std::move(metadata));
// Add metadata for attributes.
draco::GeometryAttribute pos_att;
pos_att.Init(draco::GeometryAttribute::POSITION, nullptr, 3,
draco::DT_FLOAT32, false, 12, 0);
const uint32_t pos_att_id = pc.AddAttribute(pos_att, false, 0);
std::unique_ptr<draco::AttributeMetadata> pos_metadata =
std::unique_ptr<draco::AttributeMetadata>(
new draco::AttributeMetadata(pos_att_id));
pos_metadata->AddEntryString("name", "position");
// Directly add attribute metadata to the geometry.
// You can do this without explicitly adding a |GeometryMetadata| to the mesh.
pc.AddAttributeMetadata(pos_att_id, std::move(pos_metadata));
~~~~~
To read metadata from a geometry in C++:
~~~~~ cpp
// Get metadata for the geometry.
const draco::GeometryMetadata *pc_metadata = pc.GetMetadata();
// Request metadata for a specific attribute.
const draco::AttributeMetadata *requested_pos_metadata =
pc.GetAttributeMetadataByStringEntry("name", "position");
~~~~~
Please see [src/draco/metadata](src/draco/metadata) and [src/draco/point_cloud](src/draco/point_cloud) for the full API.
NPM Package
-----------
The Draco NPM NodeJS package is located in [javascript/npm/draco3d](javascript/npm/draco3d). Please see the
documentation in that folder for detailed usage.
three.js Renderer Example
-------------------------
Here's an [example] of geometry compressed with Draco and loaded via a
Javascript decoder using the `three.js` renderer.
Please see the [javascript/example/README.md](javascript/example/README.md) file for more information.
Support
=======
For questions/comments please email <draco-3d-discuss@googlegroups.com>
If you have found an error in this library, please file an issue at
<https://github.com/google/draco/issues>
Patches are encouraged, and may be submitted by forking this project and
submitting a pull request through GitHub. See [CONTRIBUTING] for more detail.
License
=======
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
References
==========
[example]:https://storage.googleapis.com/demos.webmproject.org/draco/draco_loader_throw.html
[meshes]: https://en.wikipedia.org/wiki/Polygon_mesh
[point clouds]: https://en.wikipedia.org/wiki/Point_cloud
[Bunny]: https://graphics.stanford.edu/data/3Dscanrep/
[CONTRIBUTING]: https://raw.githubusercontent.com/google/draco/master/CONTRIBUTING.md
Bunny model from Stanford's graphic department <https://graphics.stanford.edu/data/3Dscanrep/>

View File

@ -0,0 +1,3 @@
@PACKAGE_INIT@
set_and_check(draco_INCLUDE_DIR "@PACKAGE_draco_include_install_dir@")
set_and_check(draco_LIBRARY_DIR "@PACKAGE_draco_lib_install_dir@")

View File

@ -0,0 +1,56 @@
# Finddraco
#
# Locates draco and sets the following variables:
#
# draco_FOUND draco_INCLUDE_DIRS draco_LIBRARY_DIRS draco_LIBRARIES
# draco_VERSION_STRING
#
# draco_FOUND is set to YES only when all other variables are successfully
# configured.
unset(draco_FOUND)
unset(draco_INCLUDE_DIRS)
unset(draco_LIBRARY_DIRS)
unset(draco_LIBRARIES)
unset(draco_VERSION_STRING)
mark_as_advanced(draco_FOUND)
mark_as_advanced(draco_INCLUDE_DIRS)
mark_as_advanced(draco_LIBRARY_DIRS)
mark_as_advanced(draco_LIBRARIES)
mark_as_advanced(draco_VERSION_STRING)
set(draco_version_file_no_prefix "draco/src/draco/core/draco_version.h")
# Set draco_INCLUDE_DIRS
find_path(draco_INCLUDE_DIRS NAMES "${draco_version_file_no_prefix}")
# Extract the version string from draco_version.h.
if(draco_INCLUDE_DIRS)
set(draco_version_file
"${draco_INCLUDE_DIRS}/draco/src/draco/core/draco_version.h")
file(STRINGS "${draco_version_file}" draco_version REGEX "kdracoVersion")
list(GET draco_version 0 draco_version)
string(REPLACE "static const char kdracoVersion[] = " "" draco_version
"${draco_version}")
string(REPLACE ";" "" draco_version "${draco_version}")
string(REPLACE "\"" "" draco_version "${draco_version}")
set(draco_VERSION_STRING ${draco_version})
endif()
# Find the library.
if(BUILD_SHARED_LIBS)
find_library(draco_LIBRARIES NAMES draco.dll libdraco.dylib libdraco.so)
else()
find_library(draco_LIBRARIES NAMES draco.lib libdraco.a)
endif()
# Store path to library.
get_filename_component(draco_LIBRARY_DIRS ${draco_LIBRARIES} DIRECTORY)
if(draco_INCLUDE_DIRS
AND draco_LIBRARY_DIRS
AND draco_LIBRARIES
AND draco_VERSION_STRING)
set(draco_FOUND YES)
endif()
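# --- Illustrative usage (editor's sketch, not part of Finddraco.cmake) ---
# A consumer project that ships this module on its CMAKE_MODULE_PATH could
# locate draco roughly as follows. The target name "my_app" is hypothetical;
# the variables are the ones this module sets above.
#
#   list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
#   find_package(draco REQUIRED)
#   add_executable(my_app main.cc)
#   target_include_directories(my_app PRIVATE ${draco_INCLUDE_DIRS})
#   target_link_libraries(my_app PRIVATE ${draco_LIBRARIES})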

View File

@ -0,0 +1,220 @@
if(DRACO_CMAKE_COMPILER_FLAGS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_COMPILER_FLAGS_CMAKE_ 1)
include(CheckCCompilerFlag)
include(CheckCXXCompilerFlag)
include("${draco_root}/cmake/compiler_tests.cmake")
# Strings used to cache failed C/CXX flags.
set(DRACO_FAILED_C_FLAGS)
set(DRACO_FAILED_CXX_FLAGS)
# Checks C compiler for support of $c_flag. Adds $c_flag to $CMAKE_C_FLAGS when
# the compile test passes. Caches $c_flag in $DRACO_FAILED_C_FLAGS when the test
# fails.
macro(add_c_flag_if_supported c_flag)
unset(C_FLAG_FOUND CACHE)
string(FIND "${CMAKE_C_FLAGS}" "${c_flag}" C_FLAG_FOUND)
unset(C_FLAG_FAILED CACHE)
string(FIND "${DRACO_FAILED_C_FLAGS}" "${c_flag}" C_FLAG_FAILED)
if(${C_FLAG_FOUND} EQUAL -1 AND ${C_FLAG_FAILED} EQUAL -1)
unset(C_FLAG_SUPPORTED CACHE)
message("Checking C compiler flag support for: " ${c_flag})
check_c_compiler_flag("${c_flag}" C_FLAG_SUPPORTED)
if(${C_FLAG_SUPPORTED})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${c_flag}" CACHE STRING "")
else()
set(DRACO_FAILED_C_FLAGS
"${DRACO_FAILED_C_FLAGS} ${c_flag}"
CACHE STRING "" FORCE)
endif()
endif()
endmacro()
# Checks C++ compiler for support of $cxx_flag. Adds $cxx_flag to
# $CMAKE_CXX_FLAGS when the compile test passes. Caches $c_flag in
# $DRACO_FAILED_CXX_FLAGS when the test fails.
macro(add_cxx_flag_if_supported cxx_flag)
unset(CXX_FLAG_FOUND CACHE)
string(FIND "${CMAKE_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FOUND)
unset(CXX_FLAG_FAILED CACHE)
string(FIND "${DRACO_FAILED_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FAILED)
if(${CXX_FLAG_FOUND} EQUAL -1 AND ${CXX_FLAG_FAILED} EQUAL -1)
unset(CXX_FLAG_SUPPORTED CACHE)
message("Checking CXX compiler flag support for: " ${cxx_flag})
check_cxx_compiler_flag("${cxx_flag}" CXX_FLAG_SUPPORTED)
if(${CXX_FLAG_SUPPORTED})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${cxx_flag}" CACHE STRING "")
else()
set(DRACO_FAILED_CXX_FLAGS
"${DRACO_FAILED_CXX_FLAGS} ${cxx_flag}"
CACHE STRING "" FORCE)
endif()
endif()
endmacro()
# Convenience method for adding a flag to both the C and C++ compiler command
# lines.
macro(add_compiler_flag_if_supported flag)
add_c_flag_if_supported(${flag})
add_cxx_flag_if_supported(${flag})
endmacro()
# Checks C compiler for support of $c_flag and terminates generation when
# support is not present.
macro(require_c_flag c_flag update_c_flags)
unset(C_FLAG_FOUND CACHE)
string(FIND "${CMAKE_C_FLAGS}" "${c_flag}" C_FLAG_FOUND)
if(${C_FLAG_FOUND} EQUAL -1)
unset(HAVE_C_FLAG CACHE)
message("Checking C compiler flag support for: " ${c_flag})
check_c_compiler_flag("${c_flag}" HAVE_C_FLAG)
if(NOT ${HAVE_C_FLAG})
message(
FATAL_ERROR "${PROJECT_NAME} requires support for C flag: ${c_flag}.")
endif()
if(${update_c_flags})
set(CMAKE_C_FLAGS "${c_flag} ${CMAKE_C_FLAGS}" CACHE STRING "" FORCE)
endif()
endif()
endmacro()
# Checks CXX compiler for support of $cxx_flag and terminates generation when
# support is not present.
macro(require_cxx_flag cxx_flag update_cxx_flags)
unset(CXX_FLAG_FOUND CACHE)
string(FIND "${CMAKE_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FOUND)
if(${CXX_FLAG_FOUND} EQUAL -1)
unset(HAVE_CXX_FLAG CACHE)
message("Checking CXX compiler flag support for: " ${cxx_flag})
check_cxx_compiler_flag("${cxx_flag}" HAVE_CXX_FLAG)
if(NOT ${HAVE_CXX_FLAG})
message(
FATAL_ERROR
"${PROJECT_NAME} requires support for CXX flag: ${cxx_flag}.")
endif()
if(${update_cxx_flags})
set(CMAKE_CXX_FLAGS
"${cxx_flag} ${CMAKE_CXX_FLAGS}"
CACHE STRING "" FORCE)
endif()
endif()
endmacro()
# Checks for support of $flag by both the C and CXX compilers. Terminates
# generation when support is not present in both compilers.
macro(require_compiler_flag flag update_cmake_flags)
require_c_flag(${flag} ${update_cmake_flags})
require_cxx_flag(${flag} ${update_cmake_flags})
endmacro()
# Checks only non-MSVC targets for support of $c_flag and terminates generation
# when support is not present.
macro(require_c_flag_nomsvc c_flag update_c_flags)
if(NOT MSVC)
require_c_flag(${c_flag} ${update_c_flags})
endif()
endmacro()
# Checks only non-MSVC targets for support of $cxx_flag and terminates
# generation when support is not present.
macro(require_cxx_flag_nomsvc cxx_flag update_cxx_flags)
if(NOT MSVC)
require_cxx_flag(${cxx_flag} ${update_cxx_flags})
endif()
endmacro()
# Checks only non-MSVC targets for support of $flag by both the C and CXX
# compilers. Terminates generation when support is not present in both
# compilers.
macro(require_compiler_flag_nomsvc flag update_cmake_flags)
require_c_flag_nomsvc(${flag} ${update_cmake_flags})
require_cxx_flag_nomsvc(${flag} ${update_cmake_flags})
endmacro()
# Adds $flag to assembler command line.
macro(append_as_flag flag)
unset(AS_FLAG_FOUND CACHE)
string(FIND "${DRACO_AS_FLAGS}" "${flag}" AS_FLAG_FOUND)
if(${AS_FLAG_FOUND} EQUAL -1)
set(DRACO_AS_FLAGS "${DRACO_AS_FLAGS} ${flag}")
endif()
endmacro()
# Adds $flag to the C compiler command line.
macro(append_c_flag flag)
unset(C_FLAG_FOUND CACHE)
string(FIND "${CMAKE_C_FLAGS}" "${flag}" C_FLAG_FOUND)
if(${C_FLAG_FOUND} EQUAL -1)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}")
endif()
endmacro()
# Adds $flag to the CXX compiler command line.
macro(append_cxx_flag flag)
unset(CXX_FLAG_FOUND CACHE)
string(FIND "${CMAKE_CXX_FLAGS}" "${flag}" CXX_FLAG_FOUND)
if(${CXX_FLAG_FOUND} EQUAL -1)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
endif()
endmacro()
# Adds $flag to the C and CXX compiler command lines.
macro(append_compiler_flag flag)
append_c_flag(${flag})
append_cxx_flag(${flag})
endmacro()
# Adds $flag to the executable linker command line.
macro(append_exe_linker_flag flag)
unset(LINKER_FLAG_FOUND CACHE)
string(FIND "${CMAKE_EXE_LINKER_FLAGS}" "${flag}" LINKER_FLAG_FOUND)
if(${LINKER_FLAG_FOUND} EQUAL -1)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${flag}")
endif()
endmacro()
# Adds $flag to the link flags for $target.
function(append_link_flag_to_target target flags)
unset(target_link_flags)
get_target_property(target_link_flags ${target} LINK_FLAGS)
if(target_link_flags)
unset(link_flag_found)
string(FIND "${target_link_flags}" "${flags}" link_flag_found)
if(NOT ${link_flag_found} EQUAL -1)
return()
endif()
set(target_link_flags "${target_link_flags} ${flags}")
else()
set(target_link_flags "${flags}")
endif()
set_target_properties(${target} PROPERTIES LINK_FLAGS ${target_link_flags})
endfunction()
# Adds $flag to executable linker flags, and makes sure C/CXX builds still work.
macro(require_linker_flag flag)
append_exe_linker_flag(${flag})
unset(c_passed)
draco_check_c_compiles("LINKER_FLAG_C_TEST(${flag})" "" c_passed)
unset(cxx_passed)
draco_check_cxx_compiles("LINKER_FLAG_CXX_TEST(${flag})" "" cxx_passed)
if(NOT c_passed OR NOT cxx_passed)
message(FATAL_ERROR "Linker flag test for ${flag} failed.")
endif()
endmacro()
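# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A project that includes this file might exercise the helpers above roughly
# like this; the flags and the target name are placeholders, not draco's real
# flag set:
#
#   add_compiler_flag_if_supported("-Wshadow")      # added only when the check passes
#   append_compiler_flag("-DEXAMPLE_DEFINE=1")      # appended unconditionally
#   append_exe_linker_flag("-Wl,--as-needed")
#   append_link_flag_to_target("example_target" "-Wl,--as-needed")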

View File

@ -0,0 +1,103 @@
if(DRACO_CMAKE_COMPILER_TESTS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_COMPILER_TESTS_CMAKE_ 1)
include(CheckCSourceCompiles)
include(CheckCXXSourceCompiles)
# The basic main() macro used in all compile tests.
set(DRACO_C_MAIN "\nint main(void) { return 0; }")
set(DRACO_CXX_MAIN "\nint main() { return 0; }")
# Strings containing the names of passed and failed tests.
set(DRACO_C_PASSED_TESTS)
set(DRACO_C_FAILED_TESTS)
set(DRACO_CXX_PASSED_TESTS)
set(DRACO_CXX_FAILED_TESTS)
macro(draco_push_var var new_value)
set(SAVED_${var} ${${var}})
set(${var} ${new_value})
endmacro()
macro(draco_pop_var var)
set(${var} ${SAVED_${var}})
unset(SAVED_${var})
endmacro()
# Confirms $test_source compiles and stores $test_name in one of
# $DRACO_C_PASSED_TESTS or $DRACO_C_FAILED_TESTS depending on outcome. When the
# test passes $result_var is set to 1. When it fails $result_var is unset. The
# test is not run if the test name is found in either of the passed or failed
# test variables.
macro(draco_check_c_compiles test_name test_source result_var)
unset(C_TEST_PASSED CACHE)
unset(C_TEST_FAILED CACHE)
string(FIND "${DRACO_C_PASSED_TESTS}" "${test_name}" C_TEST_PASSED)
string(FIND "${DRACO_C_FAILED_TESTS}" "${test_name}" C_TEST_FAILED)
if(${C_TEST_PASSED} EQUAL -1 AND ${C_TEST_FAILED} EQUAL -1)
unset(C_TEST_COMPILED CACHE)
message("Running C compiler test: ${test_name}")
check_c_source_compiles("${test_source} ${DRACO_C_MAIN}" C_TEST_COMPILED)
set(${result_var} ${C_TEST_COMPILED})
if(${C_TEST_COMPILED})
set(DRACO_C_PASSED_TESTS "${DRACO_C_PASSED_TESTS} ${test_name}")
else()
set(DRACO_C_FAILED_TESTS "${DRACO_C_FAILED_TESTS} ${test_name}")
message("C Compiler test ${test_name} failed.")
endif()
elseif(NOT ${C_TEST_PASSED} EQUAL -1)
set(${result_var} 1)
else() # ${C_TEST_FAILED} NOT EQUAL -1
unset(${result_var})
endif()
endmacro()
# Confirms $test_source compiles and stores $test_name in one of
# $DRACO_CXX_PASSED_TESTS or $DRACO_CXX_FAILED_TESTS depending on outcome. When
# the test passes $result_var is set to 1. When it fails $result_var is unset.
# The test is not run if the test name is found in either of the passed or
# failed test variables.
macro(draco_check_cxx_compiles test_name test_source result_var)
unset(CXX_TEST_PASSED CACHE)
unset(CXX_TEST_FAILED CACHE)
string(FIND "${DRACO_CXX_PASSED_TESTS}" "${test_name}" CXX_TEST_PASSED)
string(FIND "${DRACO_CXX_FAILED_TESTS}" "${test_name}" CXX_TEST_FAILED)
if(${CXX_TEST_PASSED} EQUAL -1 AND ${CXX_TEST_FAILED} EQUAL -1)
unset(CXX_TEST_COMPILED CACHE)
message("Running CXX compiler test: ${test_name}")
check_cxx_source_compiles("${test_source} ${DRACO_CXX_MAIN}"
CXX_TEST_COMPILED)
set(${result_var} ${CXX_TEST_COMPILED})
if(${CXX_TEST_COMPILED})
set(DRACO_CXX_PASSED_TESTS "${DRACO_CXX_PASSED_TESTS} ${test_name}")
else()
set(DRACO_CXX_FAILED_TESTS "${DRACO_CXX_FAILED_TESTS} ${test_name}")
message("CXX Compiler test ${test_name} failed.")
endif()
elseif(NOT ${CXX_TEST_PASSED} EQUAL -1)
set(${result_var} 1)
else() # ${CXX_TEST_FAILED} NOT EQUAL -1
unset(${result_var})
endif()
endmacro()
# Convenience macro that confirms $test_source compiles as C and C++.
# $result_var is set to 1 when both tests are successful, and 0 when one or both
# tests fail. Note: This macro is intended to be used to write to result
# variables that are expanded via configure_file(). $result_var is set to 1 or 0
# to allow direct usage of the value in generated source files.
macro(draco_check_source_compiles test_name test_source result_var)
unset(C_PASSED)
unset(CXX_PASSED)
draco_check_c_compiles(${test_name} ${test_source} C_PASSED)
draco_check_cxx_compiles(${test_name} ${test_source} CXX_PASSED)
if(${C_PASSED} AND ${CXX_PASSED})
set(${result_var} 1)
else()
set(${result_var} 0)
endif()
endmacro()
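# --- Illustrative usage (editor's sketch, not part of the original file) ---
# The macros above cache pass/fail state by test name, so repeated calls are
# cheap. A hypothetical feature probe might look like:
#
#   draco_check_source_compiles("example_has_builtin_expect"
#                               "int f(int x) { return __builtin_expect(x, 1); }"
#                               HAVE_BUILTIN_EXPECT)
#   # HAVE_BUILTIN_EXPECT is now 1 or 0 and can be written into a generated
#   # header via configure_file().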

View File

@ -0,0 +1,2 @@
set(DRACO_INCLUDE_DIRS "@DRACO_INCLUDE_DIRS@")
set(DRACO_LIBRARIES "draco")

View File

@ -0,0 +1,11 @@
prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
includedir=@includedir@
Name: @PROJECT_NAME@
Description: Draco geometry de(com)pression library.
Version: @DRACO_VERSION@
Cflags: -I${includedir}
Libs: -L${libdir} -ldraco
Libs.private: @CMAKE_THREAD_LIBS_INIT@

View File

@ -0,0 +1,117 @@
if(DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_
set(DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_ 1)
# Utility for controlling the main draco library dependency. This changes in
# shared builds, and when an optional target requires a shared library build.
macro(set_draco_target)
if(MSVC OR WIN32)
set(draco_dependency draco)
set(draco_plugin_dependency ${draco_dependency})
else()
if(BUILD_SHARED_LIBS)
set(draco_dependency draco_shared)
else()
set(draco_dependency draco_static)
endif()
set(draco_plugin_dependency draco_static)
endif()
if(BUILD_SHARED_LIBS)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
endmacro()
# Configures flags and sets build system globals.
macro(draco_set_build_definitions)
string(TOLOWER "${CMAKE_BUILD_TYPE}" build_type_lowercase)
if(build_type_lowercase MATCHES "rel" AND DRACO_FAST)
if(MSVC)
list(APPEND draco_msvc_cxx_flags "/Ox")
else()
list(APPEND draco_base_cxx_flags "-O3")
endif()
endif()
draco_load_version_info()
set(DRACO_SOVERSION 1)
list(APPEND draco_include_paths "${draco_root}" "${draco_root}/src"
"${draco_build}")
if(DRACO_ABSL)
list(APPEND draco_include_paths "${draco_root}/third_party/abseil-cpp")
endif()
list(APPEND draco_gtest_include_paths
"${draco_root}/../googletest/googlemock/include"
"${draco_root}/../googletest/googlemock"
"${draco_root}/../googletest/googletest/include"
"${draco_root}/../googletest/googletest")
list(APPEND draco_test_include_paths ${draco_include_paths}
${draco_gtest_include_paths})
list(APPEND draco_defines "DRACO_CMAKE=1"
"DRACO_FLAGS_SRCDIR=\"${draco_root}\""
"DRACO_FLAGS_TMPDIR=\"/tmp\"")
if(MSVC OR WIN32)
list(APPEND draco_defines "_CRT_SECURE_NO_DEPRECATE=1" "NOMINMAX=1")
if(BUILD_SHARED_LIBS)
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE)
endif()
endif()
if(ANDROID)
if(CMAKE_ANDROID_ARCH_ABI STREQUAL "armeabi-v7a")
set(CMAKE_ANDROID_ARM_MODE ON)
endif()
endif()
set_draco_target()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "6")
# Quiet warnings in copy-list-initialization where {} elision has always
# been allowed.
list(APPEND draco_clang_cxx_flags "-Wno-missing-braces")
endif()
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "7")
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7")
# Quiet gcc 6 vs 7 abi warnings:
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=77728
list(APPEND draco_base_cxx_flags "-Wno-psabi")
list(APPEND ABSL_GCC_FLAGS "-Wno-psabi")
endif()
endif()
endif()
# Source file names ending in these suffixes will have the appropriate
# compiler flags added to their compile commands to enable intrinsics.
set(draco_neon_source_file_suffix "neon.cc")
set(draco_sse4_source_file_suffix "sse4.cc")
if((${CMAKE_CXX_COMPILER_ID}
STREQUAL
"GNU"
AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 5)
OR (${CMAKE_CXX_COMPILER_ID}
STREQUAL
"Clang"
AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4))
message(
WARNING "GNU/GCC < v5 or Clang/LLVM < v4, ENABLING COMPATIBILITY MODE.")
draco_enable_feature(FEATURE "DRACO_OLD_GCC")
endif()
if(EMSCRIPTEN)
draco_check_emscripten_environment()
draco_get_required_emscripten_flags(FLAG_LIST_VAR draco_base_cxx_flags)
endif()
endmacro()
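# --- Illustrative call order (editor's sketch, not part of the original file) ---
# This macro is intended to be called from the top-level CMakeLists.txt after
# the helper modules are included. The sequence below is an assumption for
# illustration, not a copy of draco's CMakeLists.txt:
#
#   include(cmake/draco_helpers.cmake)
#   include(cmake/draco_flags.cmake)
#   include(cmake/draco_cpu_detection.cmake)
#   include(cmake/draco_build_definitions.cmake)
#   draco_set_build_definitions()   # populates draco_defines / include paths
#   draco_set_cxx_flags()           # validates and stores DRACO_CXX_FLAGS
#   draco_optimization_detect()     # appends DRACO_ENABLE_* defines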

View File

@ -0,0 +1,28 @@
if(DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_
set(DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_ 1)
# Detect optimizations available for the current target CPU.
macro(draco_optimization_detect)
if(DRACO_ENABLE_OPTIMIZATIONS)
string(TOLOWER "${CMAKE_SYSTEM_PROCESSOR}" cpu_lowercase)
if(cpu_lowercase MATCHES "^arm|^aarch64")
set(draco_have_neon ON)
elseif(cpu_lowercase MATCHES "^x86|amd64")
set(draco_have_sse4 ON)
endif()
endif()
if(draco_have_neon AND DRACO_ENABLE_NEON)
list(APPEND draco_defines "DRACO_ENABLE_NEON=1")
else()
list(APPEND draco_defines "DRACO_ENABLE_NEON=0")
endif()
if(draco_have_sse4 AND DRACO_ENABLE_SSE4_1)
list(APPEND draco_defines "DRACO_ENABLE_SSE4_1=1")
else()
list(APPEND draco_defines "DRACO_ENABLE_SSE4_1=0")
endif()
endmacro()
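# --- Illustrative usage (editor's sketch, not part of the original file) ---
# Called with optimizations enabled on an x86_64 host, the macro would append
# "DRACO_ENABLE_SSE4_1=1" (and "DRACO_ENABLE_NEON=0") to draco_defines:
#
#   set(DRACO_ENABLE_OPTIMIZATIONS ON)
#   set(DRACO_ENABLE_SSE4_1 ON)
#   set(DRACO_ENABLE_NEON ON)
#   draco_optimization_detect()
#   message(STATUS "draco_defines: ${draco_defines}")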

View File

@ -0,0 +1,185 @@
if(DRACO_CMAKE_DRACO_EMSCRIPTEN_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_EMSCRIPTEN_CMAKE_
# Checks environment for Emscripten prerequisites.
macro(draco_check_emscripten_environment)
if(NOT PYTHONINTERP_FOUND)
message(
FATAL_ERROR
"Python required for Emscripten builds, but cmake cannot find it.")
endif()
if(NOT EXISTS "$ENV{EMSCRIPTEN}")
message(
FATAL_ERROR
"The EMSCRIPTEN environment variable must be set. See README.md.")
endif()
endmacro()
# Obtains the required Emscripten flags for Draco targets.
macro(draco_get_required_emscripten_flags)
set(em_FLAG_LIST_VAR)
set(em_flags)
set(em_single_arg_opts FLAG_LIST_VAR)
set(em_multi_arg_opts)
cmake_parse_arguments(em "${em_flags}" "${em_single_arg_opts}"
"${em_multi_arg_opts}" ${ARGN})
if(NOT em_FLAG_LIST_VAR)
message(FATAL "draco_get_required_emscripten_flags: FLAG_LIST_VAR required")
endif()
if(DRACO_JS_GLUE)
unset(required_flags)
list(APPEND ${em_FLAG_LIST_VAR} "-sALLOW_MEMORY_GROWTH=1")
list(APPEND ${em_FLAG_LIST_VAR} "-Wno-almost-asm")
list(APPEND ${em_FLAG_LIST_VAR} "--memory-init-file" "0")
list(APPEND ${em_FLAG_LIST_VAR} "-fno-omit-frame-pointer")
list(APPEND ${em_FLAG_LIST_VAR} "-sMODULARIZE=1")
list(APPEND ${em_FLAG_LIST_VAR} "-sNO_FILESYSTEM=1")
list(APPEND ${em_FLAG_LIST_VAR} "-sEXPORTED_RUNTIME_METHODS=[]")
list(APPEND ${em_FLAG_LIST_VAR} "-sPRECISE_F32=1")
list(APPEND ${em_FLAG_LIST_VAR} "-sNODEJS_CATCH_EXIT=0")
list(APPEND ${em_FLAG_LIST_VAR} "-sNODEJS_CATCH_REJECTION=0")
if(DRACO_FAST)
list(APPEND ${em_FLAG_LIST_VAR} "--llvm-lto" "1")
endif()
if(DRACO_WASM)
list(APPEND ${em_FLAG_LIST_VAR} "-sWASM=1")
else()
list(APPEND ${em_FLAG_LIST_VAR} "-sWASM=0")
endif()
if(DRACO_IE_COMPATIBLE)
list(APPEND ${em_FLAG_LIST_VAR} "-sLEGACY_VM_SUPPORT=1")
endif()
endif()
endmacro()
# Macro for generating C++ glue code from IDL for Emscripten targets. Executes
# python to generate the C++ binding, and establishes a dependency of
# $OUTPUT_PATH.cpp on $INPUT_IDL.
macro(draco_generate_emscripten_glue)
set(glue_flags)
set(glue_single_arg_opts INPUT_IDL OUTPUT_PATH)
set(glue_multi_arg_opts)
cmake_parse_arguments(glue "${glue_flags}" "${glue_single_arg_opts}"
"${glue_multi_arg_opts}" ${ARGN})
if(DRACO_VERBOSE GREATER 1)
message("--------- draco_generate_emscripten_glue -----------\n"
"glue_INPUT_IDL=${glue_INPUT_IDL}\n"
"glue_OUTPUT_PATH=${glue_OUTPUT_PATH}\n" ]
"----------------------------------------------------\n")
endif()
if(NOT glue_INPUT_IDL OR NOT glue_OUTPUT_PATH)
message(
FATAL_ERROR
"draco_generate_emscripten_glue: INPUT_IDL and OUTPUT_PATH required.")
endif()
# Generate the glue source.
execute_process(COMMAND ${PYTHON_EXECUTABLE}
$ENV{EMSCRIPTEN}/tools/webidl_binder.py
${glue_INPUT_IDL} ${glue_OUTPUT_PATH})
if(NOT EXISTS "${glue_OUTPUT_PATH}.cpp")
message(FATAL_ERROR "JS glue generation failed for ${glue_INPUT_IDL}.")
endif()
# Create a dependency so that the glue is regenerated when the IDL is edited.
add_custom_command(OUTPUT "${glue_OUTPUT_PATH}.cpp"
COMMAND ${PYTHON_EXECUTABLE}
$ENV{EMSCRIPTEN}/tools/webidl_binder.py
${glue_INPUT_IDL} ${glue_OUTPUT_PATH}
DEPENDS ${draco_js_dec_idl}
COMMENT "Generating ${glue_OUTPUT_PATH}.cpp."
WORKING_DIRECTORY ${draco_build}
VERBATIM)
endmacro()
# Wrapper for draco_add_executable() that handles the extra work necessary for
# emscripten targets when generating JS glue:
#
# ~~~
# - Set source level dependency on the C++ binding.
# - Pre/Post link emscripten magic.
#
# Required args:
# - GLUE_PATH: Base path for glue file. Used to generate .cpp and .js files.
# - PRE_LINK_JS_SOURCES: em_link_pre_js() source files.
# - POST_LINK_JS_SOURCES: em_link_post_js() source files.
# Optional args:
# - FEATURES:
# ~~~
macro(draco_add_emscripten_executable)
unset(emexe_NAME)
unset(emexe_FEATURES)
unset(emexe_SOURCES)
unset(emexe_DEFINES)
unset(emexe_INCLUDES)
unset(emexe_LINK_FLAGS)
set(optional_args)
set(single_value_args NAME GLUE_PATH)
set(multi_value_args SOURCES DEFINES FEATURES INCLUDES LINK_FLAGS
PRE_LINK_JS_SOURCES POST_LINK_JS_SOURCES)
cmake_parse_arguments(emexe "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT
(emexe_GLUE_PATH
AND emexe_POST_LINK_JS_SOURCES
AND emexe_PRE_LINK_JS_SOURCES))
message(FATAL_ERROR
"draco_add_emscripten_executable: GLUE_PATH PRE_LINK_JS_SOURCES "
"POST_LINK_JS_SOURCES args required.")
endif()
if(DRACO_VERBOSE GREATER 1)
message("--------- draco_add_emscripten_executable ---------\n"
"emexe_NAME=${emexe_NAME}\n"
"emexe_SOURCES=${emexe_SOURCES}\n"
"emexe_DEFINES=${emexe_DEFINES}\n"
"emexe_INCLUDES=${emexe_INCLUDES}\n"
"emexe_LINK_FLAGS=${emexe_LINK_FLAGS}\n"
"emexe_GLUE_PATH=${emexe_GLUE_PATH}\n"
"emexe_FEATURES=${emexe_FEATURES}\n"
"emexe_PRE_LINK_JS_SOURCES=${emexe_PRE_LINK_JS_SOURCES}\n"
"emexe_POST_LINK_JS_SOURCES=${emexe_POST_LINK_JS_SOURCES}\n"
"----------------------------------------------------\n")
endif()
# The Emscripten linker needs the C++ flags in addition to whatever has been
# passed in with the target.
list(APPEND emexe_LINK_FLAGS ${DRACO_CXX_FLAGS})
if(DRACO_GLTF)
draco_add_executable(NAME
${emexe_NAME}
OUTPUT_NAME
${emexe_NAME}_gltf
SOURCES
${emexe_SOURCES}
DEFINES
${emexe_DEFINES}
INCLUDES
${emexe_INCLUDES}
LINK_FLAGS
${emexe_LINK_FLAGS})
else()
draco_add_executable(NAME ${emexe_NAME} SOURCES ${emexe_SOURCES} DEFINES
${emexe_DEFINES} INCLUDES ${emexe_INCLUDES} LINK_FLAGS
${emexe_LINK_FLAGS})
endif()
foreach(feature ${emexe_FEATURES})
draco_enable_feature(FEATURE ${feature} TARGETS ${emexe_NAME})
endforeach()
set_property(SOURCE ${emexe_SOURCES}
APPEND
PROPERTY OBJECT_DEPENDS "${emexe_GLUE_PATH}.cpp")
em_link_pre_js(${emexe_NAME} ${emexe_PRE_LINK_JS_SOURCES})
em_link_post_js(${emexe_NAME} "${emexe_GLUE_PATH}.js"
${emexe_POST_LINK_JS_SOURCES})
endmacro()
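# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A hypothetical Emscripten decoder target wired through the two macros above;
# the file names and target name are placeholders, not draco's actual sources:
#
#   draco_generate_emscripten_glue(INPUT_IDL "src/draco/javascript/emscripten/example.idl"
#                                  OUTPUT_PATH "${draco_build}/glue/example_glue")
#   draco_add_emscripten_executable(NAME example_decoder
#                                   SOURCES ${example_sources}
#                                   GLUE_PATH "${draco_build}/glue/example_glue"
#                                   PRE_LINK_JS_SOURCES "src/pre.js"
#                                   POST_LINK_JS_SOURCES "src/post.js")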

View File

@ -0,0 +1,63 @@
if(DRACO_CMAKE_DRACO_FEATURES_CMAKE_)
return()
endif()
set(DRACO_CMAKE_DRACO_FEATURES_CMAKE_ 1)
set(draco_features_file_name "${draco_build_dir}/draco/draco_features.h")
set(draco_features_list)
# Macro that handles tracking of Draco preprocessor symbols for the purpose of
# producing draco_features.h.
#
# draco_enable_feature(FEATURE <feature_name> [TARGETS <target_name>]) FEATURE
# is required. It should be a Draco preprocessor symbol. TARGETS is optional. It
# can be one or more draco targets.
#
# When the TARGETS argument is not present the preproc symbol is added to
# draco_features.h. When it is draco_features.h is unchanged, and
# target_compile_options() is called for each target specified.
macro(draco_enable_feature)
set(def_flags)
set(def_single_arg_opts FEATURE)
set(def_multi_arg_opts TARGETS)
cmake_parse_arguments(DEF "${def_flags}" "${def_single_arg_opts}"
"${def_multi_arg_opts}" ${ARGN})
if("${DEF_FEATURE}" STREQUAL "")
message(FATAL_ERROR "Empty FEATURE passed to draco_enable_feature().")
endif()
# Do nothing/return early if $DEF_FEATURE is already in the list.
list(FIND draco_features_list ${DEF_FEATURE} df_index)
if(NOT df_index EQUAL -1)
return()
endif()
list(LENGTH DEF_TARGETS df_targets_list_length)
if(${df_targets_list_length} EQUAL 0)
list(APPEND draco_features_list ${DEF_FEATURE})
else()
foreach(target ${DEF_TARGETS})
target_compile_definitions(${target} PRIVATE ${DEF_FEATURE})
endforeach()
endif()
endmacro()
# Function for generating draco_features.h.
function(draco_generate_features_h)
file(WRITE "${draco_features_file_name}.new"
"// GENERATED FILE -- DO NOT EDIT\n\n" "#ifndef DRACO_FEATURES_H_\n"
"#define DRACO_FEATURES_H_\n\n")
foreach(feature ${draco_features_list})
file(APPEND "${draco_features_file_name}.new" "#define ${feature}\n")
endforeach()
file(APPEND "${draco_features_file_name}.new"
"\n#endif // DRACO_FEATURES_H_")
# Will replace ${draco_features_file_name} only if the file content has
# changed. This prevents forced Draco rebuilds after CMake runs.
configure_file("${draco_features_file_name}.new"
"${draco_features_file_name}")
file(REMOVE "${draco_features_file_name}.new")
endfunction()
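# --- Illustrative usage (editor's sketch, not part of the original file) ---
# Feature symbols are collected during configuration and flushed once to
# draco_features.h. The symbol and target names below are placeholders:
#
#   draco_enable_feature(FEATURE "DRACO_EXAMPLE_FEATURE")   # global: written to the header
#   draco_enable_feature(FEATURE "DRACO_EXAMPLE_FEATURE"
#                        TARGETS example_target)            # per-target define only
#   draco_generate_features_h()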

View File

@ -0,0 +1,238 @@
if(DRACO_CMAKE_DRACO_FLAGS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_FLAGS_CMAKE_
set(DRACO_CMAKE_DRACO_FLAGS_CMAKE_ 1)
include(CheckCXXCompilerFlag)
include(CheckCXXSourceCompiles)
# Adds compiler flags specified by FLAGS to the sources specified by SOURCES:
#
# draco_set_compiler_flags_for_sources(SOURCES <sources> FLAGS <flags>)
macro(draco_set_compiler_flags_for_sources)
unset(compiler_SOURCES)
unset(compiler_FLAGS)
unset(optional_args)
unset(single_value_args)
set(multi_value_args SOURCES FLAGS)
cmake_parse_arguments(compiler "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT (compiler_SOURCES AND compiler_FLAGS))
draco_die("draco_set_compiler_flags_for_sources: SOURCES and "
"FLAGS required.")
endif()
set_source_files_properties(${compiler_SOURCES} PROPERTIES COMPILE_FLAGS
${compiler_FLAGS})
if(DRACO_VERBOSE GREATER 1)
foreach(source ${compiler_SOURCES})
foreach(flag ${compiler_FLAGS})
message("draco_set_compiler_flags_for_sources: source:${source} "
"flag:${flag}")
endforeach()
endforeach()
endif()
endmacro()
# Tests compiler flags stored in list(s) specified by FLAG_LIST_VAR_NAMES, adds
# flags to $DRACO_CXX_FLAGS when tests pass. Terminates configuration if
# FLAG_REQUIRED is specified and any flag check fails.
#
# ~~~
# draco_test_cxx_flag(<FLAG_LIST_VAR_NAMES <flag list variable(s)>>
# [FLAG_REQUIRED])
# ~~~
macro(draco_test_cxx_flag)
unset(cxx_test_FLAG_LIST_VAR_NAMES)
unset(cxx_test_FLAG_REQUIRED)
unset(single_value_args)
set(optional_args FLAG_REQUIRED)
set(multi_value_args FLAG_LIST_VAR_NAMES)
cmake_parse_arguments(cxx_test "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT cxx_test_FLAG_LIST_VAR_NAMES)
draco_die("draco_test_cxx_flag: FLAG_LIST_VAR_NAMES required")
endif()
unset(cxx_flags)
foreach(list_var ${cxx_test_FLAG_LIST_VAR_NAMES})
if(DRACO_VERBOSE)
message("draco_test_cxx_flag: adding ${list_var} to cxx_flags")
endif()
list(APPEND cxx_flags ${${list_var}})
endforeach()
if(DRACO_VERBOSE)
message("CXX test: all flags: ${cxx_flags}")
endif()
unset(all_cxx_flags)
list(APPEND all_cxx_flags ${DRACO_CXX_FLAGS} ${cxx_flags})
# Turn off output from check_cxx_source_compiles. Print status directly
# instead since the logging messages from check_cxx_source_compiles can be
# quite confusing.
set(CMAKE_REQUIRED_QUIET TRUE)
# Run the actual compile test.
unset(draco_all_cxx_flags_pass CACHE)
message("--- Running combined CXX flags test, flags: ${all_cxx_flags}")
check_cxx_compiler_flag("${all_cxx_flags}" draco_all_cxx_flags_pass)
if(cxx_test_FLAG_REQUIRED AND NOT draco_all_cxx_flags_pass)
draco_die("Flag test failed for required flag(s): "
"${all_cxx_flags} and FLAG_REQUIRED specified.")
endif()
if(draco_all_cxx_flags_pass)
# Test passed: update the global flag list used by the draco target creation
# wrappers.
set(DRACO_CXX_FLAGS ${cxx_flags})
list(REMOVE_DUPLICATES DRACO_CXX_FLAGS)
if(DRACO_VERBOSE)
message("DRACO_CXX_FLAGS=${DRACO_CXX_FLAGS}")
endif()
message("--- Passed combined CXX flags test")
else()
message("--- Failed combined CXX flags test, testing flags individually.")
if(cxx_flags)
message("--- Testing flags from $cxx_flags: " "${cxx_flags}")
foreach(cxx_flag ${cxx_flags})
# Since 3.17.0 check_cxx_compiler_flag() sets a normal variable at
# parent scope while check_cxx_source_compiles() continues to set an
# internal cache variable, so we unset both to avoid the failure /
# success state persisting between checks. This has been fixed in newer
# CMake releases, but 3.17 is pretty common: we will need this to avoid
# weird build breakages while the fix propagates.
unset(cxx_flag_test_passed)
unset(cxx_flag_test_passed CACHE)
message("--- Testing flag: ${cxx_flag}")
check_cxx_compiler_flag("${cxx_flag}" cxx_flag_test_passed)
if(cxx_flag_test_passed)
message("--- Passed test for ${cxx_flag}")
else()
list(REMOVE_ITEM cxx_flags ${cxx_flag})
message("--- Failed test for ${cxx_flag}, flag removed.")
endif()
endforeach()
set(DRACO_CXX_FLAGS ${cxx_flags})
endif()
endif()
if(DRACO_CXX_FLAGS)
list(REMOVE_DUPLICATES DRACO_CXX_FLAGS)
endif()
endmacro()
# Tests executable linker flags stored in list specified by FLAG_LIST_VAR_NAME,
# adds flags to $DRACO_EXE_LINKER_FLAGS when test passes. Terminates
# configuration when flag check fails. draco_set_cxx_flags() must be called
# before calling this macro because it assumes $DRACO_CXX_FLAGS contains only
# valid CXX flags.
#
# draco_test_exe_linker_flag(<FLAG_LIST_VAR_NAME <flag list variable)>)
macro(draco_test_exe_linker_flag)
unset(link_FLAG_LIST_VAR_NAME)
unset(optional_args)
unset(multi_value_args)
set(single_value_args FLAG_LIST_VAR_NAME)
cmake_parse_arguments(link "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT link_FLAG_LIST_VAR_NAME)
draco_die("draco_test_link_flag: FLAG_LIST_VAR_NAME required")
endif()
draco_set_and_stringify(DEST linker_flags SOURCE_VARS
${link_FLAG_LIST_VAR_NAME})
if(DRACO_VERBOSE)
message("EXE LINKER test: all flags: ${linker_flags}")
endif()
# Tests of $DRACO_CXX_FLAGS have already passed. Include them with the linker
# test.
draco_set_and_stringify(DEST CMAKE_REQUIRED_FLAGS SOURCE_VARS DRACO_CXX_FLAGS)
# Cache the global exe linker flags.
if(CMAKE_EXE_LINKER_FLAGS)
set(cached_CMAKE_EXE_LINKER_FLAGS ${CMAKE_EXE_LINKER_FLAGS})
draco_set_and_stringify(DEST CMAKE_EXE_LINKER_FLAGS SOURCE ${linker_flags})
endif()
draco_set_and_stringify(DEST CMAKE_EXE_LINKER_FLAGS SOURCE ${linker_flags}
${CMAKE_EXE_LINKER_FLAGS})
# Turn off output from check_cxx_source_compiles. Print status directly
# instead since the logging messages from check_cxx_source_compiles can be
# quite confusing.
set(CMAKE_REQUIRED_QUIET TRUE)
message("--- Running EXE LINKER test for flags: ${linker_flags}")
unset(linker_flag_test_passed CACHE)
set(draco_cxx_main "\nint main() { return 0; }")
check_cxx_source_compiles("${draco_cxx_main}" linker_flag_test_passed)
if(NOT linker_flag_test_passed)
draco_die("EXE LINKER test failed.")
endif()
message("--- Passed EXE LINKER flag test.")
# Restore cached global exe linker flags.
if(cached_CMAKE_EXE_LINKER_FLAGS)
set(CMAKE_EXE_LINKER_FLAGS ${cached_CMAKE_EXE_LINKER_FLAGS})
else()
unset(CMAKE_EXE_LINKER_FLAGS)
endif()
endmacro()
# Runs the draco compiler tests. This macro builds up the list of list var(s)
# that is passed to draco_test_cxx_flag().
#
# Note: draco_set_build_definitions() must be called before this macro.
macro(draco_set_cxx_flags)
unset(cxx_flag_lists)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU")
list(APPEND cxx_flag_lists draco_base_cxx_flags)
endif()
# Append clang flags after the base set to allow -Wno* overrides to take
# effect. Some of the base flags may enable a large set of warnings, e.g.,
# -Wall.
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
list(APPEND cxx_flag_lists draco_clang_cxx_flags)
endif()
if(MSVC)
list(APPEND cxx_flag_lists draco_msvc_cxx_flags)
endif()
draco_set_and_stringify(DEST cxx_flags SOURCE_VARS ${cxx_flag_lists})
if(DRACO_VERBOSE)
message("draco_set_cxx_flags: internal CXX flags: ${cxx_flags}")
endif()
if(DRACO_CXX_FLAGS)
list(APPEND cxx_flag_lists DRACO_CXX_FLAGS)
if(DRACO_VERBOSE)
message("draco_set_cxx_flags: user CXX flags: ${DRACO_CXX_FLAGS}")
endif()
endif()
draco_set_and_stringify(DEST cxx_flags SOURCE_VARS ${cxx_flag_lists})
if(cxx_flags)
draco_test_cxx_flag(FLAG_LIST_VAR_NAMES ${cxx_flag_lists})
endif()
endmacro()
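# --- Illustrative usage (editor's sketch, not part of the original file) ---
# Flags are accumulated in list variables and validated in one combined pass,
# falling back to per-flag checks on failure. The list contents below are
# placeholders:
#
#   list(APPEND example_cxx_flags "-Wall" "-Wextra")
#   draco_test_cxx_flag(FLAG_LIST_VAR_NAMES example_cxx_flags)
#   # Surviving flags are now in DRACO_CXX_FLAGS.
#   list(APPEND example_linker_flags "-Wl,--gc-sections")
#   draco_test_exe_linker_flag(FLAG_LIST_VAR_NAME example_linker_flags)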

View File

@ -0,0 +1,110 @@
if(DRACO_CMAKE_DRACO_HELPERS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_HELPERS_CMAKE_
set(DRACO_CMAKE_DRACO_HELPERS_CMAKE_ 1)
# Kills build generation using message(FATAL_ERROR) and outputs all data passed
# to the console via use of $ARGN.
macro(draco_die)
message(FATAL_ERROR ${ARGN})
endmacro()
# Converts semi-colon delimited list variable(s) to string. Output is written to
# variable supplied via the DEST parameter. Input is from an expanded variable
# referenced by SOURCE and/or variable(s) referenced by SOURCE_VARS.
macro(draco_set_and_stringify)
set(optional_args)
set(single_value_args DEST SOURCE_VAR)
set(multi_value_args SOURCE SOURCE_VARS)
cmake_parse_arguments(sas "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT sas_DEST OR NOT (sas_SOURCE OR sas_SOURCE_VARS))
draco_die("draco_set_and_stringify: DEST and at least one of SOURCE "
"SOURCE_VARS required.")
endif()
unset(${sas_DEST})
if(sas_SOURCE)
# $sas_SOURCE is one or more expanded variables, just copy the values to
# $sas_DEST.
set(${sas_DEST} "${sas_SOURCE}")
endif()
if(sas_SOURCE_VARS)
# $sas_SOURCE_VARS is one or more variable names. Each iteration expands a
# variable and appends it to $sas_DEST.
foreach(source_var ${sas_SOURCE_VARS})
set(${sas_DEST} "${${sas_DEST}} ${${source_var}}")
endforeach()
# Because $sas_DEST can be empty when entering this scope leading whitespace
# can be introduced to $sas_DEST on the first iteration of the above loop.
# Remove it:
string(STRIP "${${sas_DEST}}" ${sas_DEST})
endif()
# Lists in CMake are simply semicolon delimited strings, so stringification is
# just a find and replace of the semicolon.
string(REPLACE ";" " " ${sas_DEST} "${${sas_DEST}}")
if(DRACO_VERBOSE GREATER 1)
message("draco_set_and_stringify: ${sas_DEST}=${${sas_DEST}}")
endif()
endmacro()
# Creates a dummy source file in $DRACO_GENERATED_SOURCES_DIRECTORY and adds it
# to the specified target. Optionally adds its path to a list variable.
#
# draco_create_dummy_source_file(<TARGET <target> BASENAME <basename of file>>
# [LISTVAR <list variable>])
macro(draco_create_dummy_source_file)
set(optional_args)
set(single_value_args TARGET BASENAME LISTVAR)
set(multi_value_args)
cmake_parse_arguments(cdsf "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT cdsf_TARGET OR NOT cdsf_BASENAME)
draco_die("draco_create_dummy_source_file: TARGET and BASENAME required.")
endif()
if(NOT DRACO_GENERATED_SOURCES_DIRECTORY)
set(DRACO_GENERATED_SOURCES_DIRECTORY "${draco_build}/gen_src")
endif()
set(dummy_source_dir "${DRACO_GENERATED_SOURCES_DIRECTORY}")
set(dummy_source_file
"${dummy_source_dir}/draco_${cdsf_TARGET}_${cdsf_BASENAME}.cc")
set(dummy_source_code
"// Generated file. DO NOT EDIT!\n"
"// C++ source file created for target ${cdsf_TARGET}.\n"
"void draco_${cdsf_TARGET}_${cdsf_BASENAME}_dummy_function(void)\;\n"
"void draco_${cdsf_TARGET}_${cdsf_BASENAME}_dummy_function(void) {}\n")
file(WRITE "${dummy_source_file}" ${dummy_source_code})
target_sources(${cdsf_TARGET} PRIVATE ${dummy_source_file})
if(cdsf_LISTVAR)
list(APPEND ${cdsf_LISTVAR} "${dummy_source_file}")
endif()
endmacro()
# Loads the version string from $draco_source/draco/version.h and sets
# $DRACO_VERSION.
macro(draco_load_version_info)
file(STRINGS "${draco_src_root}/core/draco_version.h" version_file_strings)
foreach(str ${version_file_strings})
if(str MATCHES "char kDracoVersion")
string(FIND "${str}" "\"" open_quote_pos)
string(FIND "${str}" ";" semicolon_pos)
math(EXPR open_quote_pos "${open_quote_pos} + 1")
math(EXPR close_quote_pos "${semicolon_pos} - 1")
math(EXPR version_string_length "${close_quote_pos} - ${open_quote_pos}")
string(SUBSTRING "${str}" ${open_quote_pos} ${version_string_length}
DRACO_VERSION)
break()
endif()
endforeach()
endmacro()
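# --- Illustrative usage (editor's sketch, not part of the original file) ---
# draco_set_and_stringify() flattens CMake lists into space-separated strings;
# draco_create_dummy_source_file() keeps linkers happy for targets whose
# sources are all conditional. Names below are placeholders:
#
#   set(example_list "-Wall;-Wextra;-Wshadow")
#   draco_set_and_stringify(DEST example_string SOURCE_VARS example_list)
#   # example_string is now "-Wall -Wextra -Wshadow"
#   add_library(example_target STATIC)
#   draco_create_dummy_source_file(TARGET example_target BASENAME dummy)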

View File

@ -0,0 +1,79 @@
if(DRACO_CMAKE_DRACO_INSTALL_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_INSTALL_CMAKE_
set(DRACO_CMAKE_DRACO_INSTALL_CMAKE_ 1)
# Sets up the draco install targets. Must be called after the static library
# target is created.
macro(draco_setup_install_target)
include(GNUInstallDirs)
# pkg-config: draco.pc
set(prefix "${CMAKE_INSTALL_PREFIX}")
set(exec_prefix "\${prefix}")
set(libdir "\${prefix}/${CMAKE_INSTALL_LIBDIR}")
set(includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}")
set(draco_lib_name "draco")
configure_file("${draco_root}/cmake/draco.pc.template"
"${draco_build}/draco.pc" @ONLY NEWLINE_STYLE UNIX)
install(FILES "${draco_build}/draco.pc"
DESTINATION "${prefix}/${CMAKE_INSTALL_LIBDIR}/pkgconfig")
# CMake config: draco-config.cmake
set(DRACO_INCLUDE_DIRS "${prefix}/${CMAKE_INSTALL_INCLUDEDIR}")
configure_file("${draco_root}/cmake/draco-config.cmake.template"
"${draco_build}/draco-config.cmake" @ONLY NEWLINE_STYLE UNIX)
install(
FILES "${draco_build}/draco-config.cmake"
DESTINATION "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/cmake")
foreach(file ${draco_sources})
if(file MATCHES "h$")
list(APPEND draco_api_includes ${file})
endif()
endforeach()
# Strip $draco_src_root from the file paths: we need to install relative to
# $include_directory.
list(TRANSFORM draco_api_includes REPLACE "${draco_src_root}/" "")
set(include_directory "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}")
foreach(draco_api_include ${draco_api_includes})
get_filename_component(file_directory ${draco_api_include} DIRECTORY)
set(target_directory "${include_directory}/draco/${file_directory}")
install(FILES ${draco_src_root}/${draco_api_include}
DESTINATION "${target_directory}")
endforeach()
install(
FILES "${draco_build}/draco/draco_features.h"
DESTINATION "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/draco/")
install(TARGETS draco_decoder DESTINATION
"${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}")
install(TARGETS draco_encoder DESTINATION
"${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}")
if(WIN32)
install(TARGETS draco DESTINATION
"${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
else()
install(TARGETS draco_static DESTINATION
"${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
if(BUILD_SHARED_LIBS)
install(TARGETS draco_shared DESTINATION
"${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
endif()
endif()
if(DRACO_UNITY_PLUGIN)
install(TARGETS dracodec_unity DESTINATION
"${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
endif()
if(DRACO_MAYA_PLUGIN)
install(TARGETS draco_maya_wrapper DESTINATION
"${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
endif()
endmacro()

View File

@ -0,0 +1,96 @@
if(DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_
set(DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_ 1)
# Returns the compiler flag for the SIMD intrinsics suffix specified by the
# SUFFIX argument via the variable specified by the VARIABLE argument:
# draco_get_intrinsics_flag_for_suffix(SUFFIX <suffix> VARIABLE <var name>)
macro(draco_get_intrinsics_flag_for_suffix)
unset(intrinsics_SUFFIX)
unset(intrinsics_VARIABLE)
unset(optional_args)
unset(multi_value_args)
set(single_value_args SUFFIX VARIABLE)
cmake_parse_arguments(intrinsics "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT (intrinsics_SUFFIX AND intrinsics_VARIABLE))
message(FATAL_ERROR "draco_get_intrinsics_flag_for_suffix: SUFFIX and "
"VARIABLE required.")
endif()
if(intrinsics_SUFFIX MATCHES "neon")
if(NOT MSVC)
set(${intrinsics_VARIABLE} "${DRACO_NEON_INTRINSICS_FLAG}")
endif()
elseif(intrinsics_SUFFIX MATCHES "sse4")
if(NOT MSVC)
set(${intrinsics_VARIABLE} "-msse4.1")
endif()
else()
message(FATAL_ERROR "draco_get_intrinsics_flag_for_suffix: Unknown "
"instrinics suffix: ${intrinsics_SUFFIX}")
endif()
if(DRACO_VERBOSE GREATER 1)
message("draco_get_intrinsics_flag_for_suffix: "
"suffix:${intrinsics_SUFFIX} flag:${${intrinsics_VARIABLE}}")
endif()
endmacro()
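# A minimal usage sketch (the variable name is illustrative); on a non-MSVC
# compiler this would leave "-msse4.1" in sse4_flags:
#   draco_get_intrinsics_flag_for_suffix(SUFFIX "sse4" VARIABLE sse4_flags)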
# Processes source files specified by SOURCES and adds intrinsics flags as
# necessary: draco_process_intrinsics_sources(SOURCES <sources>)
#
# Detects requirement for intrinsics flags using source file name suffix.
# Currently supports only SSE4.1.
macro(draco_process_intrinsics_sources)
unset(arg_TARGET)
unset(arg_SOURCES)
unset(optional_args)
set(single_value_args TARGET)
set(multi_value_args SOURCES)
cmake_parse_arguments(arg "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT (arg_TARGET AND arg_SOURCES))
message(FATAL_ERROR "draco_process_intrinsics_sources: TARGET and "
"SOURCES required.")
endif()
if(DRACO_ENABLE_SSE4_1 AND draco_have_sse4)
unset(sse4_sources)
list(APPEND sse4_sources ${arg_SOURCES})
list(FILTER sse4_sources INCLUDE REGEX
"${draco_sse4_source_file_suffix}$")
if(sse4_sources)
unset(sse4_flags)
draco_get_intrinsics_flag_for_suffix(SUFFIX
${draco_sse4_source_file_suffix}
VARIABLE sse4_flags)
if(sse4_flags)
draco_set_compiler_flags_for_sources(SOURCES ${sse4_sources} FLAGS
${sse4_flags})
endif()
endif()
endif()
if(DRACO_ENABLE_NEON AND draco_have_neon)
unset(neon_sources)
list(APPEND neon_sources ${arg_SOURCES})
list(FILTER neon_sources INCLUDE REGEX
"${draco_neon_source_file_suffix}$")
if(neon_sources AND DRACO_NEON_INTRINSICS_FLAG)
unset(neon_flags)
draco_get_intrinsics_flag_for_suffix(SUFFIX
${draco_neon_source_file_suffix}
VARIABLE neon_flags)
if(neon_flags)
draco_set_compiler_flags_for_sources(SOURCES ${neon_sources} FLAGS
${neon_flags})
endif()
endif()
endif()
endmacro()
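# A hypothetical call, mirroring how the library and executable macros below
# use this (target and source list names are illustrative):
#   draco_process_intrinsics_sources(TARGET my_target SOURCES ${my_sources})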

View File

@ -0,0 +1,239 @@
if(DRACO_CMAKE_DRACO_OPTIONS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_OPTIONS_CMAKE_
set(DRACO_CMAKE_DRACO_OPTIONS_CMAKE_ 1)
set(draco_features_file_name "${draco_build}/draco/draco_features.h")
set(draco_features_list)
# Simple wrapper for CMake's builtin option command that tracks draco's build
# options in the list variable $draco_options.
macro(draco_option)
unset(option_NAME)
unset(option_HELPSTRING)
unset(option_VALUE)
unset(optional_args)
unset(multi_value_args)
set(single_value_args NAME HELPSTRING VALUE)
cmake_parse_arguments(option "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT (option_NAME AND option_HELPSTRING AND DEFINED option_VALUE))
message(FATAL_ERROR "draco_option: NAME HELPSTRING and VALUE required.")
endif()
option(${option_NAME} ${option_HELPSTRING} ${option_VALUE})
if(DRACO_VERBOSE GREATER 2)
message("--------- draco_option ---------\n" "option_NAME=${option_NAME}\n"
"option_HELPSTRING=${option_HELPSTRING}\n"
"option_VALUE=${option_VALUE}\n"
"------------------------------------------\n")
endif()
list(APPEND draco_options ${option_NAME})
list(REMOVE_DUPLICATES draco_options)
endmacro()
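# A hypothetical invocation (option name and help text are illustrative):
#   draco_option(NAME DRACO_EXAMPLE HELPSTRING "Example option." VALUE OFF)
# This defines the cache option and records DRACO_EXAMPLE in $draco_options.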
# Dumps the $draco_options list via CMake message command.
macro(draco_dump_options)
foreach(option_name ${draco_options})
message("${option_name}: ${${option_name}}")
endforeach()
endmacro()
# Set default options.
macro(draco_set_default_options)
draco_option(NAME DRACO_FAST HELPSTRING "Try to build faster libs." VALUE OFF)
draco_option(NAME DRACO_JS_GLUE HELPSTRING
"Enable JS Glue and JS targets when using Emscripten." VALUE ON)
draco_option(NAME DRACO_IE_COMPATIBLE HELPSTRING
"Enable support for older IE builds when using Emscripten." VALUE
OFF)
draco_option(NAME DRACO_MESH_COMPRESSION HELPSTRING "Enable mesh compression."
VALUE ON)
draco_option(NAME DRACO_POINT_CLOUD_COMPRESSION HELPSTRING
"Enable point cloud compression." VALUE ON)
draco_option(NAME DRACO_PREDICTIVE_EDGEBREAKER HELPSTRING
"Enable predictive edgebreaker." VALUE ON)
draco_option(NAME DRACO_STANDARD_EDGEBREAKER HELPSTRING
"Enable stand edgebreaker." VALUE ON)
draco_option(NAME DRACO_BACKWARDS_COMPATIBILITY HELPSTRING
"Enable backwards compatibility." VALUE ON)
draco_option(NAME DRACO_DECODER_ATTRIBUTE_DEDUPLICATION HELPSTRING
"Enable attribute deduping." VALUE OFF)
draco_option(NAME DRACO_TESTS HELPSTRING "Enables tests." VALUE OFF)
draco_option(NAME DRACO_WASM HELPSTRING "Enables WASM support." VALUE OFF)
draco_option(NAME DRACO_UNITY_PLUGIN HELPSTRING
"Build plugin library for Unity." VALUE OFF)
draco_option(NAME DRACO_ANIMATION_ENCODING HELPSTRING "Enable animation."
VALUE OFF)
draco_option(NAME DRACO_GLTF HELPSTRING "Support GLTF." VALUE OFF)
draco_option(NAME DRACO_MAYA_PLUGIN HELPSTRING
"Build plugin library for Maya." VALUE OFF)
draco_check_deprecated_options()
endmacro()
# Warns when a deprecated option is used and sets the option that replaced it.
macro(draco_handle_deprecated_option)
unset(option_OLDNAME)
unset(option_NEWNAME)
unset(optional_args)
unset(multi_value_args)
set(single_value_args OLDNAME NEWNAME)
cmake_parse_arguments(option "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if("${${option_OLDNAME}}")
message(WARNING "${option_OLDNAME} is deprecated. Use ${option_NEWNAME}.")
set(${option_NEWNAME} ${${option_OLDNAME}})
endif()
endmacro()
# Checks for use of deprecated options.
macro(draco_check_deprecated_options)
draco_handle_deprecated_option(OLDNAME ENABLE_EXTRA_SPEED NEWNAME DRACO_FAST)
draco_handle_deprecated_option(OLDNAME ENABLE_JS_GLUE NEWNAME DRACO_JS_GLUE)
draco_handle_deprecated_option(OLDNAME ENABLE_MESH_COMPRESSION NEWNAME
DRACO_MESH_COMPRESSION)
draco_handle_deprecated_option(OLDNAME ENABLE_POINT_CLOUD_COMPRESSION NEWNAME
DRACO_POINT_CLOUD_COMPRESSION)
draco_handle_deprecated_option(OLDNAME ENABLE_PREDICTIVE_EDGEBREAKER NEWNAME
DRACO_PREDICTIVE_EDGEBREAKER)
draco_handle_deprecated_option(OLDNAME ENABLE_STANDARD_EDGEBREAKER NEWNAME
DRACO_STANDARD_EDGEBREAKER)
draco_handle_deprecated_option(OLDNAME ENABLE_BACKWARDS_COMPATIBILITY NEWNAME
DRACO_BACKWARDS_COMPATIBILITY)
draco_handle_deprecated_option(OLDNAME ENABLE_DECODER_ATTRIBUTE_DEDUPLICATION
NEWNAME DRACO_DECODER_ATTRIBUTE_DEDUPLICATION)
draco_handle_deprecated_option(OLDNAME ENABLE_TESTS NEWNAME DRACO_TESTS)
draco_handle_deprecated_option(OLDNAME ENABLE_WASM NEWNAME DRACO_WASM)
draco_handle_deprecated_option(OLDNAME BUILD_UNITY_PLUGIN NEWNAME
DRACO_UNITY_PLUGIN)
draco_handle_deprecated_option(OLDNAME BUILD_ANIMATION_ENCODING NEWNAME
DRACO_ANIMATION_ENCODING)
draco_handle_deprecated_option(OLDNAME BUILD_FOR_GLTF NEWNAME DRACO_GLTF)
draco_handle_deprecated_option(OLDNAME BUILD_MAYA_PLUGIN NEWNAME
DRACO_MAYA_PLUGIN)
draco_handle_deprecated_option(OLDNAME BUILD_USD_PLUGIN NEWNAME
BUILD_SHARED_LIBS)
endmacro()
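# For example, a configure run using a legacy flag (illustrative invocation):
#   cmake path/to/draco -DENABLE_TESTS=ON
# warns that ENABLE_TESTS is deprecated and enables DRACO_TESTS instead.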
# Macro for setting Draco features based on user configuration. Features enabled
# by this macro are Draco global.
macro(draco_set_optional_features)
if(DRACO_GLTF)
# Override settings when building for GLTF.
draco_enable_feature(FEATURE "DRACO_MESH_COMPRESSION_SUPPORTED")
draco_enable_feature(FEATURE "DRACO_NORMAL_ENCODING_SUPPORTED")
draco_enable_feature(FEATURE "DRACO_STANDARD_EDGEBREAKER_SUPPORTED")
else()
if(DRACO_POINT_CLOUD_COMPRESSION)
draco_enable_feature(FEATURE "DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED")
endif()
if(DRACO_MESH_COMPRESSION)
draco_enable_feature(FEATURE "DRACO_MESH_COMPRESSION_SUPPORTED")
draco_enable_feature(FEATURE "DRACO_NORMAL_ENCODING_SUPPORTED")
if(DRACO_STANDARD_EDGEBREAKER)
draco_enable_feature(FEATURE "DRACO_STANDARD_EDGEBREAKER_SUPPORTED")
endif()
if(DRACO_PREDICTIVE_EDGEBREAKER)
draco_enable_feature(FEATURE "DRACO_PREDICTIVE_EDGEBREAKER_SUPPORTED")
endif()
endif()
if(DRACO_BACKWARDS_COMPATIBILITY)
draco_enable_feature(FEATURE "DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED")
endif()
if(NOT EMSCRIPTEN)
# For now, enable deduplication for both encoder and decoder.
# TODO(ostava): Support for disabling attribute deduplication for the C++
# decoder is planned in future releases.
draco_enable_feature(FEATURE
DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED)
draco_enable_feature(FEATURE
DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED)
endif()
endif()
if(DRACO_UNITY_PLUGIN)
draco_enable_feature(FEATURE "DRACO_UNITY_PLUGIN")
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
if(DRACO_MAYA_PLUGIN)
draco_enable_feature(FEATURE "DRACO_MAYA_PLUGIN")
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
endmacro()
# Macro that handles tracking of Draco preprocessor symbols for the purpose of
# producing draco_features.h.
#
# ~~~
# draco_enable_feature(FEATURE <feature_name> [TARGETS <target_name>])
# ~~~
#
# FEATURE is required. It should be a Draco preprocessor symbol. TARGETS is
# optional. It can be one or more draco targets.
#
# When the TARGETS argument is not present the preprocessor symbol is added to
# draco_features.h. When TARGETS is present, draco_features.h is left unchanged
# and target_compile_definitions() is called for each target specified.
macro(draco_enable_feature)
set(def_flags)
set(def_single_arg_opts FEATURE)
set(def_multi_arg_opts TARGETS)
cmake_parse_arguments(DEF "${def_flags}" "${def_single_arg_opts}"
"${def_multi_arg_opts}" ${ARGN})
if("${DEF_FEATURE}" STREQUAL "")
message(FATAL_ERROR "Empty FEATURE passed to draco_enable_feature().")
endif()
# Do nothing/return early if $DEF_FEATURE is already in the list.
list(FIND draco_features_list ${DEF_FEATURE} df_index)
if(NOT df_index EQUAL -1)
return()
endif()
list(LENGTH DEF_TARGETS df_targets_list_length)
if(${df_targets_list_length} EQUAL 0)
list(APPEND draco_features_list ${DEF_FEATURE})
else()
foreach(target ${DEF_TARGETS})
target_compile_definitions(${target} PRIVATE ${DEF_FEATURE})
endforeach()
endif()
endmacro()
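# Hypothetical examples of both forms (the target name is illustrative):
#   draco_enable_feature(FEATURE "DRACO_MESH_COMPRESSION_SUPPORTED")
#   draco_enable_feature(FEATURE "DRACO_EXAMPLE_DEFINE" TARGETS draco_static)
# The first adds the symbol to draco_features.h; the second only adds a
# compile definition to the listed targets.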
# Function for generating draco_features.h.
function(draco_generate_features_h)
file(WRITE "${draco_features_file_name}.new"
"// GENERATED FILE -- DO NOT EDIT\n\n" "#ifndef DRACO_FEATURES_H_\n"
"#define DRACO_FEATURES_H_\n\n")
foreach(feature ${draco_features_list})
file(APPEND "${draco_features_file_name}.new" "#define ${feature}\n")
endforeach()
file(APPEND "${draco_features_file_name}.new"
"\n#endif // DRACO_FEATURES_H_")
# Will replace ${draco_features_file_name} only if the file content has
# changed. This prevents forced Draco rebuilds after CMake runs.
configure_file("${draco_features_file_name}.new"
"${draco_features_file_name}")
file(REMOVE "${draco_features_file_name}.new")
endfunction()
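# With mesh compression enabled, the generated draco_features.h would look
# roughly like this (the feature list shown is illustrative):
#   // GENERATED FILE -- DO NOT EDIT
#   #ifndef DRACO_FEATURES_H_
#   #define DRACO_FEATURES_H_
#   #define DRACO_MESH_COMPRESSION_SUPPORTED
#   #endif // DRACO_FEATURES_H_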
# Sets default options for the build and processes user controlled options to
# compute enabled features.
macro(draco_setup_options)
draco_set_default_options()
draco_set_optional_features()
endmacro()

View File

@ -0,0 +1,32 @@
if(DRACO_CMAKE_DRACO_SANITIZER_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_SANITIZER_CMAKE_
set(DRACO_CMAKE_DRACO_SANITIZER_CMAKE_ 1)
# Handles the details of enabling sanitizers.
macro(draco_configure_sanitizer)
if(DRACO_SANITIZE AND NOT MSVC)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
if(DRACO_SANITIZE MATCHES "cfi")
list(APPEND DRACO_CXX_FLAGS "-flto" "-fno-sanitize-trap=cfi")
list(APPEND DRACO_EXE_LINKER_FLAGS "-flto" "-fno-sanitize-trap=cfi"
"-fuse-ld=gold")
endif()
if(${CMAKE_SIZEOF_VOID_P} EQUAL 4
AND DRACO_SANITIZE MATCHES "integer|undefined")
list(APPEND DRACO_EXE_LINKER_FLAGS "--rtlib=compiler-rt" "-lgcc_s")
endif()
endif()
list(APPEND DRACO_CXX_FLAGS "-fsanitize=${DRACO_SANITIZE}")
list(APPEND DRACO_EXE_LINKER_FLAGS "-fsanitize=${DRACO_SANITIZE}")
# Make sanitizer callstacks accurate.
list(APPEND DRACO_CXX_FLAGS "-fno-omit-frame-pointer"
"-fno-optimize-sibling-calls")
draco_test_cxx_flag(FLAG_LIST_VAR_NAMES DRACO_CXX_FLAGS FLAG_REQUIRED)
draco_test_exe_linker_flag(FLAG_LIST_VAR_NAME DRACO_EXE_LINKER_FLAGS)
endif()
endmacro()
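# A hypothetical way to exercise this path (sanitizer name is illustrative):
#   cmake path/to/draco -DDRACO_SANITIZE=address
# which appends -fsanitize=address to DRACO_CXX_FLAGS and the linker flags.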

View File

@ -0,0 +1,349 @@
if(DRACO_CMAKE_DRACO_TARGETS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_TARGETS_CMAKE_
set(DRACO_CMAKE_DRACO_TARGETS_CMAKE_ 1)
# Resets list variables used to track draco targets.
macro(draco_reset_target_lists)
unset(draco_targets)
unset(draco_exe_targets)
unset(draco_lib_targets)
unset(draco_objlib_targets)
unset(draco_module_targets)
unset(draco_sources)
unset(draco_test_targets)
endmacro()
# Creates an executable target. The target name is passed as a parameter to the
# NAME argument, and the sources passed as a parameter to the SOURCES argument:
# draco_add_executable(NAME <name> SOURCES <sources> [optional args])
#
# Optional args:
# cmake-format: off
# - OUTPUT_NAME: Override output file basename. Target basename defaults to
# NAME.
# - TEST: Flag. Presence means treat executable as a test.
# - DEFINES: List of preprocessor macro definitions.
# - INCLUDES: list of include directories for the target.
# - COMPILE_FLAGS: list of compiler flags for the target.
# - LINK_FLAGS: List of linker flags for the target.
# - OBJLIB_DEPS: List of CMake object library target dependencies.
# - LIB_DEPS: List of CMake library dependencies.
# cmake-format: on
#
# Sources passed to this macro are added to $draco_test_sources when TEST is
# specified. Otherwise sources are added to $draco_sources.
#
# Targets passed to this macro are always added to the $draco_targets list. When
# TEST is specified targets are also added to the $draco_test_targets list.
# Otherwise targets are added to $draco_exe_targets.
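#
# A hypothetical call (names other than draco_static are illustrative):
#   draco_add_executable(NAME example_tool SOURCES ${example_tool_sources} LIB_DEPS draco_static)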
macro(draco_add_executable)
unset(exe_TEST)
unset(exe_TEST_DEFINES_MAIN)
unset(exe_NAME)
unset(exe_OUTPUT_NAME)
unset(exe_SOURCES)
unset(exe_DEFINES)
unset(exe_INCLUDES)
unset(exe_COMPILE_FLAGS)
unset(exe_LINK_FLAGS)
unset(exe_OBJLIB_DEPS)
unset(exe_LIB_DEPS)
set(optional_args TEST)
set(single_value_args NAME OUTPUT_NAME)
set(multi_value_args SOURCES DEFINES INCLUDES COMPILE_FLAGS LINK_FLAGS
OBJLIB_DEPS LIB_DEPS)
cmake_parse_arguments(exe "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(DRACO_VERBOSE GREATER 1)
message("--------- draco_add_executable ---------\n"
"exe_TEST=${exe_TEST}\n"
"exe_TEST_DEFINES_MAIN=${exe_TEST_DEFINES_MAIN}\n"
"exe_NAME=${exe_NAME}\n"
"exe_OUTPUT_NAME=${exe_OUTPUT_NAME}\n"
"exe_SOURCES=${exe_SOURCES}\n"
"exe_DEFINES=${exe_DEFINES}\n"
"exe_INCLUDES=${exe_INCLUDES}\n"
"exe_COMPILE_FLAGS=${exe_COMPILE_FLAGS}\n"
"exe_LINK_FLAGS=${exe_LINK_FLAGS}\n"
"exe_OBJLIB_DEPS=${exe_OBJLIB_DEPS}\n"
"exe_LIB_DEPS=${exe_LIB_DEPS}\n"
"------------------------------------------\n")
endif()
if(NOT (exe_NAME AND exe_SOURCES))
message(FATAL_ERROR "draco_add_executable: NAME and SOURCES required.")
endif()
list(APPEND draco_targets ${exe_NAME})
if(exe_TEST)
list(APPEND draco_test_targets ${exe_NAME})
list(APPEND draco_test_sources ${exe_SOURCES})
else()
list(APPEND draco_exe_targets ${exe_NAME})
list(APPEND draco_sources ${exe_SOURCES})
endif()
add_executable(${exe_NAME} ${exe_SOURCES})
if(exe_OUTPUT_NAME)
set_target_properties(${exe_NAME} PROPERTIES OUTPUT_NAME ${exe_OUTPUT_NAME})
endif()
draco_process_intrinsics_sources(TARGET ${exe_NAME} SOURCES ${exe_SOURCES})
if(exe_DEFINES)
target_compile_definitions(${exe_NAME} PRIVATE ${exe_DEFINES})
endif()
if(exe_INCLUDES)
target_include_directories(${exe_NAME} PRIVATE ${exe_INCLUDES})
endif()
if(exe_COMPILE_FLAGS OR DRACO_CXX_FLAGS)
target_compile_options(${exe_NAME}
PRIVATE ${exe_COMPILE_FLAGS} ${DRACO_CXX_FLAGS})
endif()
if(exe_LINK_FLAGS OR DRACO_EXE_LINKER_FLAGS)
if(${CMAKE_VERSION} VERSION_LESS "3.13")
# CMake < 3.13 lacks target_link_options(); LINK_FLAGS expects a single
# space-separated string, so join the flag lists before setting it.
set(link_flags ${exe_LINK_FLAGS} ${DRACO_EXE_LINKER_FLAGS})
string(REPLACE ";" " " link_flags "${link_flags}")
set_target_properties(${exe_NAME} PROPERTIES LINK_FLAGS "${link_flags}")
else()
target_link_options(${exe_NAME} PRIVATE ${exe_LINK_FLAGS}
${DRACO_EXE_LINKER_FLAGS})
endif()
endif()
if(exe_OBJLIB_DEPS)
foreach(objlib_dep ${exe_OBJLIB_DEPS})
target_sources(${exe_NAME} PRIVATE $<TARGET_OBJECTS:${objlib_dep}>)
endforeach()
endif()
if(CMAKE_THREAD_LIBS_INIT)
list(APPEND exe_LIB_DEPS ${CMAKE_THREAD_LIBS_INIT})
endif()
if(BUILD_SHARED_LIBS AND (MSVC OR WIN32))
target_compile_definitions(${exe_NAME} PRIVATE "DRACO_BUILDING_DLL=0")
endif()
if(exe_LIB_DEPS)
unset(exe_static)
if("${CMAKE_EXE_LINKER_FLAGS} ${DRACO_EXE_LINKER_FLAGS}" MATCHES "static")
set(exe_static ON)
endif()
if(exe_static AND CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU")
# Third party dependencies can introduce dependencies on system and test
# libraries. Since the target created here is an executable, and CMake
# does not provide a method of controlling order of link dependencies,
# wrap all of the dependencies of this target in start/end group flags to
# ensure that dependencies of third party targets can be resolved when
# those dependencies happen to be resolved by dependencies of the current
# target.
list(INSERT exe_LIB_DEPS 0 -Wl,--start-group)
list(APPEND exe_LIB_DEPS -Wl,--end-group)
endif()
target_link_libraries(${exe_NAME} PRIVATE ${exe_LIB_DEPS})
endif()
endmacro()
# Creates a library target of the specified type. The target name is passed as a
# parameter to the NAME argument, the type as a parameter to the TYPE argument,
# and the sources passed as a parameter to the SOURCES argument:
# draco_add_library(NAME <name> TYPE <type> SOURCES <sources> [optional args])
#
# Optional args:
# cmake-format: off
# - OUTPUT_NAME: Override output file basename. Target basename defaults to
# NAME. OUTPUT_NAME is ignored when BUILD_SHARED_LIBS is enabled and CMake
# is generating a build for which MSVC or WIN32 are true. This is to avoid
# output basename collisions with DLL import libraries.
# - TEST: Flag. Presence means treat library as a test.
# - DEFINES: List of preprocessor macro definitions.
# - INCLUDES: list of include directories for the target.
# - COMPILE_FLAGS: list of compiler flags for the target.
# - LINK_FLAGS: List of linker flags for the target.
# - OBJLIB_DEPS: List of CMake object library target dependencies.
# - LIB_DEPS: List of CMake library dependencies.
# - PUBLIC_INCLUDES: List of include paths to export to dependents.
# cmake-format: on
#
# Sources passed to the macro are added to the lists tracking draco sources:
# cmake-format: off
# - When TEST is specified sources are added to $draco_test_sources.
# - Otherwise sources are added to $draco_sources.
# cmake-format: on
#
# Targets passed to this macro are added to the lists tracking draco targets:
# cmake-format: off
# - Targets are always added to $draco_targets.
# - When the TEST flag is specified, targets are added to
# $draco_test_targets.
# - When TEST is not specified:
# - Libraries of type SHARED are added to $draco_dylib_targets.
# - Libraries of type OBJECT are added to $draco_objlib_targets.
# - Libraries of type STATIC are added to $draco_lib_targets.
# cmake-format: on
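#
# A hypothetical call (names are illustrative):
#   draco_add_library(NAME draco_example TYPE STATIC SOURCES ${example_sources} DEFINES ${draco_defines})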
macro(draco_add_library)
unset(lib_TEST)
unset(lib_NAME)
unset(lib_OUTPUT_NAME)
unset(lib_TYPE)
unset(lib_SOURCES)
unset(lib_DEFINES)
unset(lib_INCLUDES)
unset(lib_COMPILE_FLAGS)
unset(lib_LINK_FLAGS)
unset(lib_OBJLIB_DEPS)
unset(lib_LIB_DEPS)
unset(lib_PUBLIC_INCLUDES)
unset(lib_TARGET_PROPERTIES)
set(optional_args TEST)
set(single_value_args NAME OUTPUT_NAME TYPE)
set(multi_value_args SOURCES DEFINES INCLUDES COMPILE_FLAGS LINK_FLAGS
OBJLIB_DEPS LIB_DEPS PUBLIC_INCLUDES TARGET_PROPERTIES)
cmake_parse_arguments(lib "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(DRACO_VERBOSE GREATER 1)
message("--------- draco_add_library ---------\n"
"lib_TEST=${lib_TEST}\n"
"lib_NAME=${lib_NAME}\n"
"lib_OUTPUT_NAME=${lib_OUTPUT_NAME}\n"
"lib_TYPE=${lib_TYPE}\n"
"lib_SOURCES=${lib_SOURCES}\n"
"lib_DEFINES=${lib_DEFINES}\n"
"lib_INCLUDES=${lib_INCLUDES}\n"
"lib_COMPILE_FLAGS=${lib_COMPILE_FLAGS}\n"
"lib_LINK_FLAGS=${lib_LINK_FLAGS}\n"
"lib_OBJLIB_DEPS=${lib_OBJLIB_DEPS}\n"
"lib_LIB_DEPS=${lib_LIB_DEPS}\n"
"lib_PUBLIC_INCLUDES=${lib_PUBLIC_INCLUDES}\n"
"---------------------------------------\n")
endif()
if(NOT (lib_NAME AND lib_TYPE))
message(FATAL_ERROR "draco_add_library: NAME and TYPE required.")
endif()
list(APPEND draco_targets ${lib_NAME})
if(lib_TEST)
list(APPEND draco_test_targets ${lib_NAME})
list(APPEND draco_test_sources ${lib_SOURCES})
else()
list(APPEND draco_sources ${lib_SOURCES})
if(lib_TYPE STREQUAL MODULE)
list(APPEND draco_module_targets ${lib_NAME})
elseif(lib_TYPE STREQUAL OBJECT)
list(APPEND draco_objlib_targets ${lib_NAME})
elseif(lib_TYPE STREQUAL SHARED)
list(APPEND draco_dylib_targets ${lib_NAME})
elseif(lib_TYPE STREQUAL STATIC)
list(APPEND draco_lib_targets ${lib_NAME})
else()
message(WARNING "draco_add_library: Unhandled type: ${lib_TYPE}")
endif()
endif()
add_library(${lib_NAME} ${lib_TYPE} ${lib_SOURCES})
if(lib_SOURCES)
draco_process_intrinsics_sources(TARGET ${lib_NAME} SOURCES ${lib_SOURCES})
endif()
if(lib_OUTPUT_NAME)
if(NOT (BUILD_SHARED_LIBS AND (MSVC OR WIN32)))
set_target_properties(${lib_NAME}
PROPERTIES OUTPUT_NAME ${lib_OUTPUT_NAME})
endif()
endif()
if(lib_DEFINES)
target_compile_definitions(${lib_NAME} PRIVATE ${lib_DEFINES})
endif()
if(lib_INCLUDES)
target_include_directories(${lib_NAME} PRIVATE ${lib_INCLUDES})
endif()
if(lib_PUBLIC_INCLUDES)
target_include_directories(${lib_NAME} PUBLIC ${lib_PUBLIC_INCLUDES})
endif()
if(lib_COMPILE_FLAGS OR DRACO_CXX_FLAGS)
target_compile_options(${lib_NAME}
PRIVATE ${lib_COMPILE_FLAGS} ${DRACO_CXX_FLAGS})
endif()
if(lib_LINK_FLAGS)
set_target_properties(${lib_NAME} PROPERTIES LINK_FLAGS ${lib_LINK_FLAGS})
endif()
if(lib_OBJLIB_DEPS)
foreach(objlib_dep ${lib_OBJLIB_DEPS})
target_sources(${lib_NAME} PRIVATE $<TARGET_OBJECTS:${objlib_dep}>)
endforeach()
endif()
if(lib_LIB_DEPS)
if(lib_TYPE STREQUAL STATIC)
set(link_type PUBLIC)
else()
set(link_type PRIVATE)
if(lib_TYPE STREQUAL SHARED AND CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU")
# The draco shared object uses the static draco as input to turn it into
# a shared object. Include everything from the static library in the
# shared object.
if(APPLE)
list(INSERT lib_LIB_DEPS 0 -Wl,-force_load)
else()
list(INSERT lib_LIB_DEPS 0 -Wl,--whole-archive)
list(APPEND lib_LIB_DEPS -Wl,--no-whole-archive)
endif()
endif()
endif()
target_link_libraries(${lib_NAME} ${link_type} ${lib_LIB_DEPS})
endif()
if(NOT MSVC AND lib_NAME MATCHES "^lib")
# Non-MSVC generators prepend lib to static lib target file names. Libdraco
# already includes lib in its name. Avoid naming output files liblib*.
set_target_properties(${lib_NAME} PROPERTIES PREFIX "")
endif()
if(lib_TYPE STREQUAL SHARED AND NOT MSVC)
set_target_properties(${lib_NAME} PROPERTIES SOVERSION ${DRACO_SOVERSION})
endif()
if(BUILD_SHARED_LIBS AND (MSVC OR WIN32))
if(lib_TYPE STREQUAL SHARED)
target_compile_definitions(${lib_NAME} PRIVATE "DRACO_BUILDING_DLL=1")
else()
target_compile_definitions(${lib_NAME} PRIVATE "DRACO_BUILDING_DLL=0")
endif()
endif()
# Determine if $lib_NAME is a header only target.
unset(sources_list)
if(lib_SOURCES)
set(sources_list ${lib_SOURCES})
list(FILTER sources_list INCLUDE REGEX cc$)
endif()
if(NOT sources_list)
if(NOT XCODE)
# This is a header only target. Tell CMake the link language.
set_target_properties(${lib_NAME} PROPERTIES LINKER_LANGUAGE CXX)
else()
# The Xcode generator ignores LINKER_LANGUAGE. Add a dummy cc file.
draco_create_dummy_source_file(TARGET ${lib_NAME} BASENAME ${lib_NAME})
endif()
endif()
endmacro()

View File

@ -0,0 +1,13 @@
#ifndef DRACO_TESTING_DRACO_TEST_CONFIG_H_
#define DRACO_TESTING_DRACO_TEST_CONFIG_H_
// If this file is named draco_test_config.h.cmake:
// This file is used as input at cmake generation time.
// If this file is named draco_test_config.h:
// GENERATED FILE, DO NOT EDIT. SEE ABOVE.
#define DRACO_TEST_DATA_DIR "${DRACO_TEST_DATA_DIR}"
#define DRACO_TEST_TEMP_DIR "${DRACO_TEST_TEMP_DIR}"
#endif // DRACO_TESTING_DRACO_TEST_CONFIG_H_

View File

@ -0,0 +1,133 @@
if(DRACO_CMAKE_DRACO_TESTS_CMAKE)
return()
endif()
set(DRACO_CMAKE_DRACO_TESTS_CMAKE 1)
# The factory tests are in a separate target to avoid breaking tests that rely
# on file I/O via the factories. The fake reader and writer implementations
# interfere with normal file I/O functions.
set(draco_factory_test_sources
"${draco_src_root}/io/file_reader_factory_test.cc"
"${draco_src_root}/io/file_writer_factory_test.cc")
list(
APPEND
draco_test_sources
"${draco_src_root}/animation/keyframe_animation_encoding_test.cc"
"${draco_src_root}/animation/keyframe_animation_test.cc"
"${draco_src_root}/attributes/point_attribute_test.cc"
"${draco_src_root}/compression/attributes/point_d_vector_test.cc"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc"
"${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc"
"${draco_src_root}/compression/attributes/sequential_integer_attribute_encoding_test.cc"
"${draco_src_root}/compression/bit_coders/rans_coding_test.cc"
"${draco_src_root}/compression/decode_test.cc"
"${draco_src_root}/compression/encode_test.cc"
"${draco_src_root}/compression/entropy/shannon_entropy_test.cc"
"${draco_src_root}/compression/entropy/symbol_coding_test.cc"
"${draco_src_root}/compression/mesh/mesh_edgebreaker_encoding_test.cc"
"${draco_src_root}/compression/mesh/mesh_encoder_test.cc"
"${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_encoding_test.cc"
"${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoding_test.cc"
"${draco_src_root}/core/buffer_bit_coding_test.cc"
"${draco_src_root}/core/draco_test_base.h"
"${draco_src_root}/core/draco_test_utils.cc"
"${draco_src_root}/core/draco_test_utils.h"
"${draco_src_root}/core/math_utils_test.cc"
"${draco_src_root}/core/quantization_utils_test.cc"
"${draco_src_root}/core/status_test.cc"
"${draco_src_root}/core/vector_d_test.cc"
"${draco_src_root}/io/file_reader_test_common.h"
"${draco_src_root}/io/file_utils_test.cc"
"${draco_src_root}/io/stdio_file_reader_test.cc"
"${draco_src_root}/io/stdio_file_writer_test.cc"
"${draco_src_root}/io/obj_decoder_test.cc"
"${draco_src_root}/io/obj_encoder_test.cc"
"${draco_src_root}/io/ply_decoder_test.cc"
"${draco_src_root}/io/ply_reader_test.cc"
"${draco_src_root}/io/point_cloud_io_test.cc"
"${draco_src_root}/mesh/mesh_are_equivalent_test.cc"
"${draco_src_root}/mesh/mesh_cleanup_test.cc"
"${draco_src_root}/mesh/triangle_soup_mesh_builder_test.cc"
"${draco_src_root}/metadata/metadata_encoder_test.cc"
"${draco_src_root}/metadata/metadata_test.cc"
"${draco_src_root}/point_cloud/point_cloud_builder_test.cc"
"${draco_src_root}/point_cloud/point_cloud_test.cc")
list(APPEND draco_gtest_all
"${draco_root}/../googletest/googletest/src/gtest-all.cc")
list(APPEND draco_gtest_main
"${draco_root}/../googletest/googletest/src/gtest_main.cc")
macro(draco_setup_test_targets)
if(DRACO_TESTS)
if(NOT (EXISTS ${draco_gtest_all} AND EXISTS ${draco_gtest_main}))
message(FATAL_ERROR "googletest must be a sibling directory of ${draco_root}.")
endif()
list(APPEND draco_test_defines GTEST_HAS_PTHREAD=0)
draco_add_library(TEST
NAME
draco_gtest
TYPE
STATIC
SOURCES
${draco_gtest_all}
DEFINES
${draco_defines}
${draco_test_defines}
INCLUDES
${draco_test_include_paths})
draco_add_library(TEST
NAME
draco_gtest_main
TYPE
STATIC
SOURCES
${draco_gtest_main}
DEFINES
${draco_defines}
${draco_test_defines}
INCLUDES
${draco_test_include_paths})
set(DRACO_TEST_DATA_DIR "${draco_root}/testdata")
set(DRACO_TEST_TEMP_DIR "${draco_build}/draco_test_temp")
file(MAKE_DIRECTORY "${DRACO_TEST_TEMP_DIR}")
# Sets DRACO_TEST_DATA_DIR and DRACO_TEST_TEMP_DIR.
configure_file("${draco_root}/cmake/draco_test_config.h.cmake"
"${draco_build}/testing/draco_test_config.h")
# Create the test targets.
draco_add_executable(NAME
draco_tests
SOURCES
${draco_test_sources}
DEFINES
${draco_defines}
${draco_test_defines}
INCLUDES
${draco_test_include_paths}
LIB_DEPS
draco_static
draco_gtest
draco_gtest_main)
draco_add_executable(NAME
draco_factory_tests
SOURCES
${draco_factory_test_sources}
DEFINES
${draco_defines}
${draco_test_defines}
INCLUDES
${draco_test_include_paths}
LIB_DEPS
draco_static
draco_gtest
draco_gtest_main)
endif()
endmacro()

View File

@ -0,0 +1,64 @@
if(DRACO_CMAKE_DRACO_VARIABLES_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_VARIABLES_CMAKE_
set(DRACO_CMAKE_DRACO_VARIABLES_CMAKE_ 1)
# Halts generation when $variable_name does not refer to a directory that
# exists.
macro(draco_variable_must_be_directory variable_name)
if("${variable_name}" STREQUAL "")
message(
FATAL_ERROR
"Empty variable_name passed to draco_variable_must_be_directory.")
endif()
if("${${variable_name}}" STREQUAL "")
message(
FATAL_ERROR
"Empty variable ${variable_name} is required to build draco.")
endif()
if(NOT IS_DIRECTORY "${${variable_name}}")
message(
FATAL_ERROR
"${variable_name}, which is ${${variable_name}}, does not refer to a\n"
"directory.")
endif()
endmacro()
# Adds $var_name to the tracked variables list.
macro(draco_track_configuration_variable var_name)
if(DRACO_VERBOSE GREATER 2)
message("---- draco_track_configuration_variable ----\n"
"var_name=${var_name}\n"
"----------------------------------------------\n")
endif()
list(APPEND draco_configuration_variables ${var_name})
list(REMOVE_DUPLICATES draco_configuration_variables)
endmacro()
# Logs current C++ and executable linker flags via the CMake message command.
macro(draco_dump_cmake_flag_variables)
unset(flag_variables)
list(APPEND flag_variables "CMAKE_CXX_FLAGS_INIT" "CMAKE_CXX_FLAGS"
"CMAKE_EXE_LINKER_FLAGS_INIT" "CMAKE_EXE_LINKER_FLAGS")
if(CMAKE_BUILD_TYPE)
list(APPEND flag_variables "CMAKE_BUILD_TYPE"
"CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}_INIT"
"CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}"
"CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}_INIT"
"CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}")
endif()
foreach(flag_variable ${flag_variables})
message("${flag_variable}:${${flag_variable}}")
endforeach()
endmacro()
# Dumps the variables tracked in $draco_configuration_variables via the CMake
# message command.
macro(draco_dump_tracked_configuration_variables)
foreach(config_variable ${draco_configuration_variables})
message("${config_variable}:${${config_variable}}")
endforeach()
endmacro()

View File

@ -0,0 +1,19 @@
if(DRACO_CMAKE_SANITIZERS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_SANITIZERS_CMAKE_ 1)
if(MSVC OR NOT SANITIZE)
return()
endif()
include("${draco_root}/cmake/compiler_flags.cmake")
string(TOLOWER ${SANITIZE} SANITIZE)
# Require the sanitizer requested.
require_linker_flag("-fsanitize=${SANITIZE}")
require_compiler_flag("-fsanitize=${SANITIZE}" YES)
# Make callstacks accurate.
require_compiler_flag("-fno-omit-frame-pointer -fno-optimize-sibling-calls" YES)

View File

@ -0,0 +1,14 @@
if(DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_)
return()
endif() # DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_
set(DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Linux")
if("${CROSS}" STREQUAL "")
set(CROSS aarch64-linux-gnu-)
endif()
set(CMAKE_CXX_COMPILER ${CROSS}g++)
set(CMAKE_CXX_FLAGS_INIT "-march=armv8-a")
set(CMAKE_SYSTEM_PROCESSOR "aarch64")
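# A typical cross-compile invocation might look like this (paths are
# illustrative):
#   cmake path/to/draco -DCMAKE_TOOLCHAIN_FILE=path/to/cmake/toolchains/aarch64-linux-gnu.cmake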

View File

@ -0,0 +1,23 @@
if(DRACO_CMAKE_TOOLCHAINS_ANDROID_NDK_COMMON_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ANDROID_NDK_COMMON_CMAKE_ 1)
# Toolchain files do not have access to cached variables:
# https://gitlab.kitware.com/cmake/cmake/issues/16170. Set an intermediate
# environment variable when loaded the first time.
if(DRACO_ANDROID_NDK_PATH)
set(ENV{DRACO_ANDROID_NDK_PATH} "${DRACO_ANDROID_NDK_PATH}")
else()
set(DRACO_ANDROID_NDK_PATH "$ENV{DRACO_ANDROID_NDK_PATH}")
endif()
set(CMAKE_SYSTEM_NAME Android)
if(NOT CMAKE_ANDROID_STL_TYPE)
set(CMAKE_ANDROID_STL_TYPE c++_static)
endif()
if(NOT CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION)
set(CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION clang)
endif()

View File

@ -0,0 +1,39 @@
if(DRACO_CMAKE_TOOLCHAINS_ANDROID_CMAKE_)
return()
endif() # DRACO_CMAKE_TOOLCHAINS_ANDROID_CMAKE_
set(DRACO_CMAKE_TOOLCHAINS_ANDROID_CMAKE_ 1)
# Additional ANDROID_* settings are available, see:
# https://developer.android.com/ndk/guides/cmake#variables
if(NOT ANDROID_PLATFORM)
set(ANDROID_PLATFORM android-21)
endif()
# Choose target architecture with:
#
# -DANDROID_ABI={armeabi-v7a,armeabi-v7a with NEON,arm64-v8a,x86,x86_64}
if(NOT ANDROID_ABI)
set(ANDROID_ABI arm64-v8a)
endif()
# Force arm mode for 32-bit targets (instead of the default thumb) to improve
# performance.
if(NOT ANDROID_ARM_MODE)
set(ANDROID_ARM_MODE arm)
endif()
# Toolchain files do not have access to cached variables:
# https://gitlab.kitware.com/cmake/cmake/issues/16170. Set an intermediate
# environment variable when loaded the first time.
if(DRACO_ANDROID_NDK_PATH)
set(ENV{DRACO_ANDROID_NDK_PATH} "${DRACO_ANDROID_NDK_PATH}")
else()
set(DRACO_ANDROID_NDK_PATH "$ENV{DRACO_ANDROID_NDK_PATH}")
endif()
if(NOT DRACO_ANDROID_NDK_PATH)
message(FATAL_ERROR "DRACO_ANDROID_NDK_PATH not set.")
return()
endif()
include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake")

View File

@ -0,0 +1,17 @@
if(DRACO_CMAKE_TOOLCHAINS_ARM_IOS_COMMON_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARM_IOS_COMMON_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Darwin")
if(CMAKE_OSX_SDK)
set(CMAKE_OSX_SYSROOT ${CMAKE_OSX_SDK})
else()
set(CMAKE_OSX_SYSROOT iphoneos)
endif()
set(CMAKE_C_COMPILER clang)
set(CMAKE_C_COMPILER_ARG1 "-arch ${CMAKE_SYSTEM_PROCESSOR}")
set(CMAKE_CXX_COMPILER clang++)
set(CMAKE_CXX_COMPILER_ARG1 "-arch ${CMAKE_SYSTEM_PROCESSOR}")
# TODO(tomfinegan): Handle bit code embedding.

View File

@ -0,0 +1,15 @@
if(DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_)
return()
endif() # DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_
set(DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Linux")
if("${CROSS}" STREQUAL "")
set(CROSS arm-linux-gnueabihf-)
endif()
set(CMAKE_CXX_COMPILER ${CROSS}g++)
set(CMAKE_CXX_FLAGS_INIT "-march=armv7-a -marm")
set(CMAKE_SYSTEM_PROCESSOR "armv7")
set(DRACO_NEON_INTRINSICS_FLAG "-mfpu=neon")

View File

@ -0,0 +1,16 @@
if(DRACO_CMAKE_TOOLCHAINS_ARM64_ANDROID_NDK_LIBCPP_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARM64_ANDROID_NDK_LIBCPP_CMAKE_ 1)
include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake")
if(NOT ANDROID_PLATFORM)
set(ANDROID_PLATFORM android-21)
endif()
if(NOT ANDROID_ABI)
set(ANDROID_ABI arm64-v8a)
endif()
include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake")

View File

@ -0,0 +1,14 @@
if(DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_ 1)
if(XCODE)
# TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
set(CMAKE_SYSTEM_PROCESSOR "arm64")
set(CMAKE_OSX_ARCHITECTURES "arm64")
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")

View File

@ -0,0 +1,18 @@
if(DRACO_CMAKE_TOOLCHAINS_ARM64_LINUX_GCC_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARM64_LINUX_GCC_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Linux")
if("${CROSS}" STREQUAL "")
# Default the cross compiler prefix to something known to work.
set(CROSS aarch64-linux-gnu-)
endif()
set(CMAKE_C_COMPILER ${CROSS}gcc)
set(CMAKE_CXX_COMPILER ${CROSS}g++)
set(AS_EXECUTABLE ${CROSS}as)
set(CMAKE_C_COMPILER_ARG1 "-march=armv8-a")
set(CMAKE_CXX_COMPILER_ARG1 "-march=armv8-a")
set(CMAKE_SYSTEM_PROCESSOR "arm64")

View File

@ -0,0 +1,16 @@
if(DRACO_CMAKE_TOOLCHAINS_ARMV7_ANDROID_NDK_LIBCPP_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARMV7_ANDROID_NDK_LIBCPP_CMAKE_ 1)
include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake")
if(NOT ANDROID_PLATFORM)
set(ANDROID_PLATFORM android-18)
endif()
if(NOT ANDROID_ABI)
set(ANDROID_ABI armeabi-v7a)
endif()
include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake")

View File

@ -0,0 +1,14 @@
if(DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_ 1)
if(XCODE)
# TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
set(CMAKE_SYSTEM_PROCESSOR "armv7")
set(CMAKE_OSX_ARCHITECTURES "armv7")
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")

View File

@ -0,0 +1,24 @@
if(DRACO_CMAKE_TOOLCHAINS_ARMV7_LINUX_GCC_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARMV7_LINUX_GCC_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Linux")
if("${CROSS}" STREQUAL "")
# Default the cross compiler prefix to something known to work.
set(CROSS arm-linux-gnueabihf-)
endif()
if(NOT ${CROSS} MATCHES hf-$)
set(DRACO_EXTRA_TOOLCHAIN_FLAGS "-mfloat-abi=softfp")
endif()
set(CMAKE_C_COMPILER ${CROSS}gcc)
set(CMAKE_CXX_COMPILER ${CROSS}g++)
set(AS_EXECUTABLE ${CROSS}as)
set(CMAKE_C_COMPILER_ARG1
"-march=armv7-a -mfpu=neon ${DRACO_EXTRA_TOOLCHAIN_FLAGS}")
set(CMAKE_CXX_COMPILER_ARG1
"-march=armv7-a -mfpu=neon ${DRACO_EXTRA_TOOLCHAIN_FLAGS}")
set(CMAKE_SYSTEM_PROCESSOR "armv7")

View File

@ -0,0 +1,14 @@
if(DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_ 1)
if(XCODE)
# TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
set(CMAKE_SYSTEM_PROCESSOR "armv7s")
set(CMAKE_OSX_ARCHITECTURES "armv7s")
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")

View File

@ -0,0 +1,15 @@
if(DRACO_CMAKE_TOOLCHAINS_i386_IOS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_i386_IOS_CMAKE_ 1)
if(XCODE)
# TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
set(CMAKE_SYSTEM_PROCESSOR "i386")
set(CMAKE_OSX_ARCHITECTURES "i386")
set(CMAKE_OSX_SDK "iphonesimulator")
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")

View File

@ -0,0 +1,16 @@
if(DRACO_CMAKE_TOOLCHAINS_X86_ANDROID_NDK_LIBCPP_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_X86_ANDROID_NDK_LIBCPP_CMAKE_ 1)
include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake")
if(NOT ANDROID_PLATFORM)
set(ANDROID_PLATFORM android-18)
endif()
if(NOT ANDROID_ABI)
set(ANDROID_ABI x86)
endif()
include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake")

View File

@ -0,0 +1,16 @@
if(DRACO_CMAKE_TOOLCHAINS_X86_64_ANDROID_NDK_LIBCPP_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_X86_64_ANDROID_NDK_LIBCPP_CMAKE_ 1)
include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake")
if(NOT ANDROID_PLATFORM)
set(ANDROID_PLATFORM android-21)
endif()
if(NOT ANDROID_ABI)
set(ANDROID_ABI x86_64)
endif()
include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake")

View File

@ -0,0 +1,15 @@
if(DRACO_CMAKE_TOOLCHAINS_X86_64_IOS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_X86_64_IOS_CMAKE_ 1)
if(XCODE)
# TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
set(CMAKE_SYSTEM_PROCESSOR "x86_64")
set(CMAKE_OSX_ARCHITECTURES "x86_64")
set(CMAKE_OSX_SDK "iphonesimulator")
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")

View File

@ -0,0 +1,79 @@
if(DRACO_CMAKE_UTIL_CMAKE_)
return()
endif()
set(DRACO_CMAKE_UTIL_CMAKE_ 1)
# Creates dummy source file in $draco_build_dir named $basename.$extension and
# returns the full path to the dummy source file via the $out_file_path
# parameter.
function(create_dummy_source_file basename extension out_file_path)
set(dummy_source_file "${draco_build_dir}/${basename}.${extension}")
file(WRITE "${dummy_source_file}.new"
"// Generated file. DO NOT EDIT!\n"
"// ${target_name} needs a ${extension} file to force link language, \n"
"// or to silence a harmless CMake warning: Ignore me.\n"
"void ${target_name}_dummy_function(void) {}\n")
# Will replace ${dummy_source_file} only if the file content has changed.
# This prevents forced Draco rebuilds after CMake runs.
configure_file("${dummy_source_file}.new" "${dummy_source_file}")
file(REMOVE "${dummy_source_file}.new")
set(${out_file_path} ${dummy_source_file} PARENT_SCOPE)
endfunction()
# Convenience function for adding a dummy source file to $target_name using
# $extension as the file extension. Wraps create_dummy_source_file().
function(add_dummy_source_file_to_target target_name extension)
create_dummy_source_file("${target_name}" "${extension}" "dummy_source_file")
target_sources(${target_name} PRIVATE ${dummy_source_file})
endfunction()
# Extracts the version number from $version_file and returns it to the user via
# $version_string_out_var. This is achieved by finding the first instance of the
# kDracoVersion variable and then removing everything but the string literal
# assigned to the variable. Quotes and semicolon are stripped from the returned
# string.
function(extract_version_string version_file version_string_out_var)
file(STRINGS "${version_file}" draco_version REGEX "kDracoVersion")
list(GET draco_version 0 draco_version)
string(REPLACE "static const char kDracoVersion[] = " "" draco_version
"${draco_version}")
string(REPLACE ";" "" draco_version "${draco_version}")
string(REPLACE "\"" "" draco_version "${draco_version}")
set("${version_string_out_var}" "${draco_version}" PARENT_SCOPE)
endfunction()
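# A minimal usage sketch (the printed value is illustrative):
#   extract_version_string("${draco_src_root}/core/draco_version.h" version_str)
#   message("Draco version: ${version_str}") # e.g. 1.4.1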
# Sets CMake compiler launcher to $launcher_name when $launcher_name is found in
# $PATH. Warns user about ignoring build flag $launcher_flag when $launcher_name
# is not found in $PATH.
function(set_compiler_launcher launcher_flag launcher_name)
find_program(launcher_path "${launcher_name}")
if(launcher_path)
set(CMAKE_C_COMPILER_LAUNCHER "${launcher_path}" PARENT_SCOPE)
set(CMAKE_CXX_COMPILER_LAUNCHER "${launcher_path}" PARENT_SCOPE)
message("--- Using ${launcher_name} as compiler launcher.")
else()
message(
WARNING "--- Cannot find ${launcher_name}, ${launcher_flag} ignored.")
endif()
endfunction()
# Terminates CMake execution when $var_name is unset in the environment. Sets
# CMake variable to the value of the environment variable when the variable is
# present in the environment.
macro(require_variable var_name)
if("$ENV{${var_name}}" STREQUAL "")
message(FATAL_ERROR "${var_name} must be set in environment.")
endif()
set_variable_if_unset(${var_name} "")
endmacro()
# Sets $var_name to $default_value if not already set.
macro(set_variable_if_unset var_name default_value)
if(NOT "$ENV{${var_name}}" STREQUAL "")
set(${var_name} $ENV{${var_name}})
elseif(NOT ${var_name})
set(${var_name} ${default_value})
endif()
endmacro()

View File

@ -0,0 +1,54 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation.h"
namespace draco {
KeyframeAnimation::KeyframeAnimation() {}
bool KeyframeAnimation::SetTimestamps(
const std::vector<TimestampType> &timestamp) {
// Already added attributes.
const int32_t num_frames = timestamp.size();
if (num_attributes() > 0) {
// Timestamp attribute could be added only once.
if (timestamps()->size()) {
return false;
} else {
// Check if the number of frames is consistent with
// the existing keyframes.
if (num_frames != num_points()) {
return false;
}
}
} else {
// This is the first attribute.
set_num_frames(num_frames);
}
// Add attribute for time stamp data.
std::unique_ptr<PointAttribute> timestamp_att =
std::unique_ptr<PointAttribute>(new PointAttribute());
timestamp_att->Init(GeometryAttribute::GENERIC, 1, DT_FLOAT32, false,
num_frames);
for (PointIndex i(0); i < num_frames; ++i) {
timestamp_att->SetAttributeValue(timestamp_att->mapped_index(i),
&timestamp[i.value()]);
}
this->SetAttribute(kTimestampId, std::move(timestamp_att));
return true;
}
} // namespace draco

View File

@ -0,0 +1,107 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_H_
#define DRACO_ANIMATION_KEYFRAME_ANIMATION_H_
#include <vector>
#include "draco/point_cloud/point_cloud.h"
namespace draco {
// Class for holding keyframe animation data. It will have two or more
// attributes as a point cloud. The first attribute is always the timestamp
// of the animation. Each KeyframeAnimation could have multiple animations with
// the same number of frames. Each animation will be treated as a point
// attribute.
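//
// A minimal usage sketch (variable names are illustrative):
//   draco::KeyframeAnimation anim;
//   anim.SetTimestamps({0.f, 1.f, 2.f});
//   std::vector<float> data(3 * 3);  // 3 frames x 3 components
//   const int32_t id = anim.AddKeyframes(draco::DT_FLOAT32, 3, data);
//   // id is -1 if the frame count is inconsistent with the timestamps.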
class KeyframeAnimation : public PointCloud {
public:
// Force time stamp to be float type.
using TimestampType = float;
KeyframeAnimation();
// Animation must have only one timestamp attribute.
// This function must be called before adding any animation data.
// Returns false if timestamp already exists.
bool SetTimestamps(const std::vector<TimestampType> &timestamp);
// Returns an id for the added animation data. This id will be used to
// identify this animation.
// Returns -1 if error, e.g. number of frames is not consistent.
// Type |T| should be consistent with |DataType|, e.g:
// float - DT_FLOAT32,
// int32_t - DT_INT32, ...
template <typename T>
int32_t AddKeyframes(DataType data_type, uint32_t num_components,
const std::vector<T> &data);
const PointAttribute *timestamps() const {
return GetAttributeByUniqueId(kTimestampId);
}
const PointAttribute *keyframes(int32_t animation_id) const {
return GetAttributeByUniqueId(animation_id);
}
// Number of frames should be equal to the number of points in the point cloud.
void set_num_frames(int32_t num_frames) { set_num_points(num_frames); }
int32_t num_frames() const { return static_cast<int32_t>(num_points()); }
int32_t num_animations() const { return num_attributes() - 1; }
private:
// Attribute id of timestamp is fixed to 0.
static constexpr int32_t kTimestampId = 0;
};
template <typename T>
int32_t KeyframeAnimation::AddKeyframes(DataType data_type,
uint32_t num_components,
const std::vector<T> &data) {
// TODO(draco-eng): Verify T is consistent with |data_type|.
if (num_components == 0) {
return -1;
}
// If timestamps is not added yet, then reserve attribute 0 for timestamps.
if (!num_attributes()) {
// Add a temporary attribute with 0 points to fill attribute id 0.
std::unique_ptr<PointAttribute> temp_att =
std::unique_ptr<PointAttribute>(new PointAttribute());
temp_att->Init(GeometryAttribute::GENERIC, num_components, data_type, false,
0);
this->AddAttribute(std::move(temp_att));
set_num_frames(data.size() / num_components);
}
if (data.size() != num_components * num_frames()) {
return -1;
}
std::unique_ptr<PointAttribute> keyframe_att =
std::unique_ptr<PointAttribute>(new PointAttribute());
keyframe_att->Init(GeometryAttribute::GENERIC, num_components, data_type,
false, num_frames());
const size_t stride = num_components;
for (PointIndex i(0); i < num_frames(); ++i) {
keyframe_att->SetAttributeValue(keyframe_att->mapped_index(i),
&data[i.value() * stride]);
}
return this->AddAttribute(std::move(keyframe_att));
}
} // namespace draco
#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_H_

View File

@ -0,0 +1,30 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation_decoder.h"
namespace draco {
Status KeyframeAnimationDecoder::Decode(const DecoderOptions &options,
DecoderBuffer *in_buffer,
KeyframeAnimation *animation) {
const auto status = PointCloudSequentialDecoder::Decode(
options, in_buffer, static_cast<PointCloud *>(animation));
if (!status.ok()) {
return status;
}
return OkStatus();
}
} // namespace draco

View File

@ -0,0 +1,34 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_
#define DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_
#include "draco/animation/keyframe_animation.h"
#include "draco/compression/point_cloud/point_cloud_sequential_decoder.h"
namespace draco {
// Class for decoding keyframe animation.
class KeyframeAnimationDecoder : private PointCloudSequentialDecoder {
public:
KeyframeAnimationDecoder(){};
Status Decode(const DecoderOptions &options, DecoderBuffer *in_buffer,
KeyframeAnimation *animation);
};
} // namespace draco
#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_

View File

@ -0,0 +1,28 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation_encoder.h"
namespace draco {
KeyframeAnimationEncoder::KeyframeAnimationEncoder() {}
Status KeyframeAnimationEncoder::EncodeKeyframeAnimation(
const KeyframeAnimation &animation, const EncoderOptions &options,
EncoderBuffer *out_buffer) {
SetPointCloud(animation);
return Encode(options, out_buffer);
}
} // namespace draco

View File

@ -0,0 +1,39 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_
#define DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_
#include "draco/animation/keyframe_animation.h"
#include "draco/compression/point_cloud/point_cloud_sequential_encoder.h"
namespace draco {
// Class for encoding keyframe animation. It takes KeyframeAnimation as a
// PointCloud and compresses it. It's mostly a wrapper around PointCloudEncoder so
// that the animation module could be separated from geometry compression when
// exposed to developers.
class KeyframeAnimationEncoder : private PointCloudSequentialEncoder {
public:
KeyframeAnimationEncoder();
// Encode an animation to a buffer.
Status EncodeKeyframeAnimation(const KeyframeAnimation &animation,
const EncoderOptions &options,
EncoderBuffer *out_buffer);
};
} // namespace draco
#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_

View File

@ -0,0 +1,168 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation.h"
#include "draco/animation/keyframe_animation_decoder.h"
#include "draco/animation/keyframe_animation_encoder.h"
#include "draco/core/draco_test_base.h"
#include "draco/core/draco_test_utils.h"
namespace draco {
class KeyframeAnimationEncodingTest : public ::testing::Test {
protected:
KeyframeAnimationEncodingTest() {}
bool CreateAndAddTimestamps(int32_t num_frames) {
timestamps_.resize(num_frames);
for (int i = 0; i < timestamps_.size(); ++i)
timestamps_[i] = static_cast<draco::KeyframeAnimation::TimestampType>(i);
return keyframe_animation_.SetTimestamps(timestamps_);
}
int32_t CreateAndAddAnimationData(int32_t num_frames,
uint32_t num_components) {
// Create and add animation data with the given number of components.
animation_data_.resize(num_frames * num_components);
for (int i = 0; i < animation_data_.size(); ++i)
animation_data_[i] = static_cast<float>(i);
return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components,
animation_data_);
}
template <int num_components_t>
void CompareAnimationData(const KeyframeAnimation &animation0,
const KeyframeAnimation &animation1,
bool quantized) {
ASSERT_EQ(animation0.num_frames(), animation1.num_frames());
ASSERT_EQ(animation0.num_animations(), animation1.num_animations());
if (quantized) {
// TODO(hemmer) : Add test for stable quantization.
// Quantization will result in slightly different values.
// Skip comparing values.
return;
}
// Compare time stamp.
const auto timestamp_att0 = animation0.timestamps();
const auto timestamp_att1 = animation1.timestamps();
for (int i = 0; i < animation0.num_frames(); ++i) {
std::array<float, 1> att_value0;
std::array<float, 1> att_value1;
ASSERT_TRUE((timestamp_att0->GetValue<float, 1>(
draco::AttributeValueIndex(i), &att_value0)));
ASSERT_TRUE((timestamp_att1->GetValue<float, 1>(
draco::AttributeValueIndex(i), &att_value1)));
ASSERT_FLOAT_EQ(att_value0[0], att_value1[0]);
}
for (int animation_id = 1; animation_id < animation0.num_animations();
++animation_id) {
// Compare keyframe data.
const auto keyframe_att0 = animation0.keyframes(animation_id);
const auto keyframe_att1 = animation1.keyframes(animation_id);
ASSERT_EQ(keyframe_att0->num_components(),
keyframe_att1->num_components());
for (int i = 0; i < animation0.num_frames(); ++i) {
std::array<float, num_components_t> att_value0;
std::array<float, num_components_t> att_value1;
ASSERT_TRUE((keyframe_att0->GetValue<float, num_components_t>(
draco::AttributeValueIndex(i), &att_value0)));
ASSERT_TRUE((keyframe_att1->GetValue<float, num_components_t>(
draco::AttributeValueIndex(i), &att_value1)));
for (int j = 0; j < att_value0.size(); ++j) {
ASSERT_FLOAT_EQ(att_value0[j], att_value1[j]);
}
}
}
}
template <int num_components_t>
void TestKeyframeAnimationEncoding() {
TestKeyframeAnimationEncoding<num_components_t>(false);
}
template <int num_components_t>
void TestKeyframeAnimationEncoding(bool quantized) {
// Encode animation class.
draco::EncoderBuffer buffer;
draco::KeyframeAnimationEncoder encoder;
EncoderOptions options = EncoderOptions::CreateDefaultOptions();
if (quantized) {
// Set quantization for timestamps.
options.SetAttributeInt(0, "quantization_bits", 20);
// Set quantization for keyframes.
for (int i = 1; i <= keyframe_animation_.num_animations(); ++i) {
options.SetAttributeInt(i, "quantization_bits", 20);
}
}
ASSERT_TRUE(
encoder.EncodeKeyframeAnimation(keyframe_animation_, options, &buffer)
.ok());
draco::DecoderBuffer dec_decoder;
draco::KeyframeAnimationDecoder decoder;
DecoderBuffer dec_buffer;
dec_buffer.Init(buffer.data(), buffer.size());
// Decode animation class.
std::unique_ptr<KeyframeAnimation> decoded_animation(
new KeyframeAnimation());
DecoderOptions dec_options;
ASSERT_TRUE(
decoder.Decode(dec_options, &dec_buffer, decoded_animation.get()).ok());
// Verify that the animation before and after compression is identical.
CompareAnimationData<num_components_t>(keyframe_animation_,
*decoded_animation, quantized);
}
draco::KeyframeAnimation keyframe_animation_;
std::vector<draco::KeyframeAnimation::TimestampType> timestamps_;
std::vector<float> animation_data_;
};
TEST_F(KeyframeAnimationEncodingTest, OneComponent) {
const int num_frames = 1;
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 1), 1);
TestKeyframeAnimationEncoding<1>();
}
TEST_F(KeyframeAnimationEncodingTest, ManyComponents) {
const int num_frames = 100;
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 100), 1);
TestKeyframeAnimationEncoding<100>();
}
TEST_F(KeyframeAnimationEncodingTest, ManyComponentsWithQuantization) {
const int num_frames = 100;
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 4), 1);
// Test compression with quantization.
TestKeyframeAnimationEncoding<4>(true);
}
TEST_F(KeyframeAnimationEncodingTest, MultipleAnimations) {
const int num_frames = 5;
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 3), 1);
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 3), 2);
TestKeyframeAnimationEncoding<3>();
}
} // namespace draco

View File

@ -0,0 +1,102 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation.h"
#include "draco/core/draco_test_base.h"
namespace {
class KeyframeAnimationTest : public ::testing::Test {
protected:
KeyframeAnimationTest() {}
bool CreateAndAddTimestamps(int32_t num_frames) {
timestamps_.resize(num_frames);
for (int i = 0; i < timestamps_.size(); ++i)
timestamps_[i] = static_cast<draco::KeyframeAnimation::TimestampType>(i);
return keyframe_animation_.SetTimestamps(timestamps_);
}
int32_t CreateAndAddAnimationData(int32_t num_frames,
uint32_t num_components) {
// Create and add animation data with |num_components| components per frame.
animation_data_.resize(num_frames * num_components);
for (int i = 0; i < animation_data_.size(); ++i)
animation_data_[i] = static_cast<float>(i);
return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components,
animation_data_);
}
template <int num_components_t>
void CompareAnimationData() {
// Compare time stamp.
const auto timestamp_att = keyframe_animation_.timestamps();
for (int i = 0; i < timestamps_.size(); ++i) {
std::array<float, 1> att_value;
ASSERT_TRUE((timestamp_att->GetValue<float, 1>(
draco::AttributeValueIndex(i), &att_value)));
ASSERT_FLOAT_EQ(att_value[0], i);
}
// Compare keyframe data.
const auto keyframe_att = keyframe_animation_.keyframes(1);
for (int i = 0; i < animation_data_.size() / num_components_t; ++i) {
std::array<float, num_components_t> att_value;
ASSERT_TRUE((keyframe_att->GetValue<float, num_components_t>(
draco::AttributeValueIndex(i), &att_value)));
for (int j = 0; j < num_components_t; ++j) {
ASSERT_FLOAT_EQ(att_value[j], i * num_components_t + j);
}
}
}
template <int num_components_t>
void TestKeyframeAnimation(int32_t num_frames) {
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, num_components_t), 1);
CompareAnimationData<num_components_t>();
}
draco::KeyframeAnimation keyframe_animation_;
std::vector<draco::KeyframeAnimation::TimestampType> timestamps_;
std::vector<float> animation_data_;
};
// Test animation with 1 component and 10 frames.
TEST_F(KeyframeAnimationTest, OneComponent) { TestKeyframeAnimation<1>(10); }
// Test animation with 4 components and 10 frames.
TEST_F(KeyframeAnimationTest, FourComponent) { TestKeyframeAnimation<4>(10); }
// Test adding animation data before timestamp.
TEST_F(KeyframeAnimationTest, AddingAnimationFirst) {
ASSERT_EQ(CreateAndAddAnimationData(5, 1), 1);
ASSERT_TRUE(CreateAndAddTimestamps(5));
}
// Test adding timestamp more than once.
TEST_F(KeyframeAnimationTest, ErrorAddingTimestampsTwice) {
ASSERT_TRUE(CreateAndAddTimestamps(5));
ASSERT_FALSE(CreateAndAddTimestamps(5));
}
// Test animation with multiple animation data.
TEST_F(KeyframeAnimationTest, MultipleAnimationData) {
const int num_frames = 5;
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 1), 1);
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 2), 2);
}
} // namespace
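
The tests above double as usage documentation for KeyframeAnimation. The following is a hedged sketch of the same calls outside a test fixture, assuming only the header included above; BuildSampleAnimation and its frame counts and values are illustrative, not part of the library.

// Illustrative sketch only: build a five-frame animation with one channel of
// three float components per frame, mirroring the calls used in the tests.
#include <vector>
#include "draco/animation/keyframe_animation.h"

bool BuildSampleAnimation(draco::KeyframeAnimation *animation) {
  std::vector<draco::KeyframeAnimation::TimestampType> timestamps(5);
  for (int i = 0; i < 5; ++i) {
    timestamps[i] = static_cast<draco::KeyframeAnimation::TimestampType>(i);
  }
  if (!animation->SetTimestamps(timestamps)) {
    return false;  // Timestamps may only be set once.
  }
  std::vector<float> data(5 * 3);
  for (size_t i = 0; i < data.size(); ++i) {
    data[i] = static_cast<float>(i);
  }
  // AddKeyframes() returns the id of the new channel; the first channel is 1
  // because id 0 is used for the timestamps (see the quantization options in
  // the encoding test above).
  return animation->AddKeyframes(draco::DT_FLOAT32, 3, data) == 1;
}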

View File

@ -0,0 +1,145 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/attribute_octahedron_transform.h"
#include "draco/attributes/attribute_transform_type.h"
#include "draco/compression/attributes/normal_compression_utils.h"
namespace draco {
bool AttributeOctahedronTransform::InitFromAttribute(
const PointAttribute &attribute) {
const AttributeTransformData *const transform_data =
attribute.GetAttributeTransformData();
if (!transform_data ||
transform_data->transform_type() != ATTRIBUTE_OCTAHEDRON_TRANSFORM) {
return false; // Wrong transform type.
}
quantization_bits_ = transform_data->GetParameterValue<int32_t>(0);
return true;
}
void AttributeOctahedronTransform::CopyToAttributeTransformData(
AttributeTransformData *out_data) const {
out_data->set_transform_type(ATTRIBUTE_OCTAHEDRON_TRANSFORM);
out_data->AppendParameterValue(quantization_bits_);
}
bool AttributeOctahedronTransform::TransformAttribute(
const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
PointAttribute *target_attribute) {
return GeneratePortableAttribute(attribute, point_ids,
target_attribute->size(), target_attribute);
}
bool AttributeOctahedronTransform::InverseTransformAttribute(
const PointAttribute &attribute, PointAttribute *target_attribute) {
if (target_attribute->data_type() != DT_FLOAT32) {
return false;
}
const int num_points = target_attribute->size();
const int num_components = target_attribute->num_components();
if (num_components != 3) {
return false;
}
constexpr int kEntrySize = sizeof(float) * 3;
float att_val[3];
const int32_t *source_attribute_data = reinterpret_cast<const int32_t *>(
attribute.GetAddress(AttributeValueIndex(0)));
uint8_t *target_address =
target_attribute->GetAddress(AttributeValueIndex(0));
OctahedronToolBox octahedron_tool_box;
if (!octahedron_tool_box.SetQuantizationBits(quantization_bits_)) {
return false;
}
for (uint32_t i = 0; i < num_points; ++i) {
const int32_t s = *source_attribute_data++;
const int32_t t = *source_attribute_data++;
octahedron_tool_box.QuantizedOctahedralCoordsToUnitVector(s, t, att_val);
// Store the decoded floating point values into the attribute buffer.
std::memcpy(target_address, att_val, kEntrySize);
target_address += kEntrySize;
}
return true;
}
void AttributeOctahedronTransform::SetParameters(int quantization_bits) {
quantization_bits_ = quantization_bits;
}
bool AttributeOctahedronTransform::EncodeParameters(
EncoderBuffer *encoder_buffer) const {
if (is_initialized()) {
encoder_buffer->Encode(static_cast<uint8_t>(quantization_bits_));
return true;
}
return false;
}
bool AttributeOctahedronTransform::DecodeParameters(
const PointAttribute &attribute, DecoderBuffer *decoder_buffer) {
uint8_t quantization_bits;
if (!decoder_buffer->Decode(&quantization_bits)) {
return false;
}
quantization_bits_ = quantization_bits;
return true;
}
bool AttributeOctahedronTransform::GeneratePortableAttribute(
const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
int num_points, PointAttribute *target_attribute) const {
DRACO_DCHECK(is_initialized());
// Quantize all values in the order given by point_ids into portable
// attribute.
int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
target_attribute->GetAddress(AttributeValueIndex(0)));
float att_val[3];
int32_t dst_index = 0;
OctahedronToolBox converter;
if (!converter.SetQuantizationBits(quantization_bits_)) {
return false;
}
if (!point_ids.empty()) {
for (uint32_t i = 0; i < point_ids.size(); ++i) {
const AttributeValueIndex att_val_id =
attribute.mapped_index(point_ids[i]);
attribute.GetValue(att_val_id, att_val);
// Encode the vector into s and t octahedral coordinates.
int32_t s, t;
converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t);
portable_attribute_data[dst_index++] = s;
portable_attribute_data[dst_index++] = t;
}
} else {
for (PointIndex i(0); i < num_points; ++i) {
const AttributeValueIndex att_val_id = attribute.mapped_index(i);
attribute.GetValue(att_val_id, att_val);
// Encode the vector into s and t octahedral coordinates.
int32_t s, t;
converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t);
portable_attribute_data[dst_index++] = s;
portable_attribute_data[dst_index++] = t;
}
}
return true;
}
} // namespace draco

View File

@ -0,0 +1,81 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_
#include "draco/attributes/attribute_transform.h"
#include "draco/attributes/point_attribute.h"
#include "draco/core/encoder_buffer.h"
namespace draco {
// Attribute transform for attributes transformed to octahedral coordinates.
class AttributeOctahedronTransform : public AttributeTransform {
public:
AttributeOctahedronTransform() : quantization_bits_(-1) {}
// Return attribute transform type.
AttributeTransformType Type() const override {
return ATTRIBUTE_OCTAHEDRON_TRANSFORM;
}
// Try to init transform from attribute.
bool InitFromAttribute(const PointAttribute &attribute) override;
// Copy parameter values into the provided AttributeTransformData instance.
void CopyToAttributeTransformData(
AttributeTransformData *out_data) const override;
bool TransformAttribute(const PointAttribute &attribute,
const std::vector<PointIndex> &point_ids,
PointAttribute *target_attribute) override;
bool InverseTransformAttribute(const PointAttribute &attribute,
PointAttribute *target_attribute) override;
// Set number of quantization bits.
void SetParameters(int quantization_bits);
// Encode relevant parameters into buffer.
bool EncodeParameters(EncoderBuffer *encoder_buffer) const override;
bool DecodeParameters(const PointAttribute &attribute,
DecoderBuffer *decoder_buffer) override;
bool is_initialized() const { return quantization_bits_ != -1; }
int32_t quantization_bits() const { return quantization_bits_; }
protected:
DataType GetTransformedDataType(
const PointAttribute &attribute) const override {
return DT_UINT32;
}
int GetTransformedNumComponents(
const PointAttribute &attribute) const override {
return 2;
}
// Perform the actual transformation.
bool GeneratePortableAttribute(const PointAttribute &attribute,
const std::vector<PointIndex> &point_ids,
int num_points,
PointAttribute *target_attribute) const;
private:
int32_t quantization_bits_;
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_
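
As a hedged sketch of how the API declared above fits together (the input attribute, point count, and quantization level are assumptions; only calls visible in these files are used), a normal attribute can be converted to the two-component portable form and decoded back roughly like this:

// Illustrative sketch only: round-trip a normal attribute through the
// octahedral transform. The caller is assumed to provide a valid 3-component
// DT_FLOAT32 attribute with a valid point-to-value mapping.
#include <memory>
#include <vector>
#include "draco/attributes/attribute_octahedron_transform.h"
#include "draco/attributes/point_attribute.h"

bool RoundTripNormals(const draco::PointAttribute &normals, int num_points) {
  draco::AttributeOctahedronTransform transform;
  transform.SetParameters(/*quantization_bits=*/10);
  // The portable attribute uses DT_UINT32 storage with 2 components (s, t),
  // as reported by GetTransformedDataType()/GetTransformedNumComponents().
  std::unique_ptr<draco::PointAttribute> portable =
      transform.InitTransformedAttribute(normals, num_points);
  const std::vector<draco::PointIndex> no_remap;  // Empty: use mapped_index().
  if (!transform.TransformAttribute(normals, no_remap, portable.get())) {
    return false;
  }
  // Decoding requires a DT_FLOAT32 target with exactly 3 components.
  draco::PointAttribute decoded;
  decoded.Init(draco::GeometryAttribute::NORMAL, 3, draco::DT_FLOAT32,
               /*normalized=*/false, num_points);
  return transform.InverseTransformAttribute(*portable, &decoded);
}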

View File

@ -0,0 +1,260 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/attributes/attribute_transform_type.h"
#include "draco/core/quantization_utils.h"
namespace draco {
bool AttributeQuantizationTransform::InitFromAttribute(
const PointAttribute &attribute) {
const AttributeTransformData *const transform_data =
attribute.GetAttributeTransformData();
if (!transform_data ||
transform_data->transform_type() != ATTRIBUTE_QUANTIZATION_TRANSFORM) {
return false; // Wrong transform type.
}
int32_t byte_offset = 0;
quantization_bits_ = transform_data->GetParameterValue<int32_t>(byte_offset);
byte_offset += 4;
min_values_.resize(attribute.num_components());
for (int i = 0; i < attribute.num_components(); ++i) {
min_values_[i] = transform_data->GetParameterValue<float>(byte_offset);
byte_offset += 4;
}
range_ = transform_data->GetParameterValue<float>(byte_offset);
return true;
}
// Copy parameter values into the provided AttributeTransformData instance.
void AttributeQuantizationTransform::CopyToAttributeTransformData(
AttributeTransformData *out_data) const {
out_data->set_transform_type(ATTRIBUTE_QUANTIZATION_TRANSFORM);
out_data->AppendParameterValue(quantization_bits_);
for (int i = 0; i < min_values_.size(); ++i) {
out_data->AppendParameterValue(min_values_[i]);
}
out_data->AppendParameterValue(range_);
}
bool AttributeQuantizationTransform::TransformAttribute(
const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
PointAttribute *target_attribute) {
if (point_ids.empty()) {
GeneratePortableAttribute(attribute, target_attribute->size(),
target_attribute);
} else {
GeneratePortableAttribute(attribute, point_ids, target_attribute->size(),
target_attribute);
}
return true;
}
bool AttributeQuantizationTransform::InverseTransformAttribute(
const PointAttribute &attribute, PointAttribute *target_attribute) {
if (target_attribute->data_type() != DT_FLOAT32) {
return false;
}
// Convert all quantized values back to floats.
const int32_t max_quantized_value =
(1u << static_cast<uint32_t>(quantization_bits_)) - 1;
const int num_components = target_attribute->num_components();
const int entry_size = sizeof(float) * num_components;
const std::unique_ptr<float[]> att_val(new float[num_components]);
int quant_val_id = 0;
int out_byte_pos = 0;
Dequantizer dequantizer;
if (!dequantizer.Init(range_, max_quantized_value)) {
return false;
}
const int32_t *const source_attribute_data =
reinterpret_cast<const int32_t *>(
attribute.GetAddress(AttributeValueIndex(0)));
const int num_values = target_attribute->size();
for (uint32_t i = 0; i < num_values; ++i) {
for (int c = 0; c < num_components; ++c) {
float value =
dequantizer.DequantizeFloat(source_attribute_data[quant_val_id++]);
value = value + min_values_[c];
att_val[c] = value;
}
// Store the floating point value into the attribute buffer.
target_attribute->buffer()->Write(out_byte_pos, att_val.get(), entry_size);
out_byte_pos += entry_size;
}
return true;
}
bool AttributeQuantizationTransform::IsQuantizationValid(
int quantization_bits) {
// Currently we allow only up to 30 bit quantization.
return quantization_bits >= 1 && quantization_bits <= 30;
}
bool AttributeQuantizationTransform::SetParameters(int quantization_bits,
const float *min_values,
int num_components,
float range) {
if (!IsQuantizationValid(quantization_bits)) {
return false;
}
quantization_bits_ = quantization_bits;
min_values_.assign(min_values, min_values + num_components);
range_ = range;
return true;
}
bool AttributeQuantizationTransform::ComputeParameters(
const PointAttribute &attribute, const int quantization_bits) {
if (quantization_bits_ != -1) {
return false; // already initialized.
}
if (!IsQuantizationValid(quantization_bits)) {
return false;
}
quantization_bits_ = quantization_bits;
const int num_components = attribute.num_components();
range_ = 0.f;
min_values_ = std::vector<float>(num_components, 0.f);
const std::unique_ptr<float[]> max_values(new float[num_components]);
const std::unique_ptr<float[]> att_val(new float[num_components]);
// Compute minimum values and max value difference.
attribute.GetValue(AttributeValueIndex(0), att_val.get());
attribute.GetValue(AttributeValueIndex(0), min_values_.data());
attribute.GetValue(AttributeValueIndex(0), max_values.get());
for (AttributeValueIndex i(1); i < static_cast<uint32_t>(attribute.size());
++i) {
attribute.GetValue(i, att_val.get());
for (int c = 0; c < num_components; ++c) {
if (min_values_[c] > att_val[c]) {
min_values_[c] = att_val[c];
}
if (max_values[c] < att_val[c]) {
max_values[c] = att_val[c];
}
}
}
for (int c = 0; c < num_components; ++c) {
if (std::isnan(min_values_[c]) || std::isinf(min_values_[c]) ||
std::isnan(max_values[c]) || std::isinf(max_values[c])) {
return false;
}
const float dif = max_values[c] - min_values_[c];
if (dif > range_) {
range_ = dif;
}
}
// In case all values are the same, initialize the range to unit length. This
// will ensure that all values are quantized properly to the same value.
if (range_ == 0.f) {
range_ = 1.f;
}
return true;
}
bool AttributeQuantizationTransform::EncodeParameters(
EncoderBuffer *encoder_buffer) const {
if (is_initialized()) {
encoder_buffer->Encode(min_values_.data(),
sizeof(float) * min_values_.size());
encoder_buffer->Encode(range_);
encoder_buffer->Encode(static_cast<uint8_t>(quantization_bits_));
return true;
}
return false;
}
bool AttributeQuantizationTransform::DecodeParameters(
const PointAttribute &attribute, DecoderBuffer *decoder_buffer) {
min_values_.resize(attribute.num_components());
if (!decoder_buffer->Decode(&min_values_[0],
sizeof(float) * min_values_.size())) {
return false;
}
if (!decoder_buffer->Decode(&range_)) {
return false;
}
uint8_t quantization_bits;
if (!decoder_buffer->Decode(&quantization_bits)) {
return false;
}
if (!IsQuantizationValid(quantization_bits)) {
return false;
}
quantization_bits_ = quantization_bits;
return true;
}
void AttributeQuantizationTransform::GeneratePortableAttribute(
const PointAttribute &attribute, int num_points,
PointAttribute *target_attribute) const {
DRACO_DCHECK(is_initialized());
const int num_components = attribute.num_components();
// Quantize all values using the order given by point_ids.
int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
target_attribute->GetAddress(AttributeValueIndex(0)));
const uint32_t max_quantized_value = (1 << (quantization_bits_)) - 1;
Quantizer quantizer;
quantizer.Init(range(), max_quantized_value);
int32_t dst_index = 0;
const std::unique_ptr<float[]> att_val(new float[num_components]);
for (PointIndex i(0); i < num_points; ++i) {
const AttributeValueIndex att_val_id = attribute.mapped_index(i);
attribute.GetValue(att_val_id, att_val.get());
for (int c = 0; c < num_components; ++c) {
const float value = (att_val[c] - min_values()[c]);
const int32_t q_val = quantizer.QuantizeFloat(value);
portable_attribute_data[dst_index++] = q_val;
}
}
}
void AttributeQuantizationTransform::GeneratePortableAttribute(
const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
int num_points, PointAttribute *target_attribute) const {
DRACO_DCHECK(is_initialized());
const int num_components = attribute.num_components();
// Quantize all values using the order given by point_ids.
int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
target_attribute->GetAddress(AttributeValueIndex(0)));
const uint32_t max_quantized_value = (1 << (quantization_bits_)) - 1;
Quantizer quantizer;
quantizer.Init(range(), max_quantized_value);
int32_t dst_index = 0;
const std::unique_ptr<float[]> att_val(new float[num_components]);
for (uint32_t i = 0; i < point_ids.size(); ++i) {
const AttributeValueIndex att_val_id = attribute.mapped_index(point_ids[i]);
attribute.GetValue(att_val_id, att_val.get());
for (int c = 0; c < num_components; ++c) {
const float value = (att_val[c] - min_values()[c]);
const int32_t q_val = quantizer.QuantizeFloat(value);
portable_attribute_data[dst_index++] = q_val;
}
}
}
} // namespace draco

View File

@ -0,0 +1,102 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_
#include <vector>
#include "draco/attributes/attribute_transform.h"
#include "draco/attributes/point_attribute.h"
#include "draco/core/encoder_buffer.h"
namespace draco {
// Attribute transform for quantized attributes.
class AttributeQuantizationTransform : public AttributeTransform {
public:
AttributeQuantizationTransform() : quantization_bits_(-1), range_(0.f) {}
// Return attribute transform type.
AttributeTransformType Type() const override {
return ATTRIBUTE_QUANTIZATION_TRANSFORM;
}
// Try to init transform from attribute.
bool InitFromAttribute(const PointAttribute &attribute) override;
// Copy parameter values into the provided AttributeTransformData instance.
void CopyToAttributeTransformData(
AttributeTransformData *out_data) const override;
bool TransformAttribute(const PointAttribute &attribute,
const std::vector<PointIndex> &point_ids,
PointAttribute *target_attribute) override;
bool InverseTransformAttribute(const PointAttribute &attribute,
PointAttribute *target_attribute) override;
bool SetParameters(int quantization_bits, const float *min_values,
int num_components, float range);
bool ComputeParameters(const PointAttribute &attribute,
const int quantization_bits);
// Encode relevant parameters into buffer.
bool EncodeParameters(EncoderBuffer *encoder_buffer) const override;
bool DecodeParameters(const PointAttribute &attribute,
DecoderBuffer *decoder_buffer) override;
int32_t quantization_bits() const { return quantization_bits_; }
float min_value(int axis) const { return min_values_[axis]; }
const std::vector<float> &min_values() const { return min_values_; }
float range() const { return range_; }
bool is_initialized() const { return quantization_bits_ != -1; }
protected:
// Create portable attribute using 1:1 mapping between points in the input and
// output attribute.
void GeneratePortableAttribute(const PointAttribute &attribute,
int num_points,
PointAttribute *target_attribute) const;
// Create portable attribute using custom mapping between input and output
// points.
void GeneratePortableAttribute(const PointAttribute &attribute,
const std::vector<PointIndex> &point_ids,
int num_points,
PointAttribute *target_attribute) const;
DataType GetTransformedDataType(
const PointAttribute &attribute) const override {
return DT_UINT32;
}
int GetTransformedNumComponents(
const PointAttribute &attribute) const override {
return attribute.num_components();
}
static bool IsQuantizationValid(int quantization_bits);
private:
int32_t quantization_bits_;
// Minimal dequantized value for each component of the attribute.
std::vector<float> min_values_;
// Bounds of the dequantized attribute (max delta over all components).
float range_;
};
} // namespace draco
#endif  // DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_
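
A similar hedged sketch for the quantization transform declared above, assuming a float attribute is available; RoundTripPositions, the point count, and the 14-bit depth are illustrative assumptions.

// Illustrative sketch only: quantize a float attribute to 14 bits and
// dequantize it again using the API declared above.
#include <memory>
#include <vector>
#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/attributes/point_attribute.h"

bool RoundTripPositions(const draco::PointAttribute &positions, int num_points) {
  draco::AttributeQuantizationTransform transform;
  // Derives min_values_ and range_ from the attribute; fails on NaN/Inf input
  // or if the bit count is outside the supported 1..30 range.
  if (!transform.ComputeParameters(positions, /*quantization_bits=*/14)) {
    return false;
  }
  std::unique_ptr<draco::PointAttribute> portable =
      transform.InitTransformedAttribute(positions, num_points);
  if (!transform.TransformAttribute(positions, std::vector<draco::PointIndex>(),
                                    portable.get())) {
    return false;
  }
  // Decoding requires a DT_FLOAT32 target with the same component count.
  draco::PointAttribute decoded;
  decoded.Init(draco::GeometryAttribute::POSITION, positions.num_components(),
               draco::DT_FLOAT32, /*normalized=*/false, num_points);
  return transform.InverseTransformAttribute(*portable, &decoded);
}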

View File

@ -0,0 +1,40 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/attribute_transform.h"
namespace draco {
bool AttributeTransform::TransferToAttribute(PointAttribute *attribute) const {
std::unique_ptr<AttributeTransformData> transform_data(
new AttributeTransformData());
this->CopyToAttributeTransformData(transform_data.get());
attribute->SetAttributeTransformData(std::move(transform_data));
return true;
}
std::unique_ptr<PointAttribute> AttributeTransform::InitTransformedAttribute(
const PointAttribute &src_attribute, int num_entries) {
const int num_components = GetTransformedNumComponents(src_attribute);
const DataType dt = GetTransformedDataType(src_attribute);
GeometryAttribute va;
va.Init(src_attribute.attribute_type(), nullptr, num_components, dt, false,
num_components * DataTypeLength(dt), 0);
std::unique_ptr<PointAttribute> transformed_attribute(new PointAttribute(va));
transformed_attribute->Reset(num_entries);
transformed_attribute->SetIdentityMapping();
return transformed_attribute;
}
} // namespace draco

View File

@ -0,0 +1,76 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_
#include "draco/attributes/attribute_transform_data.h"
#include "draco/attributes/point_attribute.h"
#include "draco/core/decoder_buffer.h"
#include "draco/core/encoder_buffer.h"
namespace draco {
// Virtual base class for various attribute transforms, enforcing common
// interface where possible.
class AttributeTransform {
public:
virtual ~AttributeTransform() = default;
// Return attribute transform type.
virtual AttributeTransformType Type() const = 0;
// Try to init transform from attribute.
virtual bool InitFromAttribute(const PointAttribute &attribute) = 0;
// Copy parameter values into the provided AttributeTransformData instance.
virtual void CopyToAttributeTransformData(
AttributeTransformData *out_data) const = 0;
bool TransferToAttribute(PointAttribute *attribute) const;
// Applies the transform to |attribute| and stores the result in
// |target_attribute|. |point_ids| is an optional vector that can be used to
// remap values during the transform.
virtual bool TransformAttribute(const PointAttribute &attribute,
const std::vector<PointIndex> &point_ids,
PointAttribute *target_attribute) = 0;
// Applies an inverse transform to |attribute| and stores the result in
// |target_attribute|. In this case, |attribute| is an attribute that was
// already transformed (e.g. quantized) and |target_attribute| is the
// attribute before the transformation.
virtual bool InverseTransformAttribute(const PointAttribute &attribute,
PointAttribute *target_attribute) = 0;
// Encodes all data needed by the transformation into the |encoder_buffer|.
virtual bool EncodeParameters(EncoderBuffer *encoder_buffer) const = 0;
// Decodes all data needed to transform |attribute| back to the original
// format.
virtual bool DecodeParameters(const PointAttribute &attribute,
DecoderBuffer *decoder_buffer) = 0;
// Initializes a transformed attribute that can be used as target in the
// TransformAttribute() function call.
virtual std::unique_ptr<PointAttribute> InitTransformedAttribute(
const PointAttribute &src_attribute, int num_entries);
protected:
virtual DataType GetTransformedDataType(
const PointAttribute &attribute) const = 0;
virtual int GetTransformedNumComponents(
const PointAttribute &attribute) const = 0;
};
} // namespace draco
#endif  // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_

View File

@ -0,0 +1,71 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_
#include <memory>
#include "draco/attributes/attribute_transform_type.h"
#include "draco/core/data_buffer.h"
namespace draco {
// Class for holding parameter values for an attribute transform of a
// PointAttribute. This can be for example quantization data for an attribute
// that holds quantized values. This class provides only a basic storage for
// attribute transform parameters and it should be accessed only through wrapper
// classes for a specific transform (e.g. AttributeQuantizationTransform).
class AttributeTransformData {
public:
AttributeTransformData() : transform_type_(ATTRIBUTE_INVALID_TRANSFORM) {}
AttributeTransformData(const AttributeTransformData &data) = default;
// Returns the type of the attribute transform that is described by the class.
AttributeTransformType transform_type() const { return transform_type_; }
void set_transform_type(AttributeTransformType type) {
transform_type_ = type;
}
// Returns a parameter value at the given |byte_offset|.
template <typename DataTypeT>
DataTypeT GetParameterValue(int byte_offset) const {
DataTypeT out_data;
buffer_.Read(byte_offset, &out_data, sizeof(DataTypeT));
return out_data;
}
// Sets a parameter value at the given |byte_offset|.
template <typename DataTypeT>
void SetParameterValue(int byte_offset, const DataTypeT &in_data) {
if (byte_offset + sizeof(DataTypeT) > buffer_.data_size()) {
buffer_.Resize(byte_offset + sizeof(DataTypeT));
}
buffer_.Write(byte_offset, &in_data, sizeof(DataTypeT));
}
// Sets a parameter value at the end of the |buffer_|.
template <typename DataTypeT>
void AppendParameterValue(const DataTypeT &in_data) {
SetParameterValue(static_cast<int>(buffer_.data_size()), in_data);
}
private:
AttributeTransformType transform_type_;
DataBuffer buffer_;
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_
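
The class above is a thin byte buffer indexed by explicit offsets. A minimal sketch (the parameter values and the StoreAndLoadParameters helper are illustrative) of how a transform such as AttributeQuantizationTransform stores and reads back its parameters:

// Illustrative sketch only: append parameters and read them back by byte
// offset. An int32_t occupies offsets 0-3, so the floats start at offset 4,
// matching the offsets used by AttributeQuantizationTransform above.
#include <cstdint>
#include "draco/attributes/attribute_transform_data.h"

void StoreAndLoadParameters() {
  draco::AttributeTransformData data;
  data.set_transform_type(draco::ATTRIBUTE_QUANTIZATION_TRANSFORM);
  data.AppendParameterValue(static_cast<int32_t>(14));  // quantization bits
  data.AppendParameterValue(0.25f);                     // min value, component 0
  data.AppendParameterValue(2.0f);                      // range
  const int32_t bits = data.GetParameterValue<int32_t>(0);
  const float min_value = data.GetParameterValue<float>(4);
  const float range = data.GetParameterValue<float>(8);
  (void)bits;
  (void)min_value;
  (void)range;
}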

View File

@ -0,0 +1,30 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_
namespace draco {
// List of all currently supported attribute transforms.
enum AttributeTransformType {
ATTRIBUTE_INVALID_TRANSFORM = -1,
ATTRIBUTE_NO_TRANSFORM = 0,
ATTRIBUTE_QUANTIZATION_TRANSFORM = 1,
ATTRIBUTE_OCTAHEDRON_TRANSFORM = 2,
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_

View File

@ -0,0 +1,102 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/geometry_attribute.h"
namespace draco {
GeometryAttribute::GeometryAttribute()
: buffer_(nullptr),
num_components_(1),
data_type_(DT_FLOAT32),
byte_stride_(0),
byte_offset_(0),
attribute_type_(INVALID),
unique_id_(0) {}
void GeometryAttribute::Init(GeometryAttribute::Type attribute_type,
DataBuffer *buffer, int8_t num_components,
DataType data_type, bool normalized,
int64_t byte_stride, int64_t byte_offset) {
buffer_ = buffer;
if (buffer) {
buffer_descriptor_.buffer_id = buffer->buffer_id();
buffer_descriptor_.buffer_update_count = buffer->update_count();
}
num_components_ = num_components;
data_type_ = data_type;
normalized_ = normalized;
byte_stride_ = byte_stride;
byte_offset_ = byte_offset;
attribute_type_ = attribute_type;
}
bool GeometryAttribute::CopyFrom(const GeometryAttribute &src_att) {
num_components_ = src_att.num_components_;
data_type_ = src_att.data_type_;
normalized_ = src_att.normalized_;
byte_stride_ = src_att.byte_stride_;
byte_offset_ = src_att.byte_offset_;
attribute_type_ = src_att.attribute_type_;
buffer_descriptor_ = src_att.buffer_descriptor_;
unique_id_ = src_att.unique_id_;
if (src_att.buffer_ == nullptr) {
buffer_ = nullptr;
} else {
if (buffer_ == nullptr) {
return false;
}
buffer_->Update(src_att.buffer_->data(), src_att.buffer_->data_size());
}
return true;
}
bool GeometryAttribute::operator==(const GeometryAttribute &va) const {
if (attribute_type_ != va.attribute_type_) {
return false;
}
// It's OK to compare just the buffer descriptors here. We don't need to
// compare the buffers themselves.
if (buffer_descriptor_.buffer_id != va.buffer_descriptor_.buffer_id) {
return false;
}
if (buffer_descriptor_.buffer_update_count !=
va.buffer_descriptor_.buffer_update_count) {
return false;
}
if (num_components_ != va.num_components_) {
return false;
}
if (data_type_ != va.data_type_) {
return false;
}
if (byte_stride_ != va.byte_stride_) {
return false;
}
if (byte_offset_ != va.byte_offset_) {
return false;
}
return true;
}
void GeometryAttribute::ResetBuffer(DataBuffer *buffer, int64_t byte_stride,
int64_t byte_offset) {
buffer_ = buffer;
buffer_descriptor_.buffer_id = buffer->buffer_id();
buffer_descriptor_.buffer_update_count = buffer->update_count();
byte_stride_ = byte_stride;
byte_offset_ = byte_offset;
}
} // namespace draco

View File

@ -0,0 +1,350 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
#define DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
#include <array>
#include <limits>
#include "draco/attributes/geometry_indices.h"
#include "draco/core/data_buffer.h"
#include "draco/core/hash_utils.h"
namespace draco {
// The class provides access to a specific attribute which is stored in a
// DataBuffer, such as normals or coordinates. However, the GeometryAttribute
// class does not own the buffer and the buffer itself may store other data
// unrelated to this attribute (such as data for other attributes in which case
// we can have multiple GeometryAttributes accessing one buffer). Typically,
// all attributes for a point (or corner, face) are stored in one block, which
// is advantageous in terms of memory access. The length of the entire block
// is given by the byte_stride; the position where the attribute starts is
// given by the byte_offset; and the number of bytes the attribute occupies is
// given by the data_type and the number of components.
class GeometryAttribute {
public:
// Supported attribute types.
enum Type {
INVALID = -1,
// Named attributes start here. The difference between named and generic
// attributes is that for named attributes we know their purpose and we
// can apply some special methods when dealing with them (e.g. during
// encoding).
POSITION = 0,
NORMAL,
COLOR,
TEX_COORD,
// A special id used to mark attributes that are not assigned to any known
// predefined use case. Such attributes are often used for shader-specific
// data.
GENERIC,
// Total number of different attribute types.
// Always keep behind all named attributes.
NAMED_ATTRIBUTES_COUNT,
};
GeometryAttribute();
// Initializes and enables the attribute.
void Init(Type attribute_type, DataBuffer *buffer, int8_t num_components,
DataType data_type, bool normalized, int64_t byte_stride,
int64_t byte_offset);
bool IsValid() const { return buffer_ != nullptr; }
// Copies data from the source attribute to this attribute.
// This attribute must have a valid buffer allocated, otherwise the operation
// will fail and return false.
bool CopyFrom(const GeometryAttribute &src_att);
// Function for getting an attribute value with a specific format.
// Unsafe. Caller must ensure the accessed memory is valid.
// T is the attribute data type.
// att_components_t is the number of attribute components.
template <typename T, int att_components_t>
std::array<T, att_components_t> GetValue(
AttributeValueIndex att_index) const {
// Byte address of the attribute index.
const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
std::array<T, att_components_t> out;
buffer_->Read(byte_pos, &(out[0]), sizeof(out));
return out;
}
// Function for getting an attribute value with a specific format.
// T is the attribute data type.
// att_components_t is the number of attribute components.
template <typename T, int att_components_t>
bool GetValue(AttributeValueIndex att_index,
std::array<T, att_components_t> *out) const {
// Byte address of the attribute index.
const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
// Check we are not reading past end of data.
if (byte_pos + sizeof(*out) > buffer_->data_size()) {
return false;
}
buffer_->Read(byte_pos, &((*out)[0]), sizeof(*out));
return true;
}
// Returns the byte position of the attribute entry in the data buffer.
inline int64_t GetBytePos(AttributeValueIndex att_index) const {
return byte_offset_ + byte_stride_ * att_index.value();
}
inline const uint8_t *GetAddress(AttributeValueIndex att_index) const {
const int64_t byte_pos = GetBytePos(att_index);
return buffer_->data() + byte_pos;
}
inline uint8_t *GetAddress(AttributeValueIndex att_index) {
const int64_t byte_pos = GetBytePos(att_index);
return buffer_->data() + byte_pos;
}
inline bool IsAddressValid(const uint8_t *address) const {
return ((buffer_->data() + buffer_->data_size()) > address);
}
// Fills out_data with the raw value of the requested attribute entry.
// out_data must be at least byte_stride_ long.
void GetValue(AttributeValueIndex att_index, void *out_data) const {
const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
buffer_->Read(byte_pos, out_data, byte_stride_);
}
// Sets a value of an attribute entry. The input value must be allocated to
// cover all components of a single attribute entry.
void SetAttributeValue(AttributeValueIndex entry_index, const void *value) {
const int64_t byte_pos = entry_index.value() * byte_stride();
buffer_->Write(byte_pos, value, byte_stride());
}
// DEPRECATED: Use
// ConvertValue(AttributeValueIndex att_id,
// int out_num_components,
// OutT *out_val);
//
// Function for conversion of an attribute to a specific output format.
// OutT is the desired data type of the attribute.
// out_att_components_t is the number of components of the output format.
// Returns false when the conversion failed.
template <typename OutT, int out_att_components_t>
bool ConvertValue(AttributeValueIndex att_id, OutT *out_val) const {
return ConvertValue(att_id, out_att_components_t, out_val);
}
// Function for conversion of an attribute to a specific output format.
// |out_val| needs to be able to store |out_num_components| values.
// OutT is the desired data type of the attribute.
// Returns false when the conversion failed.
template <typename OutT>
bool ConvertValue(AttributeValueIndex att_id, int8_t out_num_components,
OutT *out_val) const {
if (out_val == nullptr) {
return false;
}
switch (data_type_) {
case DT_INT8:
return ConvertTypedValue<int8_t, OutT>(att_id, out_num_components,
out_val);
case DT_UINT8:
return ConvertTypedValue<uint8_t, OutT>(att_id, out_num_components,
out_val);
case DT_INT16:
return ConvertTypedValue<int16_t, OutT>(att_id, out_num_components,
out_val);
case DT_UINT16:
return ConvertTypedValue<uint16_t, OutT>(att_id, out_num_components,
out_val);
case DT_INT32:
return ConvertTypedValue<int32_t, OutT>(att_id, out_num_components,
out_val);
case DT_UINT32:
return ConvertTypedValue<uint32_t, OutT>(att_id, out_num_components,
out_val);
case DT_INT64:
return ConvertTypedValue<int64_t, OutT>(att_id, out_num_components,
out_val);
case DT_UINT64:
return ConvertTypedValue<uint64_t, OutT>(att_id, out_num_components,
out_val);
case DT_FLOAT32:
return ConvertTypedValue<float, OutT>(att_id, out_num_components,
out_val);
case DT_FLOAT64:
return ConvertTypedValue<double, OutT>(att_id, out_num_components,
out_val);
case DT_BOOL:
return ConvertTypedValue<bool, OutT>(att_id, out_num_components,
out_val);
default:
// Wrong attribute type.
return false;
}
}
// Function for conversion of an attribute to a specific output format.
// The |out_value| must be able to store all components of a single attribute
// entry.
// OutT is the desired data type of the attribute.
// Returns false when the conversion failed.
template <typename OutT>
bool ConvertValue(AttributeValueIndex att_index, OutT *out_value) const {
return ConvertValue<OutT>(att_index, num_components_, out_value);
}
// Utility function. Returns |attribute_type| as std::string.
static std::string TypeToString(Type attribute_type) {
switch (attribute_type) {
case INVALID:
return "INVALID";
case POSITION:
return "POSITION";
case NORMAL:
return "NORMAL";
case COLOR:
return "COLOR";
case TEX_COORD:
return "TEX_COORD";
case GENERIC:
return "GENERIC";
default:
return "UNKNOWN";
}
}
bool operator==(const GeometryAttribute &va) const;
// Returns the type of the attribute indicating the nature of the attribute.
Type attribute_type() const { return attribute_type_; }
void set_attribute_type(Type type) { attribute_type_ = type; }
// Returns the data type that is stored in the attribute.
DataType data_type() const { return data_type_; }
// Returns the number of components that are stored for each entry.
// For position attribute this is usually three (x,y,z),
// while texture coordinates have two components (u,v).
int8_t num_components() const { return num_components_; }
// Indicates whether the stored values should be normalized before
// interpretation, that is, divided by the max value of the data type.
bool normalized() const { return normalized_; }
// The buffer storing the entire data of the attribute.
const DataBuffer *buffer() const { return buffer_; }
// Returns the number of bytes between two attribute entries, that is, at
// least the size of the data type times the number of components.
int64_t byte_stride() const { return byte_stride_; }
// The offset where the attribute starts within the block of size byte_stride.
int64_t byte_offset() const { return byte_offset_; }
void set_byte_offset(int64_t byte_offset) { byte_offset_ = byte_offset; }
DataBufferDescriptor buffer_descriptor() const { return buffer_descriptor_; }
uint32_t unique_id() const { return unique_id_; }
void set_unique_id(uint32_t id) { unique_id_ = id; }
protected:
// Sets a new internal storage for the attribute.
void ResetBuffer(DataBuffer *buffer, int64_t byte_stride,
int64_t byte_offset);
private:
// Function for conversion of an attribute to a specific output format given a
// format of the stored attribute.
// T is the stored attribute data type.
// OutT is the desired data type of the attribute.
template <typename T, typename OutT>
bool ConvertTypedValue(AttributeValueIndex att_id, int8_t out_num_components,
OutT *out_value) const {
const uint8_t *src_address = GetAddress(att_id);
// Convert all components available in both the original and output formats.
for (int i = 0; i < std::min(num_components_, out_num_components); ++i) {
if (!IsAddressValid(src_address)) {
return false;
}
const T in_value = *reinterpret_cast<const T *>(src_address);
// Make sure the in_value fits within the range of values that OutT
// is able to represent. Perform the check only for integral types.
if (std::is_integral<T>::value && std::is_integral<OutT>::value) {
static constexpr OutT kOutMin =
std::is_signed<T>::value ? std::numeric_limits<OutT>::lowest() : 0;
if (in_value < kOutMin || in_value > std::numeric_limits<OutT>::max()) {
return false;
}
}
out_value[i] = static_cast<OutT>(in_value);
// When converting integer to floating point, normalize the value if
// necessary.
if (std::is_integral<T>::value && std::is_floating_point<OutT>::value &&
normalized_) {
out_value[i] /= static_cast<OutT>(std::numeric_limits<T>::max());
}
// TODO(ostava): Add handling of normalized attributes when converting
// between different integer representations. If the attribute is
// normalized, integer values should be converted as if they represent 0-1
// range. E.g. when we convert uint16 to uint8, the range <0, 2^16 - 1>
// should be converted to range <0, 2^8 - 1>.
src_address += sizeof(T);
}
// Fill empty data for unused output components if needed.
for (int i = num_components_; i < out_num_components; ++i) {
out_value[i] = static_cast<OutT>(0);
}
return true;
}
DataBuffer *buffer_;
// The buffer descriptor is stored at the time the buffer is attached to this
// attribute. The purpose is to detect if any changes happened to the buffer
// since the time it was attached.
DataBufferDescriptor buffer_descriptor_;
int8_t num_components_;
DataType data_type_;
bool normalized_;
int64_t byte_stride_;
int64_t byte_offset_;
Type attribute_type_;
// Unique id of this attribute. No two attributes can have the same unique
// id. It is used to identify each attribute, especially when there are
// multiple attributes of the same type in a point cloud.
uint32_t unique_id_;
friend struct GeometryAttributeHasher;
};
// Hashing support
// Function object for using Attribute as a hash key.
struct GeometryAttributeHasher {
size_t operator()(const GeometryAttribute &va) const {
size_t hash = HashCombine(va.buffer_descriptor_.buffer_id,
va.buffer_descriptor_.buffer_update_count);
hash = HashCombine(va.num_components_, hash);
hash = HashCombine(static_cast<int8_t>(va.data_type_), hash);
hash = HashCombine(static_cast<int8_t>(va.attribute_type_), hash);
hash = HashCombine(va.byte_stride_, hash);
return HashCombine(va.byte_offset_, hash);
}
};
// Function object for using GeometryAttribute::Type as a hash key.
struct GeometryAttributeTypeHasher {
size_t operator()(const GeometryAttribute::Type &at) const {
return static_cast<size_t>(at);
}
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
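
A brief hedged sketch of reading values out of a GeometryAttribute with the accessors declared above; the attribute is assumed to be a valid 3-component DT_FLOAT32 attribute, and ReadFirstEntry is an illustrative helper, not library API.

// Illustrative sketch only: bounds-checked read of entry 0 as three floats,
// followed by a converted read into doubles.
#include <array>
#include "draco/attributes/geometry_attribute.h"

bool ReadFirstEntry(const draco::GeometryAttribute &attribute) {
  std::array<float, 3> value;
  if (!attribute.GetValue<float, 3>(draco::AttributeValueIndex(0), &value)) {
    return false;  // Entry 0 would read past the end of the buffer.
  }
  // ConvertValue() converts from the stored data_type() to the requested
  // output type and zero-fills any extra output components.
  double converted[3];
  return attribute.ConvertValue<double>(draco::AttributeValueIndex(0), 3,
                                        converted);
}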

View File

@ -0,0 +1,54 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
#define DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
#include <inttypes.h>
#include <limits>
#include "draco/core/draco_index_type.h"
namespace draco {
// Index of an attribute value entry stored in a GeometryAttribute.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, AttributeValueIndex)
// Index of a point in a PointCloud.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, PointIndex)
// Vertex index in a Mesh or CornerTable.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, VertexIndex)
// Corner index that identifies a corner in a Mesh or CornerTable.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, CornerIndex)
// Face index for Mesh and CornerTable.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, FaceIndex)
// Constants denoting invalid indices.
static constexpr AttributeValueIndex kInvalidAttributeValueIndex(
std::numeric_limits<uint32_t>::max());
static constexpr PointIndex kInvalidPointIndex(
std::numeric_limits<uint32_t>::max());
static constexpr VertexIndex kInvalidVertexIndex(
std::numeric_limits<uint32_t>::max());
static constexpr CornerIndex kInvalidCornerIndex(
std::numeric_limits<uint32_t>::max());
static constexpr FaceIndex kInvalidFaceIndex(
std::numeric_limits<uint32_t>::max());
// TODO(ostava): Add strongly typed indices for attribute id and unique
// attribute id.
} // namespace draco
#endif // DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
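
The macro above generates a distinct wrapper type for each index space, which is intended to keep, for example, point indices from being silently used as attribute value indices. A small sketch of the resulting usage (SumOfRawIndices and the literal indices are illustrative):

// Illustrative sketch only: each index type wraps a uint32_t but is its own
// type; value() exposes the raw integer when one is needed, and the
// kInvalid*Index constants act as sentinels.
#include "draco/attributes/geometry_indices.h"

uint32_t SumOfRawIndices() {
  const draco::PointIndex point(7);
  const draco::AttributeValueIndex entry(3);
  if (point == draco::kInvalidPointIndex) {
    return 0;  // Sentinel check against the invalid-index constant.
  }
  return point.value() + entry.value();
}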

View File

@ -0,0 +1,225 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/point_attribute.h"
#include <unordered_map>
using std::unordered_map;
// Shortcut for typed conditionals.
template <bool B, class T, class F>
using conditional_t = typename std::conditional<B, T, F>::type;
namespace draco {
PointAttribute::PointAttribute()
: num_unique_entries_(0), identity_mapping_(false) {}
PointAttribute::PointAttribute(const GeometryAttribute &att)
: GeometryAttribute(att),
num_unique_entries_(0),
identity_mapping_(false) {}
void PointAttribute::Init(Type attribute_type, int8_t num_components,
DataType data_type, bool normalized,
size_t num_attribute_values) {
attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
GeometryAttribute::Init(attribute_type, attribute_buffer_.get(),
num_components, data_type, normalized,
DataTypeLength(data_type) * num_components, 0);
Reset(num_attribute_values);
SetIdentityMapping();
}
void PointAttribute::CopyFrom(const PointAttribute &src_att) {
if (buffer() == nullptr) {
// If the destination attribute doesn't have a valid buffer, create it.
attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
ResetBuffer(attribute_buffer_.get(), 0, 0);
}
if (!GeometryAttribute::CopyFrom(src_att)) {
return;
}
identity_mapping_ = src_att.identity_mapping_;
num_unique_entries_ = src_att.num_unique_entries_;
indices_map_ = src_att.indices_map_;
if (src_att.attribute_transform_data_) {
attribute_transform_data_ = std::unique_ptr<AttributeTransformData>(
new AttributeTransformData(*src_att.attribute_transform_data_));
} else {
attribute_transform_data_ = nullptr;
}
}
bool PointAttribute::Reset(size_t num_attribute_values) {
if (attribute_buffer_ == nullptr) {
attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
}
const int64_t entry_size = DataTypeLength(data_type()) * num_components();
if (!attribute_buffer_->Update(nullptr, num_attribute_values * entry_size)) {
return false;
}
// Assign the new buffer to the parent attribute.
ResetBuffer(attribute_buffer_.get(), entry_size, 0);
num_unique_entries_ = static_cast<uint32_t>(num_attribute_values);
return true;
}
void PointAttribute::Resize(size_t new_num_unique_entries) {
num_unique_entries_ = static_cast<uint32_t>(new_num_unique_entries);
attribute_buffer_->Resize(new_num_unique_entries * byte_stride());
}
#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
AttributeValueIndex::ValueType PointAttribute::DeduplicateValues(
const GeometryAttribute &in_att) {
return DeduplicateValues(in_att, AttributeValueIndex(0));
}
AttributeValueIndex::ValueType PointAttribute::DeduplicateValues(
const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
AttributeValueIndex::ValueType unique_vals = 0;
switch (in_att.data_type()) {
// Currently only float32, bool, and 8/16/32-bit integer data types are
// supported.
case DT_FLOAT32:
unique_vals = DeduplicateTypedValues<float>(in_att, in_att_offset);
break;
case DT_INT8:
unique_vals = DeduplicateTypedValues<int8_t>(in_att, in_att_offset);
break;
case DT_UINT8:
case DT_BOOL:
unique_vals = DeduplicateTypedValues<uint8_t>(in_att, in_att_offset);
break;
case DT_UINT16:
unique_vals = DeduplicateTypedValues<uint16_t>(in_att, in_att_offset);
break;
case DT_INT16:
unique_vals = DeduplicateTypedValues<int16_t>(in_att, in_att_offset);
break;
case DT_UINT32:
unique_vals = DeduplicateTypedValues<uint32_t>(in_att, in_att_offset);
break;
case DT_INT32:
unique_vals = DeduplicateTypedValues<int32_t>(in_att, in_att_offset);
break;
default:
return -1; // Unsupported data type.
}
if (unique_vals == 0) {
return -1; // Unexpected error.
}
return unique_vals;
}
// Helper function for calling DeduplicateFormattedValues<T, num_components_t>
// with the correct template arguments.
// Returns the number of unique attribute values.
template <typename T>
AttributeValueIndex::ValueType PointAttribute::DeduplicateTypedValues(
const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
// Select the correct method to call based on the number of attribute
// components.
switch (in_att.num_components()) {
case 1:
return DeduplicateFormattedValues<T, 1>(in_att, in_att_offset);
case 2:
return DeduplicateFormattedValues<T, 2>(in_att, in_att_offset);
case 3:
return DeduplicateFormattedValues<T, 3>(in_att, in_att_offset);
case 4:
return DeduplicateFormattedValues<T, 4>(in_att, in_att_offset);
default:
return 0;
}
}
template <typename T, int num_components_t>
AttributeValueIndex::ValueType PointAttribute::DeduplicateFormattedValues(
const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
// We want to detect duplicates using a hash map but we cannot hash floating
// point numbers directly so bit-copy floats to the same sized integers and
// hash them.
// First we need to determine which integer type to use (1, 2, 4 or 8 bytes).
// This is done at compile time using std::conditional<bool-expression,
// true-type, false-type>: when the expression is true the first type is
// selected, otherwise the second.
typedef conditional_t<sizeof(T) == 1, uint8_t,
conditional_t<sizeof(T) == 2, uint16_t,
conditional_t<sizeof(T) == 4, uint32_t,
/*else*/ uint64_t>>>
HashType;
AttributeValueIndex unique_vals(0);
typedef std::array<T, num_components_t> AttributeValue;
typedef std::array<HashType, num_components_t> AttributeHashableValue;
// Hash map storing index of the first attribute with a given value.
unordered_map<AttributeHashableValue, AttributeValueIndex,
HashArray<AttributeHashableValue>>
value_to_index_map;
AttributeValue att_value;
AttributeHashableValue hashable_value;
IndexTypeVector<AttributeValueIndex, AttributeValueIndex> value_map(
num_unique_entries_);
for (AttributeValueIndex i(0); i < num_unique_entries_; ++i) {
const AttributeValueIndex att_pos = i + in_att_offset;
att_value = in_att.GetValue<T, num_components_t>(att_pos);
// Convert the value to hashable type. Bit-copy real attributes to integers.
memcpy(&(hashable_value[0]), &(att_value[0]), sizeof(att_value));
// Check if the given attribute value has been used before already.
auto it = value_to_index_map.find(hashable_value);
if (it != value_to_index_map.end()) {
// Duplicated value found. Update index mapping.
value_map[i] = it->second;
} else {
// New unique value.
// Update the hash map with a new entry pointing to the latest unique
// vertex index.
value_to_index_map.insert(
std::pair<AttributeHashableValue, AttributeValueIndex>(hashable_value,
unique_vals));
// Add the unique value to the mesh builder.
SetAttributeValue(unique_vals, &att_value);
// Update index mapping.
value_map[i] = unique_vals;
++unique_vals;
}
}
if (unique_vals == num_unique_entries_) {
return unique_vals.value(); // Nothing has changed.
}
if (is_mapping_identity()) {
// Change identity mapping to the explicit one.
// The number of points is equal to the number of old unique values.
SetExplicitMapping(num_unique_entries_);
// Update the explicit map.
for (uint32_t i = 0; i < num_unique_entries_; ++i) {
SetPointMapEntry(PointIndex(i), value_map[AttributeValueIndex(i)]);
}
} else {
// Update point to value map using the mapping between old and new values.
for (PointIndex i(0); i < static_cast<uint32_t>(indices_map_.size()); ++i) {
SetPointMapEntry(i, value_map[indices_map_[i]]);
}
}
num_unique_entries_ = unique_vals.value();
return num_unique_entries_;
}
#endif
} // namespace draco
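
To illustrate the bit-copy hashing trick used by DeduplicateFormattedValues() above, here is a minimal standalone sketch (an editorial illustration, not part of the Draco sources): float components are memcpy'd into same-sized unsigned integers so that they can serve as hash-map keys, exactly as the hash-map loop above does.

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <unordered_map>
#include <vector>

// FNV-1a style combine over the bit-copied components (illustrative only;
// Draco uses its own HashArray functor).
struct Vec3BitsHash {
  std::size_t operator()(const std::array<std::uint32_t, 3> &a) const {
    std::size_t h = 1469598103934665603ull;
    for (std::uint32_t v : a) {
      h = (h ^ v) * 1099511628211ull;
    }
    return h;
  }
};

// Counts unique 3-component float values, deduplicating the same way as the
// code above: bit-copy the floats, then use the integer image as a map key.
inline std::size_t CountUniqueVec3(
    const std::vector<std::array<float, 3>> &values) {
  std::unordered_map<std::array<std::uint32_t, 3>, std::size_t, Vec3BitsHash>
      value_to_index;
  for (const auto &v : values) {
    std::array<std::uint32_t, 3> bits;
    std::memcpy(bits.data(), v.data(), sizeof(v));  // Bit-copy the floats.
    value_to_index.emplace(bits, value_to_index.size());
  }
  return value_to_index.size();
}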

View File

@ -0,0 +1,190 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_
#define DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_
#include <memory>
#include "draco/attributes/attribute_transform_data.h"
#include "draco/attributes/geometry_attribute.h"
#include "draco/core/draco_index_type_vector.h"
#include "draco/core/hash_utils.h"
#include "draco/core/macros.h"
#include "draco/draco_features.h"
namespace draco {
// Class for storing point specific data about each attribute. In general,
// multiple points stored in a point cloud can share the same attribute value
// and this class provides the necessary mapping between point ids and attribute
// value ids.
class PointAttribute : public GeometryAttribute {
public:
PointAttribute();
explicit PointAttribute(const GeometryAttribute &att);
// Make sure the move constructor is defined (needed for better performance
// when new attributes are added to PointCloud).
PointAttribute(PointAttribute &&attribute) = default;
PointAttribute &operator=(PointAttribute &&attribute) = default;
// Initializes a point attribute. By default the attribute will be set to
// identity mapping between point indices and attribute values. To set custom
// mapping use SetExplicitMapping() function.
void Init(Type attribute_type, int8_t num_components, DataType data_type,
bool normalized, size_t num_attribute_values);
// Copies attribute data from the provided |src_att| attribute.
void CopyFrom(const PointAttribute &src_att);
// Prepares the attribute storage for the specified number of entries.
bool Reset(size_t num_attribute_values);
size_t size() const { return num_unique_entries_; }
AttributeValueIndex mapped_index(PointIndex point_index) const {
if (identity_mapping_) {
return AttributeValueIndex(point_index.value());
}
return indices_map_[point_index];
}
DataBuffer *buffer() const { return attribute_buffer_.get(); }
bool is_mapping_identity() const { return identity_mapping_; }
size_t indices_map_size() const {
if (is_mapping_identity()) {
return 0;
}
return indices_map_.size();
}
const uint8_t *GetAddressOfMappedIndex(PointIndex point_index) const {
return GetAddress(mapped_index(point_index));
}
  // Sets the new number of unique attribute entries for the attribute. The
  // function resizes the attribute storage to hold |new_num_unique_entries|
  // entries.
  // All previous entries with AttributeValueIndex < |new_num_unique_entries|
  // are preserved. The caller needs to ensure that the PointAttribute is still
  // valid after the resizing operation (that is, each point is mapped to a
  // valid attribute value).
void Resize(size_t new_num_unique_entries);
// Functions for setting the type of mapping between point indices and
// attribute entry ids.
// This function sets the mapping to implicit, where point indices are equal
// to attribute entry indices.
void SetIdentityMapping() {
identity_mapping_ = true;
indices_map_.clear();
}
  // This function sets the mapping to be explicit, using the indices_map_
  // array that needs to be initialized by the caller.
void SetExplicitMapping(size_t num_points) {
identity_mapping_ = false;
indices_map_.resize(num_points, kInvalidAttributeValueIndex);
}
// Set an explicit map entry for a specific point index.
void SetPointMapEntry(PointIndex point_index,
AttributeValueIndex entry_index) {
DRACO_DCHECK(!identity_mapping_);
indices_map_[point_index] = entry_index;
}
// Same as GeometryAttribute::GetValue(), but using point id as the input.
// Mapping to attribute value index is performed automatically.
void GetMappedValue(PointIndex point_index, void *out_data) const {
return GetValue(mapped_index(point_index), out_data);
}
#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
// Deduplicate |in_att| values into |this| attribute. |in_att| can be equal
// to |this|.
// Returns -1 if the deduplication failed.
AttributeValueIndex::ValueType DeduplicateValues(
const GeometryAttribute &in_att);
// Same as above but the values read from |in_att| are sampled with the
// provided offset |in_att_offset|.
AttributeValueIndex::ValueType DeduplicateValues(
const GeometryAttribute &in_att, AttributeValueIndex in_att_offset);
#endif
// Set attribute transform data for the attribute. The data is used to store
// the type and parameters of the transform that is applied on the attribute
// data (optional).
void SetAttributeTransformData(
std::unique_ptr<AttributeTransformData> transform_data) {
attribute_transform_data_ = std::move(transform_data);
}
const AttributeTransformData *GetAttributeTransformData() const {
return attribute_transform_data_.get();
}
private:
#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
template <typename T>
AttributeValueIndex::ValueType DeduplicateTypedValues(
const GeometryAttribute &in_att, AttributeValueIndex in_att_offset);
template <typename T, int COMPONENTS_COUNT>
AttributeValueIndex::ValueType DeduplicateFormattedValues(
const GeometryAttribute &in_att, AttributeValueIndex in_att_offset);
#endif
// Data storage for attribute values. GeometryAttribute itself doesn't own its
// buffer so we need to allocate it here.
std::unique_ptr<DataBuffer> attribute_buffer_;
// Mapping between point ids and attribute value ids.
IndexTypeVector<PointIndex, AttributeValueIndex> indices_map_;
AttributeValueIndex::ValueType num_unique_entries_;
  // Flag indicating whether the mapping between point ids and attribute
  // values is the identity mapping.
bool identity_mapping_;
// If an attribute contains transformed data (e.g. quantized), we can specify
// the attribute transform here and use it to transform the attribute back to
// its original format.
std::unique_ptr<AttributeTransformData> attribute_transform_data_;
friend struct PointAttributeHasher;
};
// Hash functor for the PointAttribute class.
struct PointAttributeHasher {
size_t operator()(const PointAttribute &attribute) const {
GeometryAttributeHasher base_hasher;
size_t hash = base_hasher(attribute);
hash = HashCombine(attribute.identity_mapping_, hash);
hash = HashCombine(attribute.num_unique_entries_, hash);
hash = HashCombine(attribute.indices_map_.size(), hash);
if (!attribute.indices_map_.empty()) {
const uint64_t indices_hash = FingerprintString(
reinterpret_cast<const char *>(attribute.indices_map_.data()),
attribute.indices_map_.size());
hash = HashCombine(indices_hash, hash);
}
if (attribute.attribute_buffer_ != nullptr) {
const uint64_t buffer_hash = FingerprintString(
reinterpret_cast<const char *>(attribute.attribute_buffer_->data()),
attribute.attribute_buffer_->data_size());
hash = HashCombine(buffer_hash, hash);
}
return hash;
}
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_
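
As a usage illustration of the identity vs. explicit mapping described in the class comment above, the following editorial sketch (assuming the Draco headers and library are available; not part of the Draco sources) stores one position value and maps two points to it:

#include "draco/attributes/point_attribute.h"

// Editorial sketch: one unique value shared by two points via an explicit
// point-to-value mapping.
inline void BuildSharedPositionAttribute() {
  draco::PointAttribute pa;
  // 3 float components per value, not normalized, 1 unique value.
  pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 1);
  const float pos[3] = {0.f, 1.f, 2.f};
  pa.SetAttributeValue(draco::AttributeValueIndex(0), pos);
  // Switch from the default identity mapping to an explicit one for 2 points
  // and map both points onto the single stored value.
  pa.SetExplicitMapping(2);
  pa.SetPointMapEntry(draco::PointIndex(0), draco::AttributeValueIndex(0));
  pa.SetPointMapEntry(draco::PointIndex(1), draco::AttributeValueIndex(0));
}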

View File

@ -0,0 +1,128 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/point_attribute.h"
#include "draco/core/draco_test_base.h"
namespace {
class PointAttributeTest : public ::testing::Test {
protected:
PointAttributeTest() {}
};
TEST_F(PointAttributeTest, TestCopy) {
// This test verifies that PointAttribute can copy data from another point
// attribute.
draco::PointAttribute pa;
pa.Init(draco::GeometryAttribute::POSITION, 1, draco::DT_INT32, false, 10);
for (int32_t i = 0; i < 10; ++i) {
pa.SetAttributeValue(draco::AttributeValueIndex(i), &i);
}
pa.set_unique_id(12);
draco::PointAttribute other_pa;
other_pa.CopyFrom(pa);
draco::PointAttributeHasher hasher;
ASSERT_EQ(hasher(pa), hasher(other_pa));
ASSERT_EQ(pa.unique_id(), other_pa.unique_id());
  // Matching hashes are not a direct element-wise comparison, so also verify
  // that the attribute values themselves were copied correctly.
for (int32_t i = 0; i < 10; ++i) {
int32_t data;
other_pa.GetValue(draco::AttributeValueIndex(i), &data);
ASSERT_EQ(data, i);
}
}
TEST_F(PointAttributeTest, TestGetValueFloat) {
draco::PointAttribute pa;
pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5);
float points[3];
for (int32_t i = 0; i < 5; ++i) {
points[0] = i * 3.0;
points[1] = (i * 3.0) + 1.0;
points[2] = (i * 3.0) + 2.0;
pa.SetAttributeValue(draco::AttributeValueIndex(i), &points);
}
for (int32_t i = 0; i < 5; ++i) {
pa.GetValue(draco::AttributeValueIndex(i), &points);
ASSERT_FLOAT_EQ(points[0], i * 3.0);
ASSERT_FLOAT_EQ(points[1], (i * 3.0) + 1.0);
ASSERT_FLOAT_EQ(points[2], (i * 3.0) + 2.0);
}
}
TEST_F(PointAttributeTest, TestGetArray) {
draco::PointAttribute pa;
pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5);
float points[3];
for (int32_t i = 0; i < 5; ++i) {
points[0] = i * 3.0;
points[1] = (i * 3.0) + 1.0;
points[2] = (i * 3.0) + 2.0;
pa.SetAttributeValue(draco::AttributeValueIndex(i), &points);
}
for (int32_t i = 0; i < 5; ++i) {
std::array<float, 3> att_value;
att_value = pa.GetValue<float, 3>(draco::AttributeValueIndex(i));
ASSERT_FLOAT_EQ(att_value[0], i * 3.0);
ASSERT_FLOAT_EQ(att_value[1], (i * 3.0) + 1.0);
ASSERT_FLOAT_EQ(att_value[2], (i * 3.0) + 2.0);
}
for (int32_t i = 0; i < 5; ++i) {
std::array<float, 3> att_value;
EXPECT_TRUE(
(pa.GetValue<float, 3>(draco::AttributeValueIndex(i), &att_value)));
ASSERT_FLOAT_EQ(att_value[0], i * 3.0);
ASSERT_FLOAT_EQ(att_value[1], (i * 3.0) + 1.0);
ASSERT_FLOAT_EQ(att_value[2], (i * 3.0) + 2.0);
}
}
TEST_F(PointAttributeTest, TestArrayReadError) {
draco::PointAttribute pa;
pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5);
float points[3];
for (int32_t i = 0; i < 5; ++i) {
points[0] = i * 3.0;
points[1] = (i * 3.0) + 1.0;
points[2] = (i * 3.0) + 2.0;
pa.SetAttributeValue(draco::AttributeValueIndex(i), &points);
}
std::array<float, 3> att_value;
EXPECT_FALSE(
(pa.GetValue<float, 3>(draco::AttributeValueIndex(5), &att_value)));
}
TEST_F(PointAttributeTest, TestResize) {
draco::PointAttribute pa;
pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5);
ASSERT_EQ(pa.size(), 5);
ASSERT_EQ(pa.buffer()->data_size(), 4 * 3 * 5);
pa.Resize(10);
ASSERT_EQ(pa.size(), 10);
ASSERT_EQ(pa.buffer()->data_size(), 4 * 3 * 10);
}
} // namespace
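
The buffer sizes asserted in TestResize above follow directly from the entry layout used throughout these tests: each entry occupies num_components * sizeof(float) bytes. A standalone, compile-time restatement of that arithmetic (editorial sketch, assuming 4-byte floats just as the test does):

#include <cstddef>

// DT_FLOAT32 is 4 bytes and the attribute has 3 components, so every entry is
// 12 bytes; 5 entries occupy 60 bytes and 10 entries occupy 120 bytes.
constexpr std::size_t kEntrySizeBytes = sizeof(float) * 3;
static_assert(kEntrySizeBytes * 5 == 4 * 3 * 5, "size checked before Resize(10)");
static_assert(kEntrySizeBytes * 10 == 4 * 3 * 10, "size checked after Resize(10)");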

View File

@ -0,0 +1,127 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/attributes_decoder.h"
#include "draco/core/varint_decoding.h"
namespace draco {
AttributesDecoder::AttributesDecoder()
: point_cloud_decoder_(nullptr), point_cloud_(nullptr) {}
bool AttributesDecoder::Init(PointCloudDecoder *decoder, PointCloud *pc) {
point_cloud_decoder_ = decoder;
point_cloud_ = pc;
return true;
}
bool AttributesDecoder::DecodeAttributesDecoderData(DecoderBuffer *in_buffer) {
// Decode and create attributes.
uint32_t num_attributes;
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
if (point_cloud_decoder_->bitstream_version() <
DRACO_BITSTREAM_VERSION(2, 0)) {
if (!in_buffer->Decode(&num_attributes)) {
return false;
}
} else
#endif
{
if (!DecodeVarint(&num_attributes, in_buffer)) {
return false;
}
}
// Check that decoded number of attributes is valid.
if (num_attributes == 0) {
return false;
}
if (num_attributes > 5 * in_buffer->remaining_size()) {
// The decoded number of attributes is unreasonably high, because at least
// five bytes of attribute descriptor data per attribute are expected.
return false;
}
// Decode attribute descriptor data.
point_attribute_ids_.resize(num_attributes);
PointCloud *pc = point_cloud_;
for (uint32_t i = 0; i < num_attributes; ++i) {
// Decode attribute descriptor data.
uint8_t att_type, data_type, num_components, normalized;
if (!in_buffer->Decode(&att_type)) {
return false;
}
if (!in_buffer->Decode(&data_type)) {
return false;
}
if (!in_buffer->Decode(&num_components)) {
return false;
}
if (!in_buffer->Decode(&normalized)) {
return false;
}
if (att_type >= GeometryAttribute::NAMED_ATTRIBUTES_COUNT) {
return false;
}
if (data_type == DT_INVALID || data_type >= DT_TYPES_COUNT) {
return false;
}
// Check decoded attribute descriptor data.
if (num_components == 0) {
return false;
}
// Add the attribute to the point cloud.
const DataType draco_dt = static_cast<DataType>(data_type);
GeometryAttribute ga;
ga.Init(static_cast<GeometryAttribute::Type>(att_type), nullptr,
num_components, draco_dt, normalized > 0,
DataTypeLength(draco_dt) * num_components, 0);
uint32_t unique_id;
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
if (point_cloud_decoder_->bitstream_version() <
DRACO_BITSTREAM_VERSION(1, 3)) {
uint16_t custom_id;
if (!in_buffer->Decode(&custom_id)) {
return false;
}
// TODO(draco-eng): Add "custom_id" to attribute metadata.
unique_id = static_cast<uint32_t>(custom_id);
ga.set_unique_id(unique_id);
} else
#endif
{
if (!DecodeVarint(&unique_id, in_buffer)) {
return false;
}
ga.set_unique_id(unique_id);
}
const int att_id = pc->AddAttribute(
std::unique_ptr<PointAttribute>(new PointAttribute(ga)));
pc->attribute(att_id)->set_unique_id(unique_id);
point_attribute_ids_[i] = att_id;
// Update the inverse map.
if (att_id >=
static_cast<int32_t>(point_attribute_to_local_id_map_.size())) {
point_attribute_to_local_id_map_.resize(att_id + 1, -1);
}
point_attribute_to_local_id_map_[att_id] = i;
}
return true;
}
} // namespace draco
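
DecodeVarint() above reads the attribute count and the unique ids as variable-length integers. As an editorial illustration only, assuming the usual base-128 continuation-bit layout (the authoritative implementation lives in draco/core/varint_decoding.h), such a value can be decoded like this:

#include <cstddef>
#include <cstdint>

// Editorial sketch of a base-128 varint decoder (assumed layout: 7 data bits
// per byte, most-significant bit set on every byte except the last). Returns
// the number of bytes consumed, or 0 on truncated/over-long input.
inline std::size_t DecodeVarint32Sketch(const std::uint8_t *data,
                                        std::size_t size, std::uint32_t *out) {
  std::uint32_t value = 0;
  for (std::size_t i = 0; i < size && i < 5; ++i) {
    value |= static_cast<std::uint32_t>(data[i] & 0x7f) << (7 * i);
    if ((data[i] & 0x80) == 0) {  // No continuation bit: this was the last byte.
      *out = value;
      return i + 1;
    }
  }
  return 0;
}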

View File

@ -0,0 +1,97 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_
#include <vector>
#include "draco/compression/attributes/attributes_decoder_interface.h"
#include "draco/compression/point_cloud/point_cloud_decoder.h"
#include "draco/core/decoder_buffer.h"
#include "draco/draco_features.h"
#include "draco/point_cloud/point_cloud.h"
namespace draco {
// Base class for decoding one or more attributes that were encoded with a
// matching AttributesEncoder. It is a basic implementation of
// AttributesDecoderInterface that provides functionality that is shared between
// all AttributesDecoders.
class AttributesDecoder : public AttributesDecoderInterface {
public:
AttributesDecoder();
virtual ~AttributesDecoder() = default;
// Called after all attribute decoders are created. It can be used to perform
// any custom initialization.
bool Init(PointCloudDecoder *decoder, PointCloud *pc) override;
// Decodes any attribute decoder specific data from the |in_buffer|.
bool DecodeAttributesDecoderData(DecoderBuffer *in_buffer) override;
int32_t GetAttributeId(int i) const override {
return point_attribute_ids_[i];
}
int32_t GetNumAttributes() const override {
return static_cast<int32_t>(point_attribute_ids_.size());
}
PointCloudDecoder *GetDecoder() const override {
return point_cloud_decoder_;
}
// Decodes attribute data from the source buffer.
bool DecodeAttributes(DecoderBuffer *in_buffer) override {
if (!DecodePortableAttributes(in_buffer)) {
return false;
}
if (!DecodeDataNeededByPortableTransforms(in_buffer)) {
return false;
}
if (!TransformAttributesToOriginalFormat()) {
return false;
}
return true;
}
protected:
int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const {
const int id_map_size =
static_cast<int>(point_attribute_to_local_id_map_.size());
if (point_attribute_id >= id_map_size) {
return -1;
}
return point_attribute_to_local_id_map_[point_attribute_id];
}
virtual bool DecodePortableAttributes(DecoderBuffer *in_buffer) = 0;
virtual bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) {
return true;
}
virtual bool TransformAttributesToOriginalFormat() { return true; }
private:
// List of attribute ids that need to be decoded with this decoder.
std::vector<int32_t> point_attribute_ids_;
  // Map between point attribute id and the local id (i.e., the inverse of
  // |point_attribute_ids_|).
std::vector<int32_t> point_attribute_to_local_id_map_;
PointCloudDecoder *point_cloud_decoder_;
PointCloud *point_cloud_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_
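
To make the three decoding phases called by DecodeAttributes() concrete, here is a hypothetical no-op subclass (editorial sketch, not a Draco class) showing which hooks a concrete decoder overrides and the order in which they run:

#include "draco/compression/attributes/attributes_decoder.h"

namespace example {
// Hypothetical stub: DecodeAttributes() calls the three overrides below in
// the order they are declared here.
class NullAttributesDecoder : public draco::AttributesDecoder {
 protected:
  // Phase 1: read the losslessly coded (portable) attribute data.
  bool DecodePortableAttributes(draco::DecoderBuffer * /*in_buffer*/) override {
    return true;
  }
  // Phase 2: read data needed to undo the portable transform (e.g.
  // quantization parameters). The base-class default also just returns true.
  bool DecodeDataNeededByPortableTransforms(
      draco::DecoderBuffer * /*in_buffer*/) override {
    return true;
  }
  // Phase 3: convert the portable data back to the original format.
  bool TransformAttributesToOriginalFormat() override { return true; }
};
}  // namespace example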

View File

@ -0,0 +1,62 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_
#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_
#include <vector>
#include "draco/core/decoder_buffer.h"
#include "draco/point_cloud/point_cloud.h"
namespace draco {
class PointCloudDecoder;
// Interface class for decoding one or more attributes that were encoded with a
// matching AttributesEncoder. It provides only the basic interface
// that is used by the PointCloudDecoder. The actual decoding must be
// implemented in derived classes using the DecodeAttributes() method.
class AttributesDecoderInterface {
public:
AttributesDecoderInterface() = default;
virtual ~AttributesDecoderInterface() = default;
// Called after all attribute decoders are created. It can be used to perform
// any custom initialization.
virtual bool Init(PointCloudDecoder *decoder, PointCloud *pc) = 0;
// Decodes any attribute decoder specific data from the |in_buffer|.
virtual bool DecodeAttributesDecoderData(DecoderBuffer *in_buffer) = 0;
// Decode attribute data from the source buffer. Needs to be implemented by
// the derived classes.
virtual bool DecodeAttributes(DecoderBuffer *in_buffer) = 0;
virtual int32_t GetAttributeId(int i) const = 0;
virtual int32_t GetNumAttributes() const = 0;
virtual PointCloudDecoder *GetDecoder() const = 0;
// Returns an attribute containing data processed by the attribute transform.
// (see TransformToPortableFormat() method). This data is guaranteed to be
// same for encoder and decoder and it can be used by predictors.
virtual const PointAttribute *GetPortableAttribute(
int32_t /* point_attribute_id */) {
return nullptr;
}
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_

View File

@ -0,0 +1,49 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/attributes_encoder.h"
#include "draco/core/varint_encoding.h"
namespace draco {
AttributesEncoder::AttributesEncoder()
: point_cloud_encoder_(nullptr), point_cloud_(nullptr) {}
AttributesEncoder::AttributesEncoder(int att_id) : AttributesEncoder() {
AddAttributeId(att_id);
}
bool AttributesEncoder::Init(PointCloudEncoder *encoder, const PointCloud *pc) {
point_cloud_encoder_ = encoder;
point_cloud_ = pc;
return true;
}
bool AttributesEncoder::EncodeAttributesEncoderData(EncoderBuffer *out_buffer) {
// Encode data about all attributes.
EncodeVarint(num_attributes(), out_buffer);
for (uint32_t i = 0; i < num_attributes(); ++i) {
const int32_t att_id = point_attribute_ids_[i];
const PointAttribute *const pa = point_cloud_->attribute(att_id);
out_buffer->Encode(static_cast<uint8_t>(pa->attribute_type()));
out_buffer->Encode(static_cast<uint8_t>(pa->data_type()));
out_buffer->Encode(static_cast<uint8_t>(pa->num_components()));
out_buffer->Encode(static_cast<uint8_t>(pa->normalized()));
EncodeVarint(pa->unique_id(), out_buffer);
}
return true;
}
} // namespace draco

View File

@ -0,0 +1,154 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_
#include "draco/attributes/point_attribute.h"
#include "draco/core/encoder_buffer.h"
#include "draco/point_cloud/point_cloud.h"
namespace draco {
class PointCloudEncoder;
// Base class for encoding one or more attributes of a PointCloud (or other
// geometry). This base class provides only the basic interface that is used
// by the PointCloudEncoder.
class AttributesEncoder {
public:
AttributesEncoder();
// Constructs an attribute encoder associated with a given point attribute.
explicit AttributesEncoder(int point_attrib_id);
virtual ~AttributesEncoder() = default;
// Called after all attribute encoders are created. It can be used to perform
// any custom initialization, including setting up attribute dependencies.
// Note: no data should be encoded in this function, because the decoder may
  // process encoders in a different order from the encoder.
virtual bool Init(PointCloudEncoder *encoder, const PointCloud *pc);
// Encodes data needed by the target attribute decoder.
virtual bool EncodeAttributesEncoderData(EncoderBuffer *out_buffer);
// Returns a unique identifier of the given encoder type, that is used during
// decoding to construct the corresponding attribute decoder.
virtual uint8_t GetUniqueId() const = 0;
// Encode attribute data to the target buffer.
virtual bool EncodeAttributes(EncoderBuffer *out_buffer) {
if (!TransformAttributesToPortableFormat()) {
return false;
}
if (!EncodePortableAttributes(out_buffer)) {
return false;
}
// Encode data needed by portable transforms after the attribute is encoded.
// This corresponds to the order in which the data is going to be decoded by
// the decoder.
if (!EncodeDataNeededByPortableTransforms(out_buffer)) {
return false;
}
return true;
}
// Returns the number of attributes that need to be encoded before the
// specified attribute is encoded.
// Note that the attribute is specified by its point attribute id.
virtual int NumParentAttributes(int32_t /* point_attribute_id */) const {
return 0;
}
virtual int GetParentAttributeId(int32_t /* point_attribute_id */,
int32_t /* parent_i */) const {
return -1;
}
// Marks a given attribute as a parent of another attribute.
virtual bool MarkParentAttribute(int32_t /* point_attribute_id */) {
return false;
}
// Returns an attribute containing data processed by the attribute transform.
// (see TransformToPortableFormat() method). This data is guaranteed to be
// encoded losslessly and it can be safely used for predictors.
virtual const PointAttribute *GetPortableAttribute(
int32_t /* point_attribute_id */) {
return nullptr;
}
void AddAttributeId(int32_t id) {
point_attribute_ids_.push_back(id);
if (id >= static_cast<int32_t>(point_attribute_to_local_id_map_.size())) {
point_attribute_to_local_id_map_.resize(id + 1, -1);
}
point_attribute_to_local_id_map_[id] =
static_cast<int32_t>(point_attribute_ids_.size()) - 1;
}
// Sets new attribute point ids (replacing the existing ones).
void SetAttributeIds(const std::vector<int32_t> &point_attribute_ids) {
point_attribute_ids_.clear();
point_attribute_to_local_id_map_.clear();
for (int32_t att_id : point_attribute_ids) {
AddAttributeId(att_id);
}
}
int32_t GetAttributeId(int i) const { return point_attribute_ids_[i]; }
uint32_t num_attributes() const {
return static_cast<uint32_t>(point_attribute_ids_.size());
}
PointCloudEncoder *encoder() const { return point_cloud_encoder_; }
protected:
// Transforms the input attribute data into a form that should be losslessly
// encoded (transform itself can be lossy).
virtual bool TransformAttributesToPortableFormat() { return true; }
// Losslessly encodes data of all portable attributes.
// Precondition: All attributes must have been transformed into portable
// format at this point (see TransformAttributesToPortableFormat() method).
virtual bool EncodePortableAttributes(EncoderBuffer *out_buffer) = 0;
// Encodes any data needed to revert the transform to portable format for each
// attribute (e.g. data needed for dequantization of quantized values).
virtual bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) {
return true;
}
int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const {
const int id_map_size =
static_cast<int>(point_attribute_to_local_id_map_.size());
if (point_attribute_id >= id_map_size) {
return -1;
}
return point_attribute_to_local_id_map_[point_attribute_id];
}
private:
// List of attribute ids that need to be encoded with this encoder.
std::vector<int32_t> point_attribute_ids_;
  // Map between point attribute id and the local id (i.e., the inverse of
  // |point_attribute_ids_|).
std::vector<int32_t> point_attribute_to_local_id_map_;
PointCloudEncoder *point_cloud_encoder_;
const PointCloud *point_cloud_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_
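
Mirroring the decoder side, a concrete encoder only has to supply the unique id used to construct the matching decoder and the lossless encoding step; the remaining hooks have defaults. A hypothetical stub (editorial sketch; the id value is made up):

#include <cstdint>

#include "draco/compression/attributes/attributes_encoder.h"

namespace example {
// Hypothetical stub showing the two pure-virtual methods of
// draco::AttributesEncoder. EncodeAttributes() will call
// TransformAttributesToPortableFormat(), EncodePortableAttributes() and
// EncodeDataNeededByPortableTransforms() in that order.
class NullAttributesEncoder : public draco::AttributesEncoder {
 public:
  uint8_t GetUniqueId() const override { return 200; }  // Made-up id.

 protected:
  bool EncodePortableAttributes(draco::EncoderBuffer * /*out_buffer*/) override {
    return true;  // A real encoder writes the portable attribute data here.
  }
};
}  // namespace example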

View File

@ -0,0 +1,556 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/kd_tree_attributes_decoder.h"
#include "draco/compression/attributes/kd_tree_attributes_shared.h"
#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h"
#include "draco/compression/point_cloud/algorithms/float_points_tree_decoder.h"
#include "draco/compression/point_cloud/point_cloud_decoder.h"
#include "draco/core/draco_types.h"
#include "draco/core/varint_decoding.h"
namespace draco {
// attribute, offset_dimensionality, data_type, data_size, num_components
using AttributeTuple =
std::tuple<PointAttribute *, uint32_t, DataType, uint32_t, uint32_t>;
// Output iterator that is used to decode values directly into the data buffer
// of the modified PointAttribute.
// Beyond DT_UINT32, this iterator distinguishes attribute types only by the
// size of their data (for efficiency), not by the type itself. The DataType is
// still conveyed, but it is currently unused and kept only for future logic or
// special casing.
// DT_UINT32 and all other 4-byte types are naturally supported from the size of
// data in the kd tree encoder. DT_UINT16 and DT_UINT8 are supported by way
// of byte copies into a temporary memory buffer.
template <class CoeffT>
class PointAttributeVectorOutputIterator {
typedef PointAttributeVectorOutputIterator<CoeffT> Self;
public:
PointAttributeVectorOutputIterator(
PointAttributeVectorOutputIterator &&that) = default;
explicit PointAttributeVectorOutputIterator(
const std::vector<AttributeTuple> &atts)
: attributes_(atts), point_id_(0) {
DRACO_DCHECK_GE(atts.size(), 1);
uint32_t required_decode_bytes = 0;
for (auto index = 0; index < attributes_.size(); index++) {
const AttributeTuple &att = attributes_[index];
required_decode_bytes = (std::max)(required_decode_bytes,
std::get<3>(att) * std::get<4>(att));
}
memory_.resize(required_decode_bytes);
data_ = memory_.data();
}
const Self &operator++() {
++point_id_;
return *this;
}
  // We do not want ANY copying in this iterator, so the post-increment
  // operator below is disabled for performance reasons.
// Self operator++(int) {
// Self copy = *this;
// ++point_id_;
// return copy;
// }
Self &operator*() { return *this; }
// Still needed in some cases.
// TODO(hemmer): remove.
// hardcoded to 3 based on legacy usage.
const Self &operator=(const VectorD<CoeffT, 3> &val) {
DRACO_DCHECK_EQ(attributes_.size(), 1); // Expect only ONE attribute.
AttributeTuple &att = attributes_[0];
PointAttribute *attribute = std::get<0>(att);
const uint32_t &offset = std::get<1>(att);
DRACO_DCHECK_EQ(offset, 0); // expected to be zero
attribute->SetAttributeValue(attribute->mapped_index(point_id_),
&val[0] + offset);
return *this;
}
// Additional operator taking std::vector as argument.
const Self &operator=(const std::vector<CoeffT> &val) {
for (auto index = 0; index < attributes_.size(); index++) {
AttributeTuple &att = attributes_[index];
PointAttribute *attribute = std::get<0>(att);
const uint32_t &offset = std::get<1>(att);
const uint32_t &data_size = std::get<3>(att);
const uint32_t &num_components = std::get<4>(att);
const uint32_t *data_source = val.data() + offset;
if (data_size < 4) { // handle uint16_t, uint8_t
// selectively copy data bytes
uint8_t *data_counter = data_;
for (uint32_t index = 0; index < num_components;
index += 1, data_counter += data_size) {
std::memcpy(data_counter, data_source + index, data_size);
}
// redirect to copied data
data_source = reinterpret_cast<uint32_t *>(data_);
}
const AttributeValueIndex avi = attribute->mapped_index(point_id_);
if (avi >= static_cast<uint32_t>(attribute->size())) {
return *this;
}
attribute->SetAttributeValue(avi, data_source);
}
return *this;
}
private:
// preallocated memory for buffering different data sizes. Never reallocated.
std::vector<uint8_t> memory_;
uint8_t *data_;
std::vector<AttributeTuple> attributes_;
PointIndex point_id_;
// NO COPY
PointAttributeVectorOutputIterator(
const PointAttributeVectorOutputIterator &that) = delete;
PointAttributeVectorOutputIterator &operator=(
PointAttributeVectorOutputIterator const &) = delete;
};
KdTreeAttributesDecoder::KdTreeAttributesDecoder() {}
bool KdTreeAttributesDecoder::DecodePortableAttributes(
DecoderBuffer *in_buffer) {
if (in_buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 3)) {
// Old bitstream does everything in the
// DecodeDataNeededByPortableTransforms() method.
return true;
}
uint8_t compression_level = 0;
if (!in_buffer->Decode(&compression_level)) {
return false;
}
const int32_t num_points = GetDecoder()->point_cloud()->num_points();
// Decode data using the kd tree decoding into integer (portable) attributes.
// We first need to go over all attributes and create a new portable storage
// for those attributes that need it (floating point attributes that have to
// be dequantized after decoding).
const int num_attributes = GetNumAttributes();
uint32_t total_dimensionality = 0; // position is a required dimension
std::vector<AttributeTuple> atts(num_attributes);
for (int i = 0; i < GetNumAttributes(); ++i) {
const int att_id = GetAttributeId(i);
PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
// All attributes have the same number of values and identity mapping
// between PointIndex and AttributeValueIndex.
att->Reset(num_points);
att->SetIdentityMapping();
PointAttribute *target_att = nullptr;
if (att->data_type() == DT_UINT32 || att->data_type() == DT_UINT16 ||
att->data_type() == DT_UINT8) {
// We can decode to these attributes directly.
target_att = att;
} else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
att->data_type() == DT_INT8) {
// Prepare storage for data that is used to convert unsigned values back
// to the signed ones.
for (int c = 0; c < att->num_components(); ++c) {
min_signed_values_.push_back(0);
}
target_att = att;
} else if (att->data_type() == DT_FLOAT32) {
// Create a portable attribute that will hold the decoded data. We will
// dequantize the decoded data to the final attribute later on.
const int num_components = att->num_components();
GeometryAttribute va;
va.Init(att->attribute_type(), nullptr, num_components, DT_UINT32, false,
num_components * DataTypeLength(DT_UINT32), 0);
std::unique_ptr<PointAttribute> port_att(new PointAttribute(va));
port_att->SetIdentityMapping();
port_att->Reset(num_points);
quantized_portable_attributes_.push_back(std::move(port_att));
target_att = quantized_portable_attributes_.back().get();
} else {
// Unsupported type.
return false;
}
// Add attribute to the output iterator used by the core algorithm.
const DataType data_type = target_att->data_type();
const uint32_t data_size = (std::max)(0, DataTypeLength(data_type));
const uint32_t num_components = target_att->num_components();
atts[i] = std::make_tuple(target_att, total_dimensionality, data_type,
data_size, num_components);
total_dimensionality += num_components;
}
PointAttributeVectorOutputIterator<uint32_t> out_it(atts);
switch (compression_level) {
case 0: {
DynamicIntegerPointsKdTreeDecoder<0> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
case 1: {
DynamicIntegerPointsKdTreeDecoder<1> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
case 2: {
DynamicIntegerPointsKdTreeDecoder<2> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
case 3: {
DynamicIntegerPointsKdTreeDecoder<3> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
case 4: {
DynamicIntegerPointsKdTreeDecoder<4> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
case 5: {
DynamicIntegerPointsKdTreeDecoder<5> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
case 6: {
DynamicIntegerPointsKdTreeDecoder<6> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
default:
return false;
}
return true;
}
bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
DecoderBuffer *in_buffer) {
if (in_buffer->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 3)) {
    // Decode quantization data for each attribute that needs it.
// TODO(ostava): This should be moved to AttributeQuantizationTransform.
std::vector<float> min_value;
for (int i = 0; i < GetNumAttributes(); ++i) {
const int att_id = GetAttributeId(i);
const PointAttribute *const att =
GetDecoder()->point_cloud()->attribute(att_id);
if (att->data_type() == DT_FLOAT32) {
const int num_components = att->num_components();
min_value.resize(num_components);
if (!in_buffer->Decode(&min_value[0], sizeof(float) * num_components)) {
return false;
}
float max_value_dif;
if (!in_buffer->Decode(&max_value_dif)) {
return false;
}
uint8_t quantization_bits;
if (!in_buffer->Decode(&quantization_bits) || quantization_bits > 31) {
return false;
}
AttributeQuantizationTransform transform;
if (!transform.SetParameters(quantization_bits, min_value.data(),
num_components, max_value_dif)) {
return false;
}
const int num_transforms =
static_cast<int>(attribute_quantization_transforms_.size());
if (!transform.TransferToAttribute(
quantized_portable_attributes_[num_transforms].get())) {
return false;
}
attribute_quantization_transforms_.push_back(transform);
}
}
// Decode transform data for signed integer attributes.
for (int i = 0; i < min_signed_values_.size(); ++i) {
int32_t val;
if (!DecodeVarint(&val, in_buffer)) {
return false;
}
min_signed_values_[i] = val;
}
return true;
}
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
// Handle old bitstream
// Figure out the total dimensionality of the point cloud
const uint32_t attribute_count = GetNumAttributes();
uint32_t total_dimensionality = 0; // position is a required dimension
std::vector<AttributeTuple> atts(attribute_count);
for (auto attribute_index = 0;
static_cast<uint32_t>(attribute_index) < attribute_count;
attribute_index += 1) // increment the dimensionality as needed...
{
const int att_id = GetAttributeId(attribute_index);
PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
const DataType data_type = att->data_type();
const uint32_t data_size = (std::max)(0, DataTypeLength(data_type));
const uint32_t num_components = att->num_components();
if (data_size > 4) {
return false;
}
atts[attribute_index] = std::make_tuple(
att, total_dimensionality, data_type, data_size, num_components);
// everything is treated as 32bit in the encoder.
total_dimensionality += num_components;
}
const int att_id = GetAttributeId(0);
PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
att->SetIdentityMapping();
// Decode method
uint8_t method;
if (!in_buffer->Decode(&method)) {
return false;
}
if (method == KdTreeAttributesEncodingMethod::kKdTreeQuantizationEncoding) {
uint8_t compression_level = 0;
if (!in_buffer->Decode(&compression_level)) {
return false;
}
uint32_t num_points = 0;
if (!in_buffer->Decode(&num_points)) {
return false;
}
att->Reset(num_points);
FloatPointsTreeDecoder decoder;
decoder.set_num_points_from_header(num_points);
PointAttributeVectorOutputIterator<float> out_it(atts);
if (!decoder.DecodePointCloud(in_buffer, out_it)) {
return false;
}
} else if (method == KdTreeAttributesEncodingMethod::kKdTreeIntegerEncoding) {
uint8_t compression_level = 0;
if (!in_buffer->Decode(&compression_level)) {
return false;
}
if (6 < compression_level) {
DRACO_LOGE(
"KdTreeAttributesDecoder: compression level %i not supported.\n",
compression_level);
return false;
}
uint32_t num_points;
if (!in_buffer->Decode(&num_points)) {
return false;
}
for (auto attribute_index = 0;
static_cast<uint32_t>(attribute_index) < attribute_count;
attribute_index += 1) {
const int att_id = GetAttributeId(attribute_index);
PointAttribute *const attr =
GetDecoder()->point_cloud()->attribute(att_id);
attr->Reset(num_points);
attr->SetIdentityMapping();
    }
PointAttributeVectorOutputIterator<uint32_t> out_it(atts);
switch (compression_level) {
case 0: {
DynamicIntegerPointsKdTreeDecoder<0> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
case 1: {
DynamicIntegerPointsKdTreeDecoder<1> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
case 2: {
DynamicIntegerPointsKdTreeDecoder<2> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
case 3: {
DynamicIntegerPointsKdTreeDecoder<3> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
case 4: {
DynamicIntegerPointsKdTreeDecoder<4> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
case 5: {
DynamicIntegerPointsKdTreeDecoder<5> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
case 6: {
DynamicIntegerPointsKdTreeDecoder<6> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, out_it)) {
return false;
}
break;
}
default:
return false;
}
} else {
// Invalid method.
return false;
}
return true;
#else
return false;
#endif
}
template <typename SignedDataTypeT>
bool KdTreeAttributesDecoder::TransformAttributeBackToSignedType(
PointAttribute *att, int num_processed_signed_components) {
typedef typename std::make_unsigned<SignedDataTypeT>::type UnsignedType;
std::vector<UnsignedType> unsigned_val(att->num_components());
std::vector<SignedDataTypeT> signed_val(att->num_components());
for (AttributeValueIndex avi(0); avi < static_cast<uint32_t>(att->size());
++avi) {
att->GetValue(avi, &unsigned_val[0]);
for (int c = 0; c < att->num_components(); ++c) {
// Up-cast |unsigned_val| to int32_t to ensure we don't overflow it for
// smaller data types.
signed_val[c] = static_cast<SignedDataTypeT>(
static_cast<int32_t>(unsigned_val[c]) +
min_signed_values_[num_processed_signed_components + c]);
}
att->SetAttributeValue(avi, &signed_val[0]);
}
return true;
}
bool KdTreeAttributesDecoder::TransformAttributesToOriginalFormat() {
if (quantized_portable_attributes_.empty() && min_signed_values_.empty()) {
return true;
}
int num_processed_quantized_attributes = 0;
int num_processed_signed_components = 0;
// Dequantize attributes that needed it.
for (int i = 0; i < GetNumAttributes(); ++i) {
const int att_id = GetAttributeId(i);
PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
att->data_type() == DT_INT8) {
std::vector<uint32_t> unsigned_val(att->num_components());
std::vector<int32_t> signed_val(att->num_components());
// Values are stored as unsigned in the attribute, make them signed again.
if (att->data_type() == DT_INT32) {
if (!TransformAttributeBackToSignedType<int32_t>(
att, num_processed_signed_components)) {
return false;
}
} else if (att->data_type() == DT_INT16) {
if (!TransformAttributeBackToSignedType<int16_t>(
att, num_processed_signed_components)) {
return false;
}
} else if (att->data_type() == DT_INT8) {
if (!TransformAttributeBackToSignedType<int8_t>(
att, num_processed_signed_components)) {
return false;
}
}
num_processed_signed_components += att->num_components();
} else if (att->data_type() == DT_FLOAT32) {
      // TODO(ostava): This code should probably be moved out to attribute
// transform and shared with the SequentialQuantizationAttributeDecoder.
const PointAttribute *const src_att =
quantized_portable_attributes_[num_processed_quantized_attributes]
.get();
const AttributeQuantizationTransform &transform =
attribute_quantization_transforms_
[num_processed_quantized_attributes];
num_processed_quantized_attributes++;
if (GetDecoder()->options()->GetAttributeBool(
att->attribute_type(), "skip_attribute_transform", false)) {
// Attribute transform should not be performed. In this case, we replace
// the output geometry attribute with the portable attribute.
// TODO(ostava): We can potentially avoid this copy by introducing a new
        // mechanism that would allow using the final attributes as portable
// attributes for predictors that may need them.
att->CopyFrom(*src_att);
continue;
}
// Convert all quantized values back to floats.
const int32_t max_quantized_value =
(1u << static_cast<uint32_t>(transform.quantization_bits())) - 1;
const int num_components = att->num_components();
const int entry_size = sizeof(float) * num_components;
const std::unique_ptr<float[]> att_val(new float[num_components]);
int quant_val_id = 0;
int out_byte_pos = 0;
Dequantizer dequantizer;
if (!dequantizer.Init(transform.range(), max_quantized_value)) {
return false;
}
const uint32_t *const portable_attribute_data =
reinterpret_cast<const uint32_t *>(
src_att->GetAddress(AttributeValueIndex(0)));
for (uint32_t i = 0; i < src_att->size(); ++i) {
for (int c = 0; c < num_components; ++c) {
float value = dequantizer.DequantizeFloat(
portable_attribute_data[quant_val_id++]);
value = value + transform.min_value(c);
att_val[c] = value;
}
// Store the floating point value into the attribute buffer.
att->buffer()->Write(out_byte_pos, att_val.get(), entry_size);
out_byte_pos += entry_size;
}
}
}
return true;
}
} // namespace draco
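
The dequantization loop in TransformAttributesToOriginalFormat() maps a quantized integer q in [0, 2^bits - 1] back to min_value(c) + q * range / (2^bits - 1). A standalone restatement of that per-component step (editorial sketch; the actual Dequantizer class lives in Draco's core quantization utilities and may differ in detail):

#include <cstdint>

// Editorial sketch of the dequantization performed above for one component.
// |bits| is assumed to be in [1, 31], as enforced by the decoder.
inline float DequantizeComponentSketch(std::uint32_t q, int bits,
                                       float min_value, float range) {
  const std::uint32_t max_quantized_value =
      (1u << static_cast<std::uint32_t>(bits)) - 1;
  return min_value + static_cast<float>(q) *
                         (range / static_cast<float>(max_quantized_value));
}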

View File

@ -0,0 +1,46 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_
#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/compression/attributes/attributes_decoder.h"
namespace draco {
// Decodes attributes encoded with the KdTreeAttributesEncoder.
class KdTreeAttributesDecoder : public AttributesDecoder {
public:
KdTreeAttributesDecoder();
protected:
bool DecodePortableAttributes(DecoderBuffer *in_buffer) override;
bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) override;
bool TransformAttributesToOriginalFormat() override;
private:
template <typename SignedDataTypeT>
bool TransformAttributeBackToSignedType(PointAttribute *att,
int num_processed_signed_components);
std::vector<AttributeQuantizationTransform>
attribute_quantization_transforms_;
std::vector<int32_t> min_signed_values_;
std::vector<std::unique_ptr<PointAttribute>> quantized_portable_attributes_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_

View File

@ -0,0 +1,305 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/kd_tree_attributes_encoder.h"
#include "draco/compression/attributes/kd_tree_attributes_shared.h"
#include "draco/compression/attributes/point_d_vector.h"
#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h"
#include "draco/compression/point_cloud/algorithms/float_points_tree_encoder.h"
#include "draco/compression/point_cloud/point_cloud_encoder.h"
#include "draco/core/varint_encoding.h"
namespace draco {
KdTreeAttributesEncoder::KdTreeAttributesEncoder() : num_components_(0) {}
KdTreeAttributesEncoder::KdTreeAttributesEncoder(int att_id)
: AttributesEncoder(att_id), num_components_(0) {}
bool KdTreeAttributesEncoder::TransformAttributesToPortableFormat() {
// Convert any of the input attributes into a format that can be processed by
// the kd tree encoder (quantization of floating attributes for now).
const size_t num_points = encoder()->point_cloud()->num_points();
int num_components = 0;
for (uint32_t i = 0; i < num_attributes(); ++i) {
const int att_id = GetAttributeId(i);
const PointAttribute *const att =
encoder()->point_cloud()->attribute(att_id);
num_components += att->num_components();
}
num_components_ = num_components;
// Go over all attributes and quantize them if needed.
for (uint32_t i = 0; i < num_attributes(); ++i) {
const int att_id = GetAttributeId(i);
const PointAttribute *const att =
encoder()->point_cloud()->attribute(att_id);
if (att->data_type() == DT_FLOAT32) {
// Quantization path.
AttributeQuantizationTransform attribute_quantization_transform;
const int quantization_bits = encoder()->options()->GetAttributeInt(
att_id, "quantization_bits", -1);
if (quantization_bits < 1) {
return false;
}
if (encoder()->options()->IsAttributeOptionSet(att_id,
"quantization_origin") &&
encoder()->options()->IsAttributeOptionSet(att_id,
"quantization_range")) {
// Quantization settings are explicitly specified in the provided
// options.
std::vector<float> quantization_origin(att->num_components());
encoder()->options()->GetAttributeVector(att_id, "quantization_origin",
att->num_components(),
&quantization_origin[0]);
const float range = encoder()->options()->GetAttributeFloat(
att_id, "quantization_range", 1.f);
attribute_quantization_transform.SetParameters(
quantization_bits, quantization_origin.data(),
att->num_components(), range);
} else {
// Compute quantization settings from the attribute values.
if (!attribute_quantization_transform.ComputeParameters(
*att, quantization_bits)) {
return false;
}
}
attribute_quantization_transforms_.push_back(
attribute_quantization_transform);
// Store the quantized attribute in an array that will be used when we do
// the actual encoding of the data.
auto portable_att =
attribute_quantization_transform.InitTransformedAttribute(*att,
num_points);
attribute_quantization_transform.TransformAttribute(*att, {},
portable_att.get());
quantized_portable_attributes_.push_back(std::move(portable_att));
} else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
att->data_type() == DT_INT8) {
// For signed types, find the minimum value for each component. These
// values are going to be used to transform the attribute values to
// unsigned integers that can be processed by the core kd tree algorithm.
std::vector<int32_t> min_value(att->num_components(),
std::numeric_limits<int32_t>::max());
std::vector<int32_t> act_value(att->num_components());
for (AttributeValueIndex avi(0); avi < static_cast<uint32_t>(att->size());
++avi) {
att->ConvertValue<int32_t>(avi, &act_value[0]);
for (int c = 0; c < att->num_components(); ++c) {
if (min_value[c] > act_value[c]) {
min_value[c] = act_value[c];
}
}
}
for (int c = 0; c < att->num_components(); ++c) {
min_signed_values_.push_back(min_value[c]);
}
}
}
return true;
}
bool KdTreeAttributesEncoder::EncodeDataNeededByPortableTransforms(
EncoderBuffer *out_buffer) {
// Store quantization settings for all attributes that need it.
for (int i = 0; i < attribute_quantization_transforms_.size(); ++i) {
attribute_quantization_transforms_[i].EncodeParameters(out_buffer);
}
// Encode data needed for transforming signed integers to unsigned ones.
for (int i = 0; i < min_signed_values_.size(); ++i) {
EncodeVarint<int32_t>(min_signed_values_[i], out_buffer);
}
return true;
}
bool KdTreeAttributesEncoder::EncodePortableAttributes(
EncoderBuffer *out_buffer) {
// Encode the data using the kd tree encoder algorithm. The data is first
// copied to a PointDVector that provides all the API expected by the core
// encoding algorithm.
// We limit the maximum value of compression_level to 6 as we don't currently
// have viable algorithms for higher compression levels.
uint8_t compression_level =
std::min(10 - encoder()->options()->GetSpeed(), 6);
DRACO_DCHECK_LE(compression_level, 6);
if (compression_level == 6 && num_components_ > 15) {
    // Don't use compression level 6 when there are more than 15 components,
    // because axis selection is currently encoded using only 4 bits.
compression_level = 5;
}
out_buffer->Encode(compression_level);
// Init PointDVector. The number of dimensions is equal to the total number
// of dimensions across all attributes.
const int num_points = encoder()->point_cloud()->num_points();
PointDVector<uint32_t> point_vector(num_points, num_components_);
int num_processed_components = 0;
int num_processed_quantized_attributes = 0;
int num_processed_signed_components = 0;
// Copy data to the point vector.
for (uint32_t i = 0; i < num_attributes(); ++i) {
const int att_id = GetAttributeId(i);
const PointAttribute *const att =
encoder()->point_cloud()->attribute(att_id);
const PointAttribute *source_att = nullptr;
if (att->data_type() == DT_UINT32 || att->data_type() == DT_UINT16 ||
att->data_type() == DT_UINT8 || att->data_type() == DT_INT32 ||
att->data_type() == DT_INT16 || att->data_type() == DT_INT8) {
// Use the original attribute.
source_att = att;
} else if (att->data_type() == DT_FLOAT32) {
// Use the portable (quantized) attribute instead.
source_att =
quantized_portable_attributes_[num_processed_quantized_attributes]
.get();
num_processed_quantized_attributes++;
} else {
// Unsupported data type.
return false;
}
if (source_att == nullptr) {
return false;
}
// Copy source_att to the vector.
if (source_att->data_type() == DT_UINT32) {
// If the data type is the same as the one used by the point vector, we
// can directly copy individual elements.
for (PointIndex pi(0); pi < num_points; ++pi) {
const AttributeValueIndex avi = source_att->mapped_index(pi);
const uint8_t *const att_value_address = source_att->GetAddress(avi);
point_vector.CopyAttribute(source_att->num_components(),
num_processed_components, pi.value(),
att_value_address);
}
} else if (source_att->data_type() == DT_INT32 ||
source_att->data_type() == DT_INT16 ||
source_att->data_type() == DT_INT8) {
// Signed values need to be converted to unsigned before they are stored
// in the point vector.
std::vector<int32_t> signed_point(source_att->num_components());
std::vector<uint32_t> unsigned_point(source_att->num_components());
for (PointIndex pi(0); pi < num_points; ++pi) {
const AttributeValueIndex avi = source_att->mapped_index(pi);
source_att->ConvertValue<int32_t>(avi, &signed_point[0]);
for (int c = 0; c < source_att->num_components(); ++c) {
unsigned_point[c] =
signed_point[c] -
min_signed_values_[num_processed_signed_components + c];
}
point_vector.CopyAttribute(source_att->num_components(),
num_processed_components, pi.value(),
&unsigned_point[0]);
}
num_processed_signed_components += source_att->num_components();
} else {
// If the attribute uses a different data type, we have to convert the
// values before putting them into the point vector.
std::vector<uint32_t> point(source_att->num_components());
for (PointIndex pi(0); pi < num_points; ++pi) {
const AttributeValueIndex avi = source_att->mapped_index(pi);
source_att->ConvertValue<uint32_t>(avi, &point[0]);
point_vector.CopyAttribute(source_att->num_components(),
num_processed_components, pi.value(),
point.data());
}
}
num_processed_components += source_att->num_components();
}
// Compute the maximum bit length needed for the kd tree encoding.
int num_bits = 0;
const uint32_t *data = point_vector[0];
for (int i = 0; i < num_points * num_components_; ++i) {
if (data[i] > 0) {
const int msb = MostSignificantBit(data[i]) + 1;
if (msb > num_bits) {
num_bits = msb;
}
}
}
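// E.g., if the largest transformed coordinate were 1000 (binary 1111101000),
// the loop above would leave num_bits at 10.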
switch (compression_level) {
case 6: {
DynamicIntegerPointsKdTreeEncoder<6> points_encoder(num_components_);
if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
num_bits, out_buffer)) {
return false;
}
break;
}
case 5: {
DynamicIntegerPointsKdTreeEncoder<5> points_encoder(num_components_);
if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
num_bits, out_buffer)) {
return false;
}
break;
}
case 4: {
DynamicIntegerPointsKdTreeEncoder<4> points_encoder(num_components_);
if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
num_bits, out_buffer)) {
return false;
}
break;
}
case 3: {
DynamicIntegerPointsKdTreeEncoder<3> points_encoder(num_components_);
if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
num_bits, out_buffer)) {
return false;
}
break;
}
case 2: {
DynamicIntegerPointsKdTreeEncoder<2> points_encoder(num_components_);
if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
num_bits, out_buffer)) {
return false;
}
break;
}
case 1: {
DynamicIntegerPointsKdTreeEncoder<1> points_encoder(num_components_);
if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
num_bits, out_buffer)) {
return false;
}
break;
}
case 0: {
DynamicIntegerPointsKdTreeEncoder<0> points_encoder(num_components_);
if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
num_bits, out_buffer)) {
return false;
}
break;
}
// Compression level and/or encoding speed seem wrong.
default:
return false;
}
return true;
}
} // namespace draco
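The signed-to-unsigned transform used by the encoder above is simple enough to show in isolation. A minimal sketch, assuming plain std::vector inputs instead of Draco's PointAttribute and EncoderBuffer types (the helper name MakeUnsigned is made up for illustration):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>

// Shifts signed per-component values so that every component becomes
// non-negative (the form expected by the kd tree encoder) and returns the
// per-component minimums the decoder needs to undo the shift.
std::vector<int32_t> MakeUnsigned(
    const std::vector<std::vector<int32_t>> &points,
    std::vector<std::vector<uint32_t>> *out_points) {
  const std::size_t num_components = points.empty() ? 0 : points[0].size();
  std::vector<int32_t> min_values(num_components,
                                  std::numeric_limits<int32_t>::max());
  for (const auto &point : points) {
    for (std::size_t c = 0; c < num_components; ++c) {
      min_values[c] = std::min(min_values[c], point[c]);
    }
  }
  out_points->clear();
  for (const auto &point : points) {
    std::vector<uint32_t> unsigned_point(num_components);
    for (std::size_t c = 0; c < num_components; ++c) {
      unsigned_point[c] = static_cast<uint32_t>(
          static_cast<int64_t>(point[c]) - min_values[c]);
    }
    out_points->push_back(std::move(unsigned_point));
  }
  // In the real encoder these minimums are written with EncodeVarint().
  return min_values;
}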

View File

@ -0,0 +1,51 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_
#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/compression/attributes/attributes_encoder.h"
#include "draco/compression/config/compression_shared.h"
namespace draco {
// Encodes all attributes of a given PointCloud using one of the available
// Kd-tree compression methods.
// See compression/point_cloud/point_cloud_kd_tree_encoder.h for more details.
class KdTreeAttributesEncoder : public AttributesEncoder {
public:
KdTreeAttributesEncoder();
explicit KdTreeAttributesEncoder(int att_id);
uint8_t GetUniqueId() const override { return KD_TREE_ATTRIBUTE_ENCODER; }
protected:
bool TransformAttributesToPortableFormat() override;
bool EncodePortableAttributes(EncoderBuffer *out_buffer) override;
bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) override;
private:
std::vector<AttributeQuantizationTransform>
attribute_quantization_transforms_;
// Min signed values are used to transform signed integers into unsigned ones
// (by subtracting the min signed value for each component).
std::vector<int32_t> min_signed_values_;
std::vector<std::unique_ptr<PointAttribute>> quantized_portable_attributes_;
int num_components_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_

View File

@ -0,0 +1,28 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_
#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_
namespace draco {
// Defines types of kD-tree compression
enum KdTreeAttributesEncodingMethod {
kKdTreeQuantizationEncoding = 0,
kKdTreeIntegerEncoding
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_

View File

@ -0,0 +1,51 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_
#include "draco/compression/attributes/points_sequencer.h"
namespace draco {
// A simple sequencer that generates a linear sequence [0, num_points - 1].
// I.e., the order of the points is preserved for the input data.
class LinearSequencer : public PointsSequencer {
public:
explicit LinearSequencer(int32_t num_points) : num_points_(num_points) {}
bool UpdatePointToAttributeIndexMapping(PointAttribute *attribute) override {
attribute->SetIdentityMapping();
return true;
}
protected:
bool GenerateSequenceInternal() override {
if (num_points_ < 0) {
return false;
}
out_point_ids()->resize(num_points_);
for (int i = 0; i < num_points_; ++i) {
out_point_ids()->at(i) = PointIndex(i);
}
return true;
}
private:
int32_t num_points_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_
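A minimal usage sketch for the sequencer, assuming the usual draco/compression/attributes/ include path from this tree; the LinearSequencer simply yields the identity ordering 0, 1, ..., num_points - 1:

#include <vector>

#include "draco/compression/attributes/linear_sequencer.h"

// Generates the point ordering for five points; the resulting vector holds
// PointIndex(0) through PointIndex(4) in order.
std::vector<draco::PointIndex> MakeLinearOrder() {
  draco::LinearSequencer sequencer(5);
  std::vector<draco::PointIndex> order;
  sequencer.GenerateSequence(&order);
  return order;
}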

View File

@ -0,0 +1,58 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_
#define DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_
#include <inttypes.h>
#include <vector>
#include "draco/attributes/geometry_indices.h"
namespace draco {
// Data used for encoding and decoding of mesh attributes.
struct MeshAttributeIndicesEncodingData {
MeshAttributeIndicesEncodingData() : num_values(0) {}
void Init(int num_vertices) {
vertex_to_encoded_attribute_value_index_map.resize(num_vertices);
// We expect to store one value for each vertex.
encoded_attribute_value_index_to_corner_map.reserve(num_vertices);
}
// Array for storing the corner ids in the order their associated attribute
// entries were encoded/decoded. For every encoded attribute value entry we
// store exactly one corner. I.e., this is the mapping between an encoded
// attribute entry ids and corner ids. This map is needed for example by
// prediction schemes. Note that not all corners are included in this map,
// e.g., if multiple corners share the same attribute value, usually only one
// of these corners will be included.
std::vector<CornerIndex> encoded_attribute_value_index_to_corner_map;
// Map for storing encoding order of attribute entries for each vertex.
// I.e., the mapping between vertices and their corresponding attribute entry ids
// that are going to be used by the decoder.
// -1 if an attribute entry hasn't been encoded/decoded yet.
std::vector<int32_t> vertex_to_encoded_attribute_value_index_map;
// Total number of encoded/decoded attribute entries.
int num_values;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_

View File

@ -0,0 +1,360 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Utilities for converting unit vectors to octahedral coordinates and back.
// For more details about octahedral coordinates, see for example Cigolle
// et al.'14 “A Survey of Efficient Representations for Independent Unit
// Vectors”.
//
// In short this is motivated by an octahedron inscribed into a sphere. The
// direction of the normal vector can be defined by a point on the octahedron.
// On the right hemisphere (x > 0) this point is projected onto the x = 0 plane,
// that is, the right side of the octahedron forms a diamond-like shape. The
// left side of the octahedron is also projected onto the x = 0 plane, however,
// in this case we flap the triangles of the diamond outward. Afterwards we
// shift the resulting square such that all values are positive.
//
// Important values in this file:
// * q: number of quantization bits
// * max_quantized_value: the max value representable with q bits (odd)
// * max_value: max value of the diamond = max_quantized_value - 1 (even)
// * center_value: center of the diamond after shift
//
// Note that the parameter space is somewhat periodic, e.g. (0, 0) ==
// (max_value, max_value), which is also why the diamond is one smaller than the
// maximal representable value in order to have an odd range of values.
#ifndef DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_
#define DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_
#include <inttypes.h>
#include <algorithm>
#include <cmath>
#include "draco/core/macros.h"
namespace draco {
class OctahedronToolBox {
public:
OctahedronToolBox()
: quantization_bits_(-1),
max_quantized_value_(-1),
max_value_(-1),
dequantization_scale_(1.f),
center_value_(-1) {}
bool SetQuantizationBits(int32_t q) {
if (q < 2 || q > 30) {
return false;
}
quantization_bits_ = q;
max_quantized_value_ = (1 << quantization_bits_) - 1;
max_value_ = max_quantized_value_ - 1;
dequantization_scale_ = 2.f / max_value_;
center_value_ = max_value_ / 2;
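// For example, q = 4 gives max_quantized_value_ = 15, max_value_ = 14,
// dequantization_scale_ = 1 / 7 and center_value_ = 7.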
return true;
}
bool IsInitialized() const { return quantization_bits_ != -1; }
// Convert all edge points in the top left and bottom right quadrants to
// their corresponding position in the bottom left and top right quadrants.
// Convert all corner edge points to the top right corner.
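// For example, with max_value_ = 14 and center_value_ = 7, the corner point
// (0, 0) maps to (14, 14) and the edge point (0, 10) maps to (0, 4).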
inline void CanonicalizeOctahedralCoords(int32_t s, int32_t t, int32_t *out_s,
int32_t *out_t) const {
if ((s == 0 && t == 0) || (s == 0 && t == max_value_) ||
(s == max_value_ && t == 0)) {
s = max_value_;
t = max_value_;
} else if (s == 0 && t > center_value_) {
t = center_value_ - (t - center_value_);
} else if (s == max_value_ && t < center_value_) {
t = center_value_ + (center_value_ - t);
} else if (t == max_value_ && s < center_value_) {
s = center_value_ + (center_value_ - s);
} else if (t == 0 && s > center_value_) {
s = center_value_ - (s - center_value_);
}
*out_s = s;
*out_t = t;
}
// Converts an integer vector to octahedral coordinates.
// Precondition: |int_vec| abs sum must equal center value.
inline void IntegerVectorToQuantizedOctahedralCoords(const int32_t *int_vec,
int32_t *out_s,
int32_t *out_t) const {
DRACO_DCHECK_EQ(
std::abs(int_vec[0]) + std::abs(int_vec[1]) + std::abs(int_vec[2]),
center_value_);
int32_t s, t;
if (int_vec[0] >= 0) {
// Right hemisphere.
s = (int_vec[1] + center_value_);
t = (int_vec[2] + center_value_);
} else {
// Left hemisphere.
if (int_vec[1] < 0) {
s = std::abs(int_vec[2]);
} else {
s = (max_value_ - std::abs(int_vec[2]));
}
if (int_vec[2] < 0) {
t = std::abs(int_vec[1]);
} else {
t = (max_value_ - std::abs(int_vec[1]));
}
}
CanonicalizeOctahedralCoords(s, t, out_s, out_t);
}
template <class T>
void FloatVectorToQuantizedOctahedralCoords(const T *vector, int32_t *out_s,
int32_t *out_t) const {
const double abs_sum = std::abs(static_cast<double>(vector[0])) +
std::abs(static_cast<double>(vector[1])) +
std::abs(static_cast<double>(vector[2]));
// Adjust values such that abs sum equals 1.
double scaled_vector[3];
if (abs_sum > 1e-6) {
// Scale needed to project the vector to the surface of an octahedron.
const double scale = 1.0 / abs_sum;
scaled_vector[0] = vector[0] * scale;
scaled_vector[1] = vector[1] * scale;
scaled_vector[2] = vector[2] * scale;
} else {
scaled_vector[0] = 1.0;
scaled_vector[1] = 0;
scaled_vector[2] = 0;
}
// Scale vector such that the sum equals the center value.
int32_t int_vec[3];
int_vec[0] =
static_cast<int32_t>(floor(scaled_vector[0] * center_value_ + 0.5));
int_vec[1] =
static_cast<int32_t>(floor(scaled_vector[1] * center_value_ + 0.5));
// Make sure the sum is exactly the center value.
int_vec[2] = center_value_ - std::abs(int_vec[0]) - std::abs(int_vec[1]);
if (int_vec[2] < 0) {
// If the sum of first two coordinates is too large, we need to decrease
// the length of one of the coordinates.
if (int_vec[1] > 0) {
int_vec[1] += int_vec[2];
} else {
int_vec[1] -= int_vec[2];
}
int_vec[2] = 0;
}
// Take care of the sign.
if (scaled_vector[2] < 0) {
int_vec[2] *= -1;
}
IntegerVectorToQuantizedOctahedralCoords(int_vec, out_s, out_t);
}
// Normalizes |vec| such that its abs sum is equal to the center value.
template <class T>
void CanonicalizeIntegerVector(T *vec) const {
static_assert(std::is_integral<T>::value, "T must be an integral type.");
static_assert(std::is_signed<T>::value, "T must be a signed type.");
const int64_t abs_sum = static_cast<int64_t>(std::abs(vec[0])) +
static_cast<int64_t>(std::abs(vec[1])) +
static_cast<int64_t>(std::abs(vec[2]));
if (abs_sum == 0) {
vec[0] = center_value_; // vec[1] == vec[2] == 0
} else {
vec[0] =
(static_cast<int64_t>(vec[0]) * static_cast<int64_t>(center_value_)) /
abs_sum;
vec[1] =
(static_cast<int64_t>(vec[1]) * static_cast<int64_t>(center_value_)) /
abs_sum;
if (vec[2] >= 0) {
vec[2] = center_value_ - std::abs(vec[0]) - std::abs(vec[1]);
} else {
vec[2] = -(center_value_ - std::abs(vec[0]) - std::abs(vec[1]));
}
}
}
inline void QuantizedOctahedralCoordsToUnitVector(int32_t in_s, int32_t in_t,
float *out_vector) const {
OctahedralCoordsToUnitVector(in_s * dequantization_scale_ - 1.f,
in_t * dequantization_scale_ - 1.f,
out_vector);
}
// |s| and |t| are expected to be signed values.
inline bool IsInDiamond(const int32_t &s, const int32_t &t) const {
// Expect center already at origin.
DRACO_DCHECK_LE(s, center_value_);
DRACO_DCHECK_LE(t, center_value_);
DRACO_DCHECK_GE(s, -center_value_);
DRACO_DCHECK_GE(t, -center_value_);
return std::abs(s) + std::abs(t) <= center_value_;
}
void InvertDiamond(int32_t *s, int32_t *t) const {
// Expect center already at origin.
DRACO_DCHECK_LE(*s, center_value_);
DRACO_DCHECK_LE(*t, center_value_);
DRACO_DCHECK_GE(*s, -center_value_);
DRACO_DCHECK_GE(*t, -center_value_);
int32_t sign_s = 0;
int32_t sign_t = 0;
if (*s >= 0 && *t >= 0) {
sign_s = 1;
sign_t = 1;
} else if (*s <= 0 && *t <= 0) {
sign_s = -1;
sign_t = -1;
} else {
sign_s = (*s > 0) ? 1 : -1;
sign_t = (*t > 0) ? 1 : -1;
}
const int32_t corner_point_s = sign_s * center_value_;
const int32_t corner_point_t = sign_t * center_value_;
*s = 2 * *s - corner_point_s;
*t = 2 * *t - corner_point_t;
if (sign_s * sign_t >= 0) {
int32_t temp = *s;
*s = -*t;
*t = -temp;
} else {
std::swap(*s, *t);
}
*s = (*s + corner_point_s) / 2;
*t = (*t + corner_point_t) / 2;
}
void InvertDirection(int32_t *s, int32_t *t) const {
// Expect center already at origin.
DRACO_DCHECK_LE(*s, center_value_);
DRACO_DCHECK_LE(*t, center_value_);
DRACO_DCHECK_GE(*s, -center_value_);
DRACO_DCHECK_GE(*t, -center_value_);
*s *= -1;
*t *= -1;
this->InvertDiamond(s, t);
}
// For correction values.
int32_t ModMax(int32_t x) const {
if (x > this->center_value()) {
return x - this->max_quantized_value();
}
if (x < -this->center_value()) {
return x + this->max_quantized_value();
}
return x;
}
// For correction values.
int32_t MakePositive(int32_t x) const {
DRACO_DCHECK_LE(x, this->center_value() * 2);
if (x < 0) {
return x + this->max_quantized_value();
}
return x;
}
int32_t quantization_bits() const { return quantization_bits_; }
int32_t max_quantized_value() const { return max_quantized_value_; }
int32_t max_value() const { return max_value_; }
int32_t center_value() const { return center_value_; }
private:
inline void OctahedralCoordsToUnitVector(float in_s_scaled, float in_t_scaled,
float *out_vector) const {
// Background about the encoding:
// A normal is encoded in a normalized space <s, t> depicted below. The
// encoding corresponds to an octahedron that is unwrapped to a 2D plane.
// During encoding, a normal is projected to the surface of the octahedron
// and the projection is then unwrapped to the 2D plane. Decoding is the
// reverse of this process.
// All points in the central diamond are located on triangles on the
// right "hemisphere" of the octahedron while all points outside of the
// diamond are on the left hemisphere (basically, they would have to be
// wrapped along the diagonal edges to form the octahedron). The central
// point corresponds to the right most vertex of the octahedron and all
// corners of the plane correspond to the left most vertex of the
// octahedron.
//
// t
// ^ *-----*-----*
// | |    /|\    |
// | |   / | \   |
// | |  /  |  \  |
// | | /   |   \ |
// | *-----*-----*
// | | \   |   / |
// | |  \  |  /  |
// | |   \ | /   |
// | |    \|/    |
// | *-----*-----* --> s
// Note that the input |in_s_scaled| and |in_t_scaled| are already scaled to
// <-1, 1> range. This way, the central point is at coordinate (0, 0).
float y = in_s_scaled;
float z = in_t_scaled;
// Remaining coordinate can be computed by projecting the (y, z) values onto
// the surface of the octahedron.
const float x = 1.f - std::abs(y) - std::abs(z);
// |x| is essentially a signed distance from the diagonal edges of the
// diamond shown on the figure above. It is positive for all points in the
// diamond (right hemisphere) and negative for all points outside the
// diamond (left hemisphere). For all points on the left hemisphere we need
// to update their (y, z) coordinates to account for the wrapping along
// the edges of the diamond.
float x_offset = -x;
x_offset = x_offset < 0 ? 0 : x_offset;
// This will do nothing for the points on the right hemisphere but it will
// mirror the (y, z) location along the nearest diagonal edge of the
// diamond.
y += y < 0 ? x_offset : -x_offset;
z += z < 0 ? x_offset : -x_offset;
// Normalize the computed vector.
const float norm_squared = x * x + y * y + z * z;
if (norm_squared < 1e-6) {
out_vector[0] = 0;
out_vector[1] = 0;
out_vector[2] = 0;
} else {
const float d = 1.0f / std::sqrt(norm_squared);
out_vector[0] = x * d;
out_vector[1] = y * d;
out_vector[2] = z * d;
}
}
int32_t quantization_bits_;
int32_t max_quantized_value_;
int32_t max_value_;
float dequantization_scale_;
int32_t center_value_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_
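A short round-trip sketch for the toolbox above; the exact quantized (s, t) values depend on the chosen number of bits, and the include path is assumed to match this tree:

#include <cstdint>

#include "draco/compression/attributes/normal_compression_utils.h"

// Quantizes a unit normal into octahedral (s, t) coordinates and decodes it
// back; with 10 quantization bits the decoded direction is very close to the
// original one.
void RoundTripNormal() {
  draco::OctahedronToolBox toolbox;
  toolbox.SetQuantizationBits(10);
  const float normal[3] = {0.f, 0.f, 1.f};
  int32_t s, t;
  toolbox.FloatVectorToQuantizedOctahedralCoords(normal, &s, &t);
  float decoded[3];
  toolbox.QuantizedOctahedralCoordsToUnitVector(s, t, decoded);
  // decoded is now approximately (0.f, 0.f, 1.f).
}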

View File

@ -0,0 +1,279 @@
// Copyright 2018 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
#define DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
#include <cstring>
#include <memory>
#include <vector>
#include "draco/core/macros.h"
namespace draco {
// The main class in this file is PointDVector, which provides an interface
// similar to std::vector<PointD> for an arbitrary number of dimensions chosen
// at run time (i.e., without a template argument). PointDVectorIterator is a
// random access iterator, which allows for compatibility with existing
// algorithms. PseudoPointD provides a view on the individual items in a
// contiguous block of memory; it is compatible with the swap function and is
// returned by dereferencing a PointDVectorIterator. The swap overloads provide
// the compatibility/specialization that allows these classes to work with
// currently utilized STL functions.
// This class allows for swap functionality from the RandomIterator.
// It seems problematic to bring this inside PointDVector due to templating.
template <typename internal_t>
class PseudoPointD {
public:
PseudoPointD(internal_t *mem, internal_t dimension)
: mem_(mem), dimension_(dimension) {}
// Specifically copies referenced memory
void swap(PseudoPointD &other) noexcept {
for (internal_t dim = 0; dim < dimension_; dim += 1) {
std::swap(mem_[dim], other.mem_[dim]);
}
}
PseudoPointD(const PseudoPointD &other)
: mem_(other.mem_), dimension_(other.dimension_) {}
const internal_t &operator[](const size_t &n) const {
DRACO_DCHECK_LT(n, dimension_);
return mem_[n];
}
internal_t &operator[](const size_t &n) {
DRACO_DCHECK_LT(n, dimension_);
return mem_[n];
}
bool operator==(const PseudoPointD &other) const {
for (auto dim = 0; dim < dimension_; dim += 1) {
if (mem_[dim] != other.mem_[dim]) {
return false;
}
}
return true;
}
bool operator!=(const PseudoPointD &other) const {
return !this->operator==(other);
}
private:
internal_t *const mem_;
const internal_t dimension_;
};
// It seems problematic to bring this inside PointDVector due to templating.
template <typename internal_t>
void swap(draco::PseudoPointD<internal_t> &&a,
draco::PseudoPointD<internal_t> &&b) noexcept {
a.swap(b);
};
template <typename internal_t>
void swap(draco::PseudoPointD<internal_t> &a,
draco::PseudoPointD<internal_t> &b) noexcept {
a.swap(b);
};
template <typename internal_t>
class PointDVector {
public:
PointDVector(const uint32_t n_items, const uint32_t dimensionality)
: n_items_(n_items),
dimensionality_(dimensionality),
item_size_bytes_(dimensionality * sizeof(internal_t)),
data_(n_items * dimensionality),
data0_(data_.data()) {}
// random access iterator
class PointDVectorIterator
: public std::iterator<std::random_access_iterator_tag, size_t, size_t> {
friend class PointDVector;
public:
// std::iter_swap is called inside of std::partition and needs this
// specialized support
PseudoPointD<internal_t> operator*() const {
return PseudoPointD<internal_t>(vec_->data0_ + item_ * dimensionality_,
dimensionality_);
}
const PointDVectorIterator &operator++() {
item_ += 1;
return *this;
}
const PointDVectorIterator &operator--() {
item_ -= 1;
return *this;
}
PointDVectorIterator operator++(int32_t) {
PointDVectorIterator copy(*this);
item_ += 1;
return copy;
}
PointDVectorIterator operator--(int32_t) {
PointDVectorIterator copy(*this);
item_ -= 1;
return copy;
}
PointDVectorIterator &operator=(const PointDVectorIterator &other) {
this->item_ = other.item_;
return *this;
}
bool operator==(const PointDVectorIterator &ref) const {
return item_ == ref.item_;
}
bool operator!=(const PointDVectorIterator &ref) const {
return item_ != ref.item_;
}
bool operator<(const PointDVectorIterator &ref) const {
return item_ < ref.item_;
}
bool operator>(const PointDVectorIterator &ref) const {
return item_ > ref.item_;
}
bool operator<=(const PointDVectorIterator &ref) const {
return item_ <= ref.item_;
}
bool operator>=(const PointDVectorIterator &ref) const {
return item_ >= ref.item_;
}
PointDVectorIterator operator+(const int32_t &add) const {
PointDVectorIterator copy(vec_, item_ + add);
return copy;
}
PointDVectorIterator &operator+=(const int32_t &add) {
item_ += add;
return *this;
}
PointDVectorIterator operator-(const int32_t &sub) const {
PointDVectorIterator copy(vec_, item_ - sub);
return copy;
}
size_t operator-(const PointDVectorIterator &sub) const {
return (item_ - sub.item_);
}
PointDVectorIterator &operator-=(const int32_t &sub) {
item_ -= sub;
return *this;
}
internal_t *operator[](const size_t &n) const {
return vec_->data0_ + (item_ + n) * dimensionality_;
}
protected:
explicit PointDVectorIterator(PointDVector *vec, size_t start_item)
: item_(start_item), vec_(vec), dimensionality_(vec->dimensionality_) {}
private:
size_t item_; // this counts the item that should be referenced.
PointDVector *const vec_; // the thing that we're iterating on
const uint32_t dimensionality_; // local copy from vec_
};
PointDVectorIterator begin() { return PointDVectorIterator(this, 0); }
PointDVectorIterator end() { return PointDVectorIterator(this, n_items_); }
// operator[] allows for unprotected user-side usage of operator[] on the
// return value AS IF it were a natively indexable type like Point3*
internal_t *operator[](const uint32_t index) {
DRACO_DCHECK_LT(index, n_items_);
return data0_ + index * dimensionality_;
}
const internal_t *operator[](const uint32_t index) const {
DRACO_DCHECK_LT(index, n_items_);
return data0_ + index * dimensionality_;
}
uint32_t size() const { return n_items_; }
size_t GetBufferSize() const { return data_.size(); }
// copy a single contiguous 'item' from one PointDVector into this one.
void CopyItem(const PointDVector &source, const internal_t source_index,
const internal_t destination_index) {
DRACO_DCHECK(&source != this ||
(&source == this && source_index != destination_index));
DRACO_DCHECK_LT(destination_index, n_items_);
DRACO_DCHECK_LT(source_index, source.n_items_);
// DRACO_DCHECK_EQ(source.n_items_, n_items_); // not technically necessary
DRACO_DCHECK_EQ(source.dimensionality_, dimensionality_);
const internal_t *ref = source[source_index];
internal_t *const dest = this->operator[](destination_index);
std::memcpy(dest, ref, item_size_bytes_);
}
// Copy data directly off of an attribute buffer interleaved into internal
// memory.
void CopyAttribute(
// The dimensionality of the attribute being integrated
const internal_t attribute_dimensionality,
// The offset in dimensions to insert this attribute.
const internal_t offset_dimensionality, const internal_t index,
// The direct pointer to the data
const void *const attribute_item_data) {
// chunk copy
const size_t copy_size = sizeof(internal_t) * attribute_dimensionality;
// a multiply and add can be optimized away with an iterator
std::memcpy(data0_ + index * dimensionality_ + offset_dimensionality,
attribute_item_data, copy_size);
}
// Copy data off of a contiguous buffer interleaved into internal memory
void CopyAttribute(
// The dimensionality of the attribute being integrated
const internal_t attribute_dimensionality,
// The offset in dimensions to insert this attribute.
const internal_t offset_dimensionality,
const internal_t *const attribute_mem) {
DRACO_DCHECK_LT(offset_dimensionality,
dimensionality_ - attribute_dimensionality);
// Degenerate case: block-copy the whole buffer.
if (dimensionality_ == attribute_dimensionality) {
DRACO_DCHECK_EQ(offset_dimensionality, 0);
const size_t copy_size =
sizeof(internal_t) * attribute_dimensionality * n_items_;
std::memcpy(data0_, attribute_mem, copy_size);
} else { // chunk copy
const size_t copy_size = sizeof(internal_t) * attribute_dimensionality;
internal_t *internal_data;
const internal_t *attribute_data;
internal_t item;
for (internal_data = data0_ + offset_dimensionality,
attribute_data = attribute_mem, item = 0;
item < n_items_; internal_data += dimensionality_,
attribute_data += attribute_dimensionality, item += 1) {
std::memcpy(internal_data, attribute_data, copy_size);
}
}
}
private:
// internal parameters.
const uint32_t n_items_;
const uint32_t dimensionality_; // The dimension of the points in the buffer
const uint32_t item_size_bytes_;
std::vector<internal_t> data_; // contiguously stored data. Never resized.
internal_t *const data0_; // raw pointer to base data.
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
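A minimal usage sketch for PointDVector, assuming the include path matches this tree: a 3-component attribute is interleaved into a 5-dimensional point buffer at dimension offset 1 and read back.

#include <cstdint>
#include <vector>

#include "draco/compression/attributes/point_d_vector.h"

// Interleaves a 3-component attribute into a 5-dimensional point buffer and
// reads one component back through operator[].
uint32_t FillAndRead() {
  constexpr uint32_t kNumPoints = 4;
  draco::PointDVector<uint32_t> points(kNumPoints, 5);
  // kNumPoints * 3 attribute values: point i gets the value i in every
  // component.
  const std::vector<uint32_t> attribute = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3};
  points.CopyAttribute(3, 1, attribute.data());
  return points[2][1];  // First attribute component of point 2, i.e. 2.
}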

View File

@ -0,0 +1,360 @@
// Copyright 2018 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/point_d_vector.h"
#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
#include "draco/core/draco_test_base.h"
namespace draco {
class PointDVectorTest : public ::testing::Test {
protected:
template <typename PT>
void TestIntegrity() {}
template <typename PT>
void TestSize() {
for (uint32_t n_items = 0; n_items <= 10; ++n_items) {
for (uint32_t dimensionality = 1; dimensionality <= 10;
++dimensionality) {
draco::PointDVector<PT> var(n_items, dimensionality);
ASSERT_EQ(n_items, var.size());
ASSERT_EQ(n_items * dimensionality, var.GetBufferSize());
}
}
}
template <typename PT>
void TestContentsContiguous() {
for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
for (uint32_t dimensionality = 1; dimensionality < 10;
dimensionality += 2) {
for (uint32_t att_dimensionality = 1;
att_dimensionality <= dimensionality; att_dimensionality += 2) {
for (uint32_t offset_dimensionality = 0;
offset_dimensionality < dimensionality - att_dimensionality;
++offset_dimensionality) {
PointDVector<PT> var(n_items, dimensionality);
std::vector<PT> att(n_items * att_dimensionality);
for (PT val = 0; val < n_items; val += 1) {
for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
att[val * att_dimensionality + att_dim] = val;
}
}
const PT *const attribute_data = att.data();
var.CopyAttribute(att_dimensionality, offset_dimensionality,
attribute_data);
for (PT val = 0; val < n_items; val += 1) {
for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
}
}
}
}
}
}
}
template <typename PT>
void TestContentsDiscrete() {
for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
for (uint32_t dimensionality = 1; dimensionality < 10;
dimensionality += 2) {
for (uint32_t att_dimensionality = 1;
att_dimensionality <= dimensionality; att_dimensionality += 2) {
for (uint32_t offset_dimensionality = 0;
offset_dimensionality < dimensionality - att_dimensionality;
++offset_dimensionality) {
PointDVector<PT> var(n_items, dimensionality);
std::vector<PT> att(n_items * att_dimensionality);
for (PT val = 0; val < n_items; val += 1) {
for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
att[val * att_dimensionality + att_dim] = val;
}
}
const PT *const attribute_data = att.data();
for (PT item = 0; item < n_items; item += 1) {
var.CopyAttribute(att_dimensionality, offset_dimensionality, item,
attribute_data + item * att_dimensionality);
}
for (PT val = 0; val < n_items; val += 1) {
for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
}
}
}
}
}
}
}
template <typename PT>
void TestContentsCopy() {
for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
for (uint32_t dimensionality = 1; dimensionality < 10;
dimensionality += 2) {
for (uint32_t att_dimensionality = 1;
att_dimensionality <= dimensionality; att_dimensionality += 2) {
for (uint32_t offset_dimensionality = 0;
offset_dimensionality < dimensionality - att_dimensionality;
++offset_dimensionality) {
PointDVector<PT> var(n_items, dimensionality);
PointDVector<PT> dest(n_items, dimensionality);
std::vector<PT> att(n_items * att_dimensionality);
for (PT val = 0; val < n_items; val += 1) {
for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
att[val * att_dimensionality + att_dim] = val;
}
}
const PT *const attribute_data = att.data();
var.CopyAttribute(att_dimensionality, offset_dimensionality,
attribute_data);
for (PT item = 0; item < n_items; item += 1) {
dest.CopyItem(var, item, item);
}
for (PT val = 0; val < n_items; val += 1) {
for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val);
}
}
}
}
}
}
}
template <typename PT>
void TestIterator() {
for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
for (uint32_t dimensionality = 1; dimensionality < 10;
dimensionality += 2) {
for (uint32_t att_dimensionality = 1;
att_dimensionality <= dimensionality; att_dimensionality += 2) {
for (uint32_t offset_dimensionality = 0;
offset_dimensionality < dimensionality - att_dimensionality;
++offset_dimensionality) {
PointDVector<PT> var(n_items, dimensionality);
PointDVector<PT> dest(n_items, dimensionality);
std::vector<PT> att(n_items * att_dimensionality);
for (PT val = 0; val < n_items; val += 1) {
for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
att[val * att_dimensionality + att_dim] = val;
}
}
const PT *const attribute_data = att.data();
var.CopyAttribute(att_dimensionality, offset_dimensionality,
attribute_data);
for (PT item = 0; item < n_items; item += 1) {
dest.CopyItem(var, item, item);
}
auto V0 = var.begin();
auto VE = var.end();
auto D0 = dest.begin();
auto DE = dest.end();
while (V0 != VE && D0 != DE) {
ASSERT_EQ(*D0, *V0); // compare PseudoPointD
// verify elemental values
for (auto index = 0; index < dimensionality; index += 1) {
ASSERT_EQ((*D0)[index], (*V0)[index]);
}
++V0;
++D0;
}
for (PT val = 0; val < n_items; val += 1) {
for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val);
}
}
}
}
}
}
}
template <typename PT>
void TestPoint3Iterator() {
for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
const uint32_t dimensionality = 3;
// for (uint32_t dimensionality = 1; dimensionality < 10;
// dimensionality += 2) {
const uint32_t att_dimensionality = 3;
// for (uint32_t att_dimensionality = 1;
// att_dimensionality <= dimensionality; att_dimensionality += 2) {
for (uint32_t offset_dimensionality = 0;
offset_dimensionality < dimensionality - att_dimensionality;
++offset_dimensionality) {
PointDVector<PT> var(n_items, dimensionality);
PointDVector<PT> dest(n_items, dimensionality);
std::vector<PT> att(n_items * att_dimensionality);
std::vector<draco::Point3ui> att3(n_items);
for (PT val = 0; val < n_items; val += 1) {
att3[val][0] = val;
att3[val][1] = val;
att3[val][2] = val;
for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
att[val * att_dimensionality + att_dim] = val;
}
}
const PT *const attribute_data = att.data();
var.CopyAttribute(att_dimensionality, offset_dimensionality,
attribute_data);
for (PT item = 0; item < n_items; item += 1) {
dest.CopyItem(var, item, item);
}
auto aV0 = att3.begin();
auto aVE = att3.end();
auto V0 = var.begin();
auto VE = var.end();
auto D0 = dest.begin();
auto DE = dest.end();
while (aV0 != aVE && V0 != VE && D0 != DE) {
ASSERT_EQ(*D0, *V0); // compare PseudoPointD
// verify elemental values
for (auto index = 0; index < dimensionality; index += 1) {
ASSERT_EQ((*D0)[index], (*V0)[index]);
ASSERT_EQ((*D0)[index], (*aV0)[index]);
ASSERT_EQ((*aV0)[index], (*V0)[index]);
}
++aV0;
++V0;
++D0;
}
for (PT val = 0; val < n_items; val += 1) {
for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val);
}
}
}
}
}
void TestPseudoPointDSwap() {
draco::Point3ui val = {0, 1, 2};
draco::Point3ui dest = {10, 11, 12};
draco::PseudoPointD<uint32_t> val_src1(&val[0], 3);
draco::PseudoPointD<uint32_t> dest_src1(&dest[0], 3);
ASSERT_EQ(val_src1[0], 0);
ASSERT_EQ(val_src1[1], 1);
ASSERT_EQ(val_src1[2], 2);
ASSERT_EQ(dest_src1[0], 10);
ASSERT_EQ(dest_src1[1], 11);
ASSERT_EQ(dest_src1[2], 12);
ASSERT_NE(val_src1, dest_src1);
swap(val_src1, dest_src1);
ASSERT_EQ(dest_src1[0], 0);
ASSERT_EQ(dest_src1[1], 1);
ASSERT_EQ(dest_src1[2], 2);
ASSERT_EQ(val_src1[0], 10);
ASSERT_EQ(val_src1[1], 11);
ASSERT_EQ(val_src1[2], 12);
ASSERT_NE(val_src1, dest_src1);
}
void TestPseudoPointDEquality() {
draco::Point3ui val = {0, 1, 2};
draco::Point3ui dest = {0, 1, 2};
draco::PseudoPointD<uint32_t> val_src1(&val[0], 3);
draco::PseudoPointD<uint32_t> val_src2(&val[0], 3);
draco::PseudoPointD<uint32_t> dest_src1(&dest[0], 3);
draco::PseudoPointD<uint32_t> dest_src2(&dest[0], 3);
ASSERT_EQ(val_src1, val_src1);
ASSERT_EQ(val_src1, val_src2);
ASSERT_EQ(dest_src1, val_src1);
ASSERT_EQ(dest_src1, val_src2);
ASSERT_EQ(val_src2, val_src1);
ASSERT_EQ(val_src2, val_src2);
ASSERT_EQ(dest_src2, val_src1);
ASSERT_EQ(dest_src2, val_src2);
for (auto i = 0; i < 3; i++) {
ASSERT_EQ(val_src1[i], val_src1[i]);
ASSERT_EQ(val_src1[i], val_src2[i]);
ASSERT_EQ(dest_src1[i], val_src1[i]);
ASSERT_EQ(dest_src1[i], val_src2[i]);
ASSERT_EQ(val_src2[i], val_src1[i]);
ASSERT_EQ(val_src2[i], val_src2[i]);
ASSERT_EQ(dest_src2[i], val_src1[i]);
ASSERT_EQ(dest_src2[i], val_src2[i]);
}
}
void TestPseudoPointDInequality() {
draco::Point3ui val = {0, 1, 2};
draco::Point3ui dest = {1, 2, 3};
draco::PseudoPointD<uint32_t> val_src1(&val[0], 3);
draco::PseudoPointD<uint32_t> val_src2(&val[0], 3);
draco::PseudoPointD<uint32_t> dest_src1(&dest[0], 3);
draco::PseudoPointD<uint32_t> dest_src2(&dest[0], 3);
ASSERT_EQ(val_src1, val_src1);
ASSERT_EQ(val_src1, val_src2);
ASSERT_NE(dest_src1, val_src1);
ASSERT_NE(dest_src1, val_src2);
ASSERT_EQ(val_src2, val_src1);
ASSERT_EQ(val_src2, val_src2);
ASSERT_NE(dest_src2, val_src1);
ASSERT_NE(dest_src2, val_src2);
for (auto i = 0; i < 3; i++) {
ASSERT_EQ(val_src1[i], val_src1[i]);
ASSERT_EQ(val_src1[i], val_src2[i]);
ASSERT_NE(dest_src1[i], val_src1[i]);
ASSERT_NE(dest_src1[i], val_src2[i]);
ASSERT_EQ(val_src2[i], val_src1[i]);
ASSERT_EQ(val_src2[i], val_src2[i]);
ASSERT_NE(dest_src2[i], val_src1[i]);
ASSERT_NE(dest_src2[i], val_src2[i]);
}
}
};
TEST_F(PointDVectorTest, VectorTest) {
TestSize<uint32_t>();
TestContentsDiscrete<uint32_t>();
TestContentsContiguous<uint32_t>();
TestContentsCopy<uint32_t>();
TestIterator<uint32_t>();
TestPoint3Iterator<uint32_t>();
}
TEST_F(PointDVectorTest, PseudoPointDTest) {
TestPseudoPointDSwap();
TestPseudoPointDEquality();
TestPseudoPointDInequality();
}
} // namespace draco

View File

@ -0,0 +1,63 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_
#include <vector>
#include "draco/attributes/point_attribute.h"
namespace draco {
// Class for generating a sequence of point ids that can be used to encode
// or decode attribute values in a specific order.
// See sequential_attribute_encoders/decoders_controller.h for more details.
class PointsSequencer {
public:
PointsSequencer() : out_point_ids_(nullptr) {}
virtual ~PointsSequencer() = default;
// Fills the |out_point_ids| with the generated sequence of point ids.
bool GenerateSequence(std::vector<PointIndex> *out_point_ids) {
out_point_ids_ = out_point_ids;
return GenerateSequenceInternal();
}
// Appends a point to the sequence.
void AddPointId(PointIndex point_id) { out_point_ids_->push_back(point_id); }
// Sets the correct mapping between point ids and value ids. I.e., the inverse
// of the |out_point_ids|. In general, |out_point_ids_| does not contain
// sufficient information to compute the inverse map, because not all point
// ids are necessarily contained within the map.
// Must be implemented for sequencers that are used by attribute decoders.
virtual bool UpdatePointToAttributeIndexMapping(PointAttribute * /* attr */) {
return false;
}
protected:
// Method that needs to be implemented by the derived classes. The
// implementation is responsible for filling |out_point_ids_| with the valid
// sequence of point ids.
virtual bool GenerateSequenceInternal() = 0;
std::vector<PointIndex> *out_point_ids() const { return out_point_ids_; }
private:
std::vector<PointIndex> *out_point_ids_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_

View File

@ -0,0 +1,231 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
#include <algorithm>
#include <cmath>
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
#include "draco/compression/bit_coders/rans_bit_decoder.h"
#include "draco/core/varint_decoding.h"
#include "draco/draco_features.h"
namespace draco {
// Decoder for predictions encoded with the constrained multi-parallelogram
// encoder. See the corresponding encoder for more details about the prediction
// method.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeConstrainedMultiParallelogramDecoder
: public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
public:
using CorrType =
typename PredictionSchemeDecoder<DataTypeT, TransformT>::CorrType;
using CornerTable = typename MeshDataT::CornerTable;
explicit MeshPredictionSchemeConstrainedMultiParallelogramDecoder(
const PointAttribute *attribute)
: MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
attribute),
selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
MeshPredictionSchemeConstrainedMultiParallelogramDecoder(
const PointAttribute *attribute, const TransformT &transform,
const MeshDataT &mesh_data)
: MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
attribute, transform, mesh_data),
selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
int size, int num_components,
const PointIndex *entry_to_point_id_map) override;
bool DecodePredictionData(DecoderBuffer *buffer) override;
PredictionSchemeMethod GetPredictionMethod() const override {
return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
}
bool IsInitialized() const override {
return this->mesh_data().IsInitialized();
}
private:
typedef constrained_multi_parallelogram::Mode Mode;
static constexpr int kMaxNumParallelograms =
constrained_multi_parallelogram::kMaxNumParallelograms;
// Crease edges are used to store whether any given edge should be used for
// parallelogram prediction or not. New values are added in the order in which
// the edges are processed. For better compression, the flags are stored in
// separate contexts based on the number of available parallelograms at a
// given vertex.
std::vector<bool> is_crease_edge_[kMaxNumParallelograms];
Mode selected_mode_;
};
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
DataTypeT, TransformT, MeshDataT>::
ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
int /* size */, int num_components,
const PointIndex * /* entry_to_point_id_map */) {
this->transform().Init(num_components);
// Predicted values for all simple parallelograms encountered at any given
// vertex.
std::vector<DataTypeT> pred_vals[kMaxNumParallelograms];
for (int i = 0; i < kMaxNumParallelograms; ++i) {
pred_vals[i].resize(num_components, 0);
}
this->transform().ComputeOriginalValue(pred_vals[0].data(), in_corr,
out_data);
const CornerTable *const table = this->mesh_data().corner_table();
const std::vector<int32_t> *const vertex_to_data_map =
this->mesh_data().vertex_to_data_map();
// Current position in the |is_crease_edge_| array for each context.
std::vector<int> is_crease_edge_pos(kMaxNumParallelograms, 0);
// Used to store predicted value for multi-parallelogram prediction.
std::vector<DataTypeT> multi_pred_vals(num_components);
const int corner_map_size =
static_cast<int>(this->mesh_data().data_to_corner_map()->size());
for (int p = 1; p < corner_map_size; ++p) {
const CornerIndex start_corner_id =
this->mesh_data().data_to_corner_map()->at(p);
CornerIndex corner_id(start_corner_id);
int num_parallelograms = 0;
bool first_pass = true;
while (corner_id != kInvalidCornerIndex) {
if (ComputeParallelogramPrediction(
p, corner_id, table, *vertex_to_data_map, out_data,
num_components, &(pred_vals[num_parallelograms][0]))) {
// Parallelogram prediction applied and stored in
// |pred_vals[num_parallelograms]|
++num_parallelograms;
// Stop processing when we reach the maximum number of allowed
// parallelograms.
if (num_parallelograms == kMaxNumParallelograms) {
break;
}
}
// Proceed to the next corner attached to the vertex. First swing left
// and if we reach a boundary, swing right from the start corner.
if (first_pass) {
corner_id = table->SwingLeft(corner_id);
} else {
corner_id = table->SwingRight(corner_id);
}
if (corner_id == start_corner_id) {
break;
}
if (corner_id == kInvalidCornerIndex && first_pass) {
first_pass = false;
corner_id = table->SwingRight(start_corner_id);
}
}
// Check which of the available parallelograms are actually used and compute
// the final predicted value.
int num_used_parallelograms = 0;
if (num_parallelograms > 0) {
for (int i = 0; i < num_components; ++i) {
multi_pred_vals[i] = 0;
}
// Check which parallelograms are actually used.
for (int i = 0; i < num_parallelograms; ++i) {
const int context = num_parallelograms - 1;
const int pos = is_crease_edge_pos[context]++;
if (is_crease_edge_[context].size() <= pos) {
return false;
}
const bool is_crease = is_crease_edge_[context][pos];
if (!is_crease) {
++num_used_parallelograms;
for (int j = 0; j < num_components; ++j) {
multi_pred_vals[j] += pred_vals[i][j];
}
}
}
}
const int dst_offset = p * num_components;
if (num_used_parallelograms == 0) {
// No parallelogram was valid.
// We use the last decoded point as a reference.
const int src_offset = (p - 1) * num_components;
this->transform().ComputeOriginalValue(
out_data + src_offset, in_corr + dst_offset, out_data + dst_offset);
} else {
// Compute the correction from the predicted value.
for (int c = 0; c < num_components; ++c) {
multi_pred_vals[c] /= num_used_parallelograms;
}
this->transform().ComputeOriginalValue(
multi_pred_vals.data(), in_corr + dst_offset, out_data + dst_offset);
}
}
return true;
}
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer
*buffer) {
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
// Decode prediction mode.
uint8_t mode;
if (!buffer->Decode(&mode)) {
return false;
}
if (mode != Mode::OPTIMAL_MULTI_PARALLELOGRAM) {
// Unsupported mode.
return false;
}
}
#endif
// Decode selected edges using a separate RAns bit decoder for each context.
for (int i = 0; i < kMaxNumParallelograms; ++i) {
uint32_t num_flags;
if (!DecodeVarint<uint32_t>(&num_flags, buffer)) {
return false;
}
if (num_flags > 0) {
is_crease_edge_[i].resize(num_flags);
RAnsBitDecoder decoder;
if (!decoder.StartDecoding(buffer)) {
return false;
}
for (uint32_t j = 0; j < num_flags; ++j) {
is_crease_edge_[i][j] = decoder.DecodeNextBit();
}
decoder.EndDecoding();
}
}
return MeshPredictionSchemeDecoder<DataTypeT, TransformT,
MeshDataT>::DecodePredictionData(buffer);
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
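The per-corner predictions that get averaged above come from the standard parallelogram rule: for a vertex across the shared edge from the triangle (prev, next, opposite), the prediction is prev + next - opposite. A self-contained sketch of that rule (Draco's ComputeParallelogramPrediction additionally maps corners to attribute entries and handles mesh boundaries):

#include <cstddef>
#include <cstdint>
#include <vector>

// Classical parallelogram prediction: the vertex being predicted sits across
// the shared edge from |opposite|, and its predicted attribute value is
// prev + next - opposite, assuming the four vertices form a parallelogram.
// All inputs must have the same number of components.
std::vector<int32_t> ParallelogramPredict(
    const std::vector<int32_t> &prev, const std::vector<int32_t> &next,
    const std::vector<int32_t> &opposite) {
  std::vector<int32_t> prediction(prev.size());
  for (std::size_t c = 0; c < prev.size(); ++c) {
    prediction[c] = prev[c] + next[c] - opposite[c];
  }
  return prediction;
}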

View File

@ -0,0 +1,414 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_
#include <algorithm>
#include <cmath>
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
#include "draco/compression/bit_coders/rans_bit_encoder.h"
#include "draco/compression/entropy/shannon_entropy.h"
#include "draco/core/varint_encoding.h"
namespace draco {
// Compared to standard multi-parallelogram, constrained multi-parallelogram can
// explicitly select which of the available parallelograms are going to be used
// for the prediction by marking crease edges between two triangles. This
// requires storing extra data, but it allows the predictor to avoid using
// parallelograms that would lead to poor predictions. For improved efficiency,
// our current implementation limits the maximum number of used parallelograms
// to four, which covers >95% of the cases (on average, there are only two
// parallelograms available for any given vertex).
// All bits of the explicitly chosen configuration are stored together in a
// single context chosen by the total number of parallelograms available to
// choose from.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeConstrainedMultiParallelogramEncoder
: public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
public:
using CorrType =
typename PredictionSchemeEncoder<DataTypeT, TransformT>::CorrType;
using CornerTable = typename MeshDataT::CornerTable;
explicit MeshPredictionSchemeConstrainedMultiParallelogramEncoder(
const PointAttribute *attribute)
: MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
attribute),
selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
MeshPredictionSchemeConstrainedMultiParallelogramEncoder(
const PointAttribute *attribute, const TransformT &transform,
const MeshDataT &mesh_data)
: MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
attribute, transform, mesh_data),
selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
bool ComputeCorrectionValues(
const DataTypeT *in_data, CorrType *out_corr, int size,
int num_components, const PointIndex *entry_to_point_id_map) override;
bool EncodePredictionData(EncoderBuffer *buffer) override;
PredictionSchemeMethod GetPredictionMethod() const override {
return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
}
bool IsInitialized() const override {
return this->mesh_data().IsInitialized();
}
private:
// Function used to compute the number of bits needed to store the overhead of the
// predictor. In this case, we consider overhead to be all bits that mark
// whether a parallelogram should be used for prediction or not. The input
// to this method is the total number of parallelograms that were evaluated so
// far(total_parallelogram), and the number of parallelograms we decided to
// use for prediction (total_used_parallelograms).
// Returns number of bits required to store the overhead.
int64_t ComputeOverheadBits(int64_t total_used_parallelograms,
int64_t total_parallelogram) const {
// For now we assume RAns coding for the bits where the total required size
// is directly correlated to the binary entropy of the input stream.
// TODO(ostava): This should be generalized in case we use other binary
// coding scheme.
const double entropy = ComputeBinaryShannonEntropy(
static_cast<uint32_t>(total_parallelogram),
static_cast<uint32_t>(total_used_parallelograms));
// Round up to the nearest full bit.
return static_cast<int64_t>(
ceil(static_cast<double>(total_parallelogram) * entropy));
}
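// Example: if 3 out of 10 parallelograms were used, the binary entropy is
// roughly 0.881 bits per flag, so ComputeOverheadBits() returns
// ceil(10 * 0.881) = 9.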
// Struct that contains data used for measuring the error of each available
// parallelogram configuration.
struct Error {
Error() : num_bits(0), residual_error(0) {}
// Primary metric: number of bits required to store the data as a result of
// the selected prediction configuration.
int num_bits;
// Secondary metric: absolute difference of residuals for the given
// configuration.
int residual_error;
bool operator<(const Error &e) const {
if (num_bits < e.num_bits) {
return true;
}
if (num_bits > e.num_bits) {
return false;
}
return residual_error < e.residual_error;
}
};
// Computes error for predicting |predicted_val| instead of |actual_val|.
// Error is computed as the number of bits needed to encode the difference
// between the values.
Error ComputeError(const DataTypeT *predicted_val,
const DataTypeT *actual_val, int *out_residuals,
int num_components) {
Error error;
for (int i = 0; i < num_components; ++i) {
const int dif = (predicted_val[i] - actual_val[i]);
error.residual_error += std::abs(dif);
out_residuals[i] = dif;
// Entropy needs unsigned symbols, so convert the signed difference to an
// unsigned symbol.
entropy_symbols_[i] = ConvertSignedIntToSymbol(dif);
}
    // Generate entropy data for the case in which this configuration is used.
    // Note that the entropy stream is NOT updated in this case.
const auto entropy_data =
entropy_tracker_.Peek(entropy_symbols_.data(), num_components);
error.num_bits = entropy_tracker_.GetNumberOfDataBits(entropy_data) +
entropy_tracker_.GetNumberOfRAnsTableBits(entropy_data);
return error;
}
typedef constrained_multi_parallelogram::Mode Mode;
static constexpr int kMaxNumParallelograms =
constrained_multi_parallelogram::kMaxNumParallelograms;
  // Crease edges are used to store whether any given edge should be used for
  // parallelogram prediction or not. New values are added in the order in
  // which the edges are processed. For better compression, the flags are
  // stored in separate contexts based on the number of available
  // parallelograms at a given vertex.
// TODO(draco-eng) reconsider std::vector<bool> (performance/space).
std::vector<bool> is_crease_edge_[kMaxNumParallelograms];
Mode selected_mode_;
ShannonEntropyTracker entropy_tracker_;
// Temporary storage for symbols that are fed into the |entropy_stream|.
// Always contains only |num_components| entries.
std::vector<uint32_t> entropy_symbols_;
};
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
DataTypeT, TransformT, MeshDataT>::
ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
int size, int num_components,
const PointIndex * /* entry_to_point_id_map */) {
this->transform().Init(in_data, size, num_components);
const CornerTable *const table = this->mesh_data().corner_table();
const std::vector<int32_t> *const vertex_to_data_map =
this->mesh_data().vertex_to_data_map();
// Predicted values for all simple parallelograms encountered at any given
// vertex.
std::vector<DataTypeT> pred_vals[kMaxNumParallelograms];
for (int i = 0; i < kMaxNumParallelograms; ++i) {
pred_vals[i].resize(num_components);
}
// Used to store predicted value for various multi-parallelogram predictions
// (combinations of simple parallelogram predictions).
std::vector<DataTypeT> multi_pred_vals(num_components);
entropy_symbols_.resize(num_components);
// Struct for holding data about prediction configuration for different sets
// of used parallelograms.
struct PredictionConfiguration {
PredictionConfiguration()
: error(), configuration(0), num_used_parallelograms(0) {}
Error error;
    // Bitfield: bit j set to 1 means that parallelogram j is used for
    // prediction, 0 means it is not.
    uint8_t configuration;
int num_used_parallelograms;
std::vector<DataTypeT> predicted_value;
std::vector<int32_t> residuals;
};
// Bit-field used for computing permutations of excluded edges
// (parallelograms).
  bool excluded_parallelograms[kMaxNumParallelograms];
  // Data about the number of used parallelograms and the total number of
  // available parallelograms for each context. Used to compute the overhead
  // needed for storing the parallelogram choices made by the encoder.
int64_t total_used_parallelograms[kMaxNumParallelograms] = {0};
int64_t total_parallelograms[kMaxNumParallelograms] = {0};
std::vector<int> current_residuals(num_components);
// We start processing the vertices from the end because this prediction uses
// data from previous entries that could be overwritten when an entry is
// processed.
for (int p =
static_cast<int>(this->mesh_data().data_to_corner_map()->size()) - 1;
p > 0; --p) {
const CornerIndex start_corner_id =
this->mesh_data().data_to_corner_map()->at(p);
// Go over all corners attached to the vertex and compute the predicted
// value from the parallelograms defined by their opposite faces.
CornerIndex corner_id(start_corner_id);
int num_parallelograms = 0;
bool first_pass = true;
while (corner_id != kInvalidCornerIndex) {
if (ComputeParallelogramPrediction(
p, corner_id, table, *vertex_to_data_map, in_data, num_components,
&(pred_vals[num_parallelograms][0]))) {
// Parallelogram prediction applied and stored in
// |pred_vals[num_parallelograms]|
++num_parallelograms;
// Stop processing when we reach the maximum number of allowed
// parallelograms.
if (num_parallelograms == kMaxNumParallelograms) {
break;
}
}
// Proceed to the next corner attached to the vertex. First swing left
// and if we reach a boundary, swing right from the start corner.
if (first_pass) {
corner_id = table->SwingLeft(corner_id);
} else {
corner_id = table->SwingRight(corner_id);
}
if (corner_id == start_corner_id) {
break;
}
if (corner_id == kInvalidCornerIndex && first_pass) {
first_pass = false;
corner_id = table->SwingRight(start_corner_id);
}
}
// Offset to the target (destination) vertex.
const int dst_offset = p * num_components;
Error error;
// Compute all prediction errors for all possible configurations of
// available parallelograms.
// Variable for holding the best configuration that has been found so far.
PredictionConfiguration best_prediction;
// Compute delta coding error (configuration when no parallelogram is
// selected).
const int src_offset = (p - 1) * num_components;
error = ComputeError(in_data + src_offset, in_data + dst_offset,
&current_residuals[0], num_components);
if (num_parallelograms > 0) {
total_parallelograms[num_parallelograms - 1] += num_parallelograms;
const int64_t new_overhead_bits =
ComputeOverheadBits(total_used_parallelograms[num_parallelograms - 1],
total_parallelograms[num_parallelograms - 1]);
error.num_bits += new_overhead_bits;
}
best_prediction.error = error;
best_prediction.configuration = 0;
best_prediction.num_used_parallelograms = 0;
best_prediction.predicted_value.assign(
in_data + src_offset, in_data + src_offset + num_components);
best_prediction.residuals.assign(current_residuals.begin(),
current_residuals.end());
// Compute prediction error for different cases of used parallelograms.
for (int num_used_parallelograms = 1;
num_used_parallelograms <= num_parallelograms;
++num_used_parallelograms) {
// Mark all parallelograms as excluded.
      std::fill(excluded_parallelograms,
                excluded_parallelograms + num_parallelograms, true);
// TODO(draco-eng) maybe this should be another std::fill.
// Mark the first |num_used_parallelograms| as not excluded.
for (int j = 0; j < num_used_parallelograms; ++j) {
        excluded_parallelograms[j] = false;
}
// Permute over the excluded edges and compute error for each
// configuration (permutation of excluded parallelograms).
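      // For example, with |num_parallelograms| = 3 and
      // |num_used_parallelograms| = 2, the flags start as {false, false, true}
      // and std::next_permutation() then visits {false, true, false} and
      // {true, false, false}, i.e., all C(3, 2) = 3 ways of selecting two of
      // the three parallelograms.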
do {
// Reset the multi-parallelogram predicted values.
for (int j = 0; j < num_components; ++j) {
multi_pred_vals[j] = 0;
}
uint8_t configuration = 0;
for (int j = 0; j < num_parallelograms; ++j) {
          if (excluded_parallelograms[j]) {
continue;
}
for (int c = 0; c < num_components; ++c) {
multi_pred_vals[c] += pred_vals[j][c];
}
// Set jth bit of the configuration.
configuration |= (1 << j);
}
for (int j = 0; j < num_components; ++j) {
multi_pred_vals[j] /= num_used_parallelograms;
}
error = ComputeError(multi_pred_vals.data(), in_data + dst_offset,
&current_residuals[0], num_components);
if (num_parallelograms > 0) {
const int64_t new_overhead_bits = ComputeOverheadBits(
total_used_parallelograms[num_parallelograms - 1] +
num_used_parallelograms,
total_parallelograms[num_parallelograms - 1]);
// Add overhead bits to the total error.
error.num_bits += new_overhead_bits;
}
if (error < best_prediction.error) {
best_prediction.error = error;
best_prediction.configuration = configuration;
best_prediction.num_used_parallelograms = num_used_parallelograms;
best_prediction.predicted_value.assign(multi_pred_vals.begin(),
multi_pred_vals.end());
best_prediction.residuals.assign(current_residuals.begin(),
current_residuals.end());
}
} while (std::next_permutation(
          excluded_parallelograms, excluded_parallelograms + num_parallelograms));
}
if (num_parallelograms > 0) {
total_used_parallelograms[num_parallelograms - 1] +=
best_prediction.num_used_parallelograms;
}
// Update the entropy stream by adding selected residuals as symbols to the
// stream.
for (int i = 0; i < num_components; ++i) {
entropy_symbols_[i] =
ConvertSignedIntToSymbol(best_prediction.residuals[i]);
}
entropy_tracker_.Push(entropy_symbols_.data(), num_components);
for (int i = 0; i < num_parallelograms; ++i) {
if ((best_prediction.configuration & (1 << i)) == 0) {
// Parallelogram not used, mark the edge as crease.
is_crease_edge_[num_parallelograms - 1].push_back(true);
} else {
// Parallelogram used. Add it to the predicted value and mark the
// edge as not a crease.
is_crease_edge_[num_parallelograms - 1].push_back(false);
}
}
this->transform().ComputeCorrection(in_data + dst_offset,
best_prediction.predicted_value.data(),
out_corr + dst_offset);
}
// First element is always fixed because it cannot be predicted.
for (int i = 0; i < num_components; ++i) {
pred_vals[0][i] = static_cast<DataTypeT>(0);
}
this->transform().ComputeCorrection(in_data, pred_vals[0].data(), out_corr);
return true;
}
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer
*buffer) {
// Encode selected edges using separate rans bit coder for each context.
for (int i = 0; i < kMaxNumParallelograms; ++i) {
// |i| is the context based on the number of available parallelograms, which
// is always equal to |i + 1|.
const int num_used_parallelograms = i + 1;
EncodeVarint<uint32_t>(is_crease_edge_[i].size(), buffer);
if (is_crease_edge_[i].size()) {
RAnsBitEncoder encoder;
encoder.StartEncoding();
      // Encode the crease edge flags in the reverse vertex order that is
      // needed by the decoder. Note that for the currently supported mode,
      // each vertex has exactly |num_used_parallelograms| edges that need to
      // be encoded.
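      // For example, with |num_used_parallelograms| = 2 and six stored flags,
      // the loop below starts at j = 4 and encodes the flag pairs (4, 5),
      // (2, 3) and finally (0, 1).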
for (int j = static_cast<int>(is_crease_edge_[i].size()) -
num_used_parallelograms;
j >= 0; j -= num_used_parallelograms) {
// Go over all edges of the current vertex.
for (int k = 0; k < num_used_parallelograms; ++k) {
encoder.EncodeBit(is_crease_edge_[i][j + k]);
}
}
encoder.EndEncoding(buffer);
}
}
return MeshPredictionSchemeEncoder<DataTypeT, TransformT,
MeshDataT>::EncodePredictionData(buffer);
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_
