diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index e0c2bec9e..35448d4dc 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,2 +1,2 @@ patreon: assimp -custom: https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=4JRJVPXC4QJM4 +open_collective: assimp diff --git a/.github/workflows/ccpp.yml b/.github/workflows/ccpp.yml index f29e2e500..510ae8e7f 100644 --- a/.github/workflows/ccpp.yml +++ b/.github/workflows/ccpp.yml @@ -67,7 +67,13 @@ jobs: uses: actions/checkout@v2 with: repository: cpp-pm/polly - path: cmake/polly + path: cmake/polly + + - name: Remove contrib directory for Hunter builds + if: contains(matrix.name, 'hunter') + uses: JesseTG/rm@v1.0.2 + with: + path: contrib - name: Cache DX SDK id: dxcache diff --git a/.gitignore b/.gitignore index fe59f9a70..f9c1a490f 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,9 @@ build *.VC.db-wal *.VC.opendb *.ipch +.vs/ +out/ +CMakeSettings.json # Output bin/ diff --git a/Build.md b/Build.md index 4b7513313..b4d1bdad0 100644 --- a/Build.md +++ b/Build.md @@ -1,6 +1,6 @@ -# Build Instructions +# Build / Install Instructions -## Build on all platforms using vcpkg +## Install on all platforms using vcpkg You can download and install assimp using the [vcpkg](https://github.com/Microsoft/vcpkg/) dependency manager: ```bash git clone https://github.com/Microsoft/vcpkg.git @@ -11,6 +11,18 @@ You can download and install assimp using the [vcpkg](https://github.com/Microso ``` The assimp port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository. +## Install on Ubuntu +You can install the Asset-Importer-Lib via apt: +``` +sudo apt-get install assimp +``` + +## Install pyassimp +You need to have pip installed: +``` +pip install pyassimp +``` + ## Manual build instructions ### Install CMake @@ -24,6 +36,12 @@ Make sure you have a working git-installation. Open a command prompt and clone t ```bash git clone https://github.com/assimp/assimp.git ``` +### Build from source: +```bash +cd assimp +cmake CMakeLists.txt +cmake --build . +``` ### Build instructions for Windows with Visual-Studio diff --git a/CMakeLists.txt b/CMakeLists.txt index 4b553ef9e..834f41a29 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -38,7 +38,7 @@ SET(CMAKE_POLICY_DEFAULT_CMP0012 NEW) SET(CMAKE_POLICY_DEFAULT_CMP0074 NEW) SET(CMAKE_POLICY_DEFAULT_CMP0092 NEW) -CMAKE_MINIMUM_REQUIRED( VERSION 3.0 ) +CMAKE_MINIMUM_REQUIRED( VERSION 3.10 ) # Toggles the use of the hunter package manager option(ASSIMP_HUNTER_ENABLED "Enable Hunter package manager support" OFF) @@ -46,8 +46,8 @@ option(ASSIMP_HUNTER_ENABLED "Enable Hunter package manager support" OFF) IF(ASSIMP_HUNTER_ENABLED) include("cmake/HunterGate.cmake") HunterGate( - URL "https://github.com/cpp-pm/hunter/archive/v0.23.269.tar.gz" - SHA1 "64024b7b95b4c86d50ae05b926814448c93a70a0" + URL "https://github.com/cpp-pm/hunter/archive/v0.23.293.tar.gz" + SHA1 "e8e5470652db77149d9b38656db2a6c0b7642693" ) add_definitions(-DASSIMP_USE_HUNTER) @@ -61,7 +61,6 @@ OPTION( BUILD_SHARED_LIBS "Build package with shared libraries." ON ) - OPTION( ASSIMP_BUILD_FRAMEWORK "Build package as Mac OS X Framework bundle." OFF @@ -133,9 +132,22 @@ OPTION ( ASSIMP_IGNORE_GIT_HASH ) IF ( WIN32 ) - OPTION ( ASSIMP_BUILD_ASSIMP_VIEW - "If the Assimp view tool is built. 
(requires DirectX)" - OFF ) + # Use subset of Windows.h + ADD_DEFINITIONS( -DWIN32_LEAN_AND_MEAN ) + + OPTION ( ASSIMP_BUILD_ASSIMP_VIEW + "If the Assimp view tool is built. (requires DirectX)" + OFF ) + + IF(MSVC) + OPTION( ASSIMP_INSTALL_PDB + "Install MSVC debug files." + ON ) + IF(NOT (MSVC_VERSION LESS 1900)) + # Multibyte character set is deprecated since at least MSVC2015 (possibly earlier) + ADD_DEFINITIONS( -DUNICODE -D_UNICODE ) + ENDIF() + ENDIF() ENDIF() IF (IOS AND NOT ASSIMP_HUNTER_ENABLED) @@ -145,21 +157,6 @@ IF (IOS AND NOT ASSIMP_HUNTER_ENABLED) ADD_DEFINITIONS(-DENABLE_BITCODE) ENDIF () -# Use subset of Windows.h -if (WIN32) - ADD_DEFINITIONS( -DWIN32_LEAN_AND_MEAN ) -endif() - -IF(MSVC) - OPTION( ASSIMP_INSTALL_PDB - "Install MSVC debug files." - ON - ) - IF(NOT (MSVC_VERSION LESS 1900)) - # Multibyte character set is deprecated since at least MSVC2015 (possibly earlier) - ADD_DEFINITIONS( -DUNICODE -D_UNICODE ) - ENDIF() -ENDIF() IF (ASSIMP_BUILD_FRAMEWORK) SET (BUILD_SHARED_LIBS ON) @@ -455,6 +452,12 @@ IF(ASSIMP_HUNTER_ENABLED) set(ZLIB_LIBRARIES ZLIB::zlib) set(ASSIMP_BUILD_MINIZIP TRUE) ELSE() + # If the zlib is already found outside, add an export in case assimpTargets can't find it. + IF( ZLIB_FOUND ) + INSTALL( TARGETS zlib + EXPORT "${TARGETS_EXPORT_NAME}") + ENDIF() + IF ( NOT ASSIMP_BUILD_ZLIB ) FIND_PACKAGE(ZLIB) ENDIF() @@ -570,6 +573,94 @@ ELSE () ADD_DEFINITIONS( -DASSIMP_BUILD_NO_C4D_IMPORTER ) ENDIF () +# Draco requires cmake 3.12 +IF (DEFINED CMAKE_VERSION AND "${CMAKE_VERSION}" VERSION_LESS "3.12") + message(NOTICE "draco requires cmake 3.12 or newer, cmake is ${CMAKE_VERSION} . Draco is disabled") + SET ( ASSIMP_BUILD_DRACO OFF CACHE BOOL "Disabled: Draco requires newer cmake" FORCE ) +ELSE() + OPTION ( ASSIMP_BUILD_DRACO "If the Draco libraries are to be built. 
Primarily for glTF" ON ) + IF ( ASSIMP_BUILD_DRACO ) + # Primarily for glTF v2 + # Enable Draco glTF feature set + set(DRACO_GLTF ON CACHE BOOL "" FORCE) + # Disable unnecessary or omitted components + set(DRACO_JS_GLUE OFF CACHE BOOL "" FORCE) + set(DRACO_WASM OFF CACHE BOOL "" FORCE) + set(DRACO_MAYA_PLUGIN OFF CACHE BOOL "" FORCE) + set(DRACO_UNITY_PLUGIN OFF CACHE BOOL "" FORCE) + set(DRACO_TESTS OFF CACHE BOOL "" FORCE) + + IF(ASSIMP_HUNTER_ENABLED) + hunter_add_package(draco) + find_package(draco CONFIG REQUIRED) + set(draco_LIBRARIES draco::draco) + ELSE() + # Draco 1.4.1 has many warnings and will not build with /WX or -Werror + # See https://github.com/google/draco/issues/672 + # and https://github.com/google/draco/issues/673 + IF(MSVC) + set(DRACO_CXX_FLAGS "/W0") + ELSE() + list(APPEND DRACO_CXX_FLAGS + "-Wno-bool-compare" + "-Wno-comment" + "-Wno-maybe-uninitialized" + "-Wno-sign-compare" + "-Wno-unused-local-typedefs" + ) + # Draco 1.4.1 does not explicitly export any symbols under GCC/clang + list(APPEND DRACO_CXX_FLAGS + "-fvisibility=default" + ) + ENDIF() + + # Don't build or install all of Draco by default + ADD_SUBDIRECTORY( "contrib/draco" EXCLUDE_FROM_ALL ) + + if(MSVC OR WIN32) + set(draco_LIBRARIES "draco") + else() + if(BUILD_SHARED_LIBS) + set(draco_LIBRARIES "draco_shared") + else() + set(draco_LIBRARIES "draco_static") + endif() + endif() + + # Don't build the draco command-line tools by default + set_target_properties(draco_encoder draco_decoder PROPERTIES + EXCLUDE_FROM_ALL TRUE + EXCLUDE_FROM_DEFAULT_BUILD TRUE + ) + + # Do build the draco shared library + set_target_properties(${draco_LIBRARIES} PROPERTIES + EXCLUDE_FROM_ALL FALSE + EXCLUDE_FROM_DEFAULT_BUILD FALSE + ) + + TARGET_USE_COMMON_OUTPUT_DIRECTORY(${draco_LIBRARIES}) + TARGET_USE_COMMON_OUTPUT_DIRECTORY(draco_encoder) + TARGET_USE_COMMON_OUTPUT_DIRECTORY(draco_decoder) + + set(draco_INCLUDE_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/contrib/draco/src") + + # This is probably wrong + INSTALL( TARGETS ${draco_LIBRARIES} + EXPORT "${TARGETS_EXPORT_NAME}" + LIBRARY DESTINATION ${ASSIMP_LIB_INSTALL_DIR} + ARCHIVE DESTINATION ${ASSIMP_LIB_INSTALL_DIR} + RUNTIME DESTINATION ${ASSIMP_BIN_INSTALL_DIR} + FRAMEWORK DESTINATION ${ASSIMP_LIB_INSTALL_DIR} + COMPONENT ${LIBASSIMP_COMPONENT} + INCLUDES DESTINATION include + ) + + ENDIF() + ENDIF() +ENDIF() + +# Main assimp code ADD_SUBDIRECTORY( code/ ) IF ( ASSIMP_BUILD_ASSIMP_TOOLS ) # The viewer for windows only @@ -583,7 +674,7 @@ IF ( ASSIMP_BUILD_ASSIMP_TOOLS ) ADD_SUBDIRECTORY( tools/assimp_cmd/ ) ENDIF () -IF ( ASSIMP_BUILD_SAMPLES) +IF ( ASSIMP_BUILD_SAMPLES ) SET( SAMPLES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/samples ) SET( SAMPLES_SHARED_CODE_DIR ${SAMPLES_DIR}/SharedCode ) IF ( WIN32 ) diff --git a/Readme.md b/Readme.md index c6212bcc0..71b3c7f10 100644 --- a/Readme.md +++ b/Readme.md @@ -45,6 +45,7 @@ Take a look into the https://github.com/assimp/assimp/blob/master/Build.md file. * [Unity 3d Plugin](https://www.assetstore.unity3d.com/en/#!/content/91777) * [JVM](https://github.com/kotlin-graphics/assimp) Full jvm port (current [status](https://github.com/kotlin-graphics/assimp/wiki/Status)) * [HAXE-Port](https://github.com/longde123/assimp-haxe) The Assimp-HAXE-port. +* [Rust](https://github.com/jkvargas/russimp) ### Other tools ### [open3mod](https://github.com/acgessler/open3mod) is a powerful 3D model viewer based on Assimp's import and export abilities. @@ -105,12 +106,6 @@ Become a financial contributor and help us sustain our community. 
[[Contribute]( Monthly donations via Patreon:
[![Patreon](https://cloud.githubusercontent.com/assets/8225057/5990484/70413560-a9ab-11e4-8942-1a63607c0b00.png)](http://www.patreon.com/assimp) -
- -One-off donations via PayPal: -
[![PayPal](https://www.paypalobjects.com/en_US/i/btn/btn_donate_LG.gif)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=4JRJVPXC4QJM4) - -
#### Organizations diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 4fb79dd69..000000000 --- a/appveyor.yml +++ /dev/null @@ -1,81 +0,0 @@ -# AppVeyor file -# http://www.appveyor.com/docs/appveyor-yml - -# clone directory -clone_folder: c:\projects\assimp - -clone_depth: 1 - -# branches to build -branches: - # whitelist - only: - - master - -matrix: - fast_finish: true - -image: - - Visual Studio 2013 - #- Visual Studio 2015 - #- Visual Studio 2017 - - Visual Studio 2019 - #- MinGW - -platform: - - Win32 - - x64 - -configuration: Release - -install: - - set PATH=C:\Ruby24-x64\bin;%PATH% - - set CMAKE_DEFINES -DASSIMP_WERROR=ON - - if [%COMPILER%]==[MinGW] set PATH=C:\MinGW\bin;%PATH% - - if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2013" set CMAKE_GENERATOR_NAME=Visual Studio 12 2013 - - if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2015" set CMAKE_GENERATOR_NAME=Visual Studio 14 2015 - - if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2017" set CMAKE_GENERATOR_NAME=Visual Studio 15 2017 - - if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2019" set CMAKE_GENERATOR_NAME=Visual Studio 16 2019 - - cmake %CMAKE_DEFINES% -G "%CMAKE_GENERATOR_NAME%" -A %platform% . - # Rename sh.exe as sh.exe in PATH interferes with MinGW - if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2015" set CMAKE_GENERATOR_NAME=Visual Studio 14 2015 - - - rename "C:\Program Files\Git\usr\bin\sh.exe" "sh2.exe" - - set PATH=%PATH%;"C:\\Program Files (x86)\\Inno Setup 5" - - ps: Invoke-WebRequest -Uri https://download.microsoft.com/download/5/7/b/57b2947c-7221-4f33-b35e-2fc78cb10df4/vc_redist.x64.exe -OutFile .\packaging\windows-innosetup\vc_redist.x64.exe - - ps: Invoke-WebRequest -Uri https://download.microsoft.com/download/1/d/8/1d8137db-b5bb-4925-8c5d-927424a2e4de/vc_redist.x86.exe -OutFile .\packaging\windows-innosetup\vc_redist.x86.exe - -cache: - - code\assimp.dir\%CONFIGURATION% - - contrib\zlib\zlibstatic.dir\%CONFIGURATION% - - contrib\zlib\zlib.dir\%CONFIGURATION% - - tools\assimp_cmd\assimp_cmd.dir\%CONFIGURATION% - - tools\assimp_view\assimp_viewer.dir\%CONFIGURATION% - - test\unit.dir\%CONFIGURATION% - - bin\.mtime_cache - -before_build: - - echo NUMBER_OF_PROCESSORS=%NUMBER_OF_PROCESSORS% - - ruby scripts\AppVeyor\mtime_cache -g scripts\AppVeyor\cacheglobs.txt -c bin\.mtime_cache\cache.json - -build_script: - cmake --build . 
--config Release -- /maxcpucount:2 - -after_build: - - if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2017" ( - if "%platform%"=="x64" ( - iscc packaging\windows-innosetup\script_x64.iss - ) else ( - iscc packaging\windows-innosetup\script_x86.iss - ) - ) - - 7z a assimp.7z bin\%CONFIGURATION%\* lib\%CONFIGURATION%\* - -test_script: - - cmd: bin\%CONFIGURATION%\unit.exe --gtest_output=xml:testout.xml - -on_finish: - - ps: (new-object net.webclient).UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\testout.xml)) - -artifacts: - - path: assimp.7z - name: assimp_lib diff --git a/cmake/assimp-hunter-config.cmake.in b/cmake/assimp-hunter-config.cmake.in index b5283f4fb..91efcbf24 100644 --- a/cmake/assimp-hunter-config.cmake.in +++ b/cmake/assimp-hunter-config.cmake.in @@ -10,5 +10,9 @@ find_package(polyclipping CONFIG REQUIRED) find_package(zip CONFIG REQUIRED) find_package(pugixml CONFIG REQUIRED) +if(@ASSIMP_BUILD_DRACO@) + find_package(draco CONFIG REQUIRED) +endif() + include("${CMAKE_CURRENT_LIST_DIR}/@TARGETS_EXPORT_NAME@.cmake") check_required_components("@PROJECT_NAME@") diff --git a/code/AssetLib/3DS/3DSLoader.cpp b/code/AssetLib/3DS/3DSLoader.cpp index 4c24394fb..c041df0a3 100644 --- a/code/AssetLib/3DS/3DSLoader.cpp +++ b/code/AssetLib/3DS/3DSLoader.cpp @@ -266,6 +266,7 @@ void Discreet3DSImporter::ParseMainChunk() { case Discreet3DS::CHUNK_PRJ: bIsPrj = true; + break; case Discreet3DS::CHUNK_MAIN: ParseEditorChunk(); break; diff --git a/code/AssetLib/3MF/D3MFImporter.cpp b/code/AssetLib/3MF/D3MFImporter.cpp index 66b2c965b..4ee8946f1 100644 --- a/code/AssetLib/3MF/D3MFImporter.cpp +++ b/code/AssetLib/3MF/D3MFImporter.cpp @@ -427,7 +427,7 @@ private: aiFace face = ReadTriangle(currentNode); faces.push_back(face); - int pid, p1; + int pid = 0, p1; bool hasPid = getNodeAttribute(currentNode, D3MF::XmlTag::pid, pid); bool hasP1 = getNodeAttribute(currentNode, D3MF::XmlTag::p1, p1); diff --git a/code/AssetLib/AMF/AMFImporter_Geometry.cpp b/code/AssetLib/AMF/AMFImporter_Geometry.cpp index 7afe52311..51670072c 100644 --- a/code/AssetLib/AMF/AMFImporter_Geometry.cpp +++ b/code/AssetLib/AMF/AMFImporter_Geometry.cpp @@ -194,7 +194,7 @@ void AMFImporter::ParseNode_Coordinates(XmlNode &node) { // // diff --git a/code/AssetLib/AMF/AMFImporter_Node.hpp b/code/AssetLib/AMF/AMFImporter_Node.hpp index 81e0312b6..c757d56e0 100644 --- a/code/AssetLib/AMF/AMFImporter_Node.hpp +++ b/code/AssetLib/AMF/AMFImporter_Node.hpp @@ -240,7 +240,7 @@ struct AMFVertices : public AMFNodeElementBase { /// Structure that define volume node. struct AMFVolume : public AMFNodeElementBase { std::string MaterialID; ///< Which material to use. - std::string Type; ///< What this volume describes can be “region” or “support”. If none specified, “object” is assumed. + std::string Type; ///< What this volume describes can be "region" or "support". If none specified, "object" is assumed. /// Constructor. /// \param [in] pParent - pointer to parent node. 
diff --git a/code/AssetLib/AMF/AMFImporter_Postprocess.cpp b/code/AssetLib/AMF/AMFImporter_Postprocess.cpp index 98151d1c0..0236ab557 100644 --- a/code/AssetLib/AMF/AMFImporter_Postprocess.cpp +++ b/code/AssetLib/AMF/AMFImporter_Postprocess.cpp @@ -329,8 +329,8 @@ void AMFImporter::Postprocess_AddMetadata(const AMFMetaDataArray &metadataList, sceneNode.mMetaData = aiMetadata::Alloc(static_cast(metadataList.size())); size_t meta_idx(0); - for (const AMFMetadata &metadata : metadataList) { - sceneNode.mMetaData->Set(static_cast(meta_idx++), metadata.Type, aiString(metadata.Value)); + for (const AMFMetadata *metadata : metadataList) { + sceneNode.mMetaData->Set(static_cast(meta_idx++), metadata->Type, aiString(metadata->Value)); } } diff --git a/code/AssetLib/COB/COBLoader.cpp b/code/AssetLib/COB/COBLoader.cpp index 80b41143e..a97ce8ea9 100644 --- a/code/AssetLib/COB/COBLoader.cpp +++ b/code/AssetLib/COB/COBLoader.cpp @@ -44,6 +44,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef ASSIMP_BUILD_NO_COB_IMPORTER + #include "AssetLib/COB/COBLoader.h" #include "AssetLib/COB/COBScene.h" #include "PostProcessing/ConvertToLHProcess.h" @@ -90,11 +91,15 @@ static const aiImporterDesc desc = { // ------------------------------------------------------------------------------------------------ // Constructor to be privately used by Importer -COBImporter::COBImporter() {} +COBImporter::COBImporter() { + // empty +} // ------------------------------------------------------------------------------------------------ // Destructor, private as well -COBImporter::~COBImporter() {} +COBImporter::~COBImporter() { + // empty +} // ------------------------------------------------------------------------------------------------ // Returns whether the class can handle the format of the given file. @@ -466,8 +471,9 @@ void COBImporter::UnsupportedChunk_Ascii(LineSplitter &splitter, const ChunkInfo // missing the next line. 
splitter.get_stream().IncPtr(nfo.size); splitter.swallow_next_increment(); - } else + } else { ThrowException(error); + } } // ------------------------------------------------------------------------------------------------ @@ -790,25 +796,12 @@ void COBImporter::ReadBitM_Ascii(Scene & /*out*/, LineSplitter &splitter, const if (nfo.version > 1) { return UnsupportedChunk_Ascii(splitter, nfo, "BitM"); } - /* - "\nThumbNailHdrSize %ld" - "\nThumbHeader: %02hx 02hx %02hx " - "\nColorBufSize %ld" - "\nColorBufZipSize %ld" - "\nZippedThumbnail: %02hx 02hx %02hx " -*/ const unsigned int head = strtoul10((++splitter)[1]); if (head != sizeof(Bitmap::BitmapHeader)) { ASSIMP_LOG_WARN("Unexpected ThumbNailHdrSize, skipping this chunk"); return; } - - /*union { - Bitmap::BitmapHeader data; - char opaq[sizeof Bitmap::BitmapHeader()]; - };*/ - // ReadHexOctets(opaq,head,(++splitter)[1]); } // ------------------------------------------------------------------------------------------------ @@ -884,7 +877,10 @@ void COBImporter::ReadBinaryFile(Scene &out, StreamReaderLE *reader) { while (1) { std::string type; - type += reader->GetI1(), type += reader->GetI1(), type += reader->GetI1(), type += reader->GetI1(); + type += reader->GetI1(); + type += reader->GetI1(); + type += reader->GetI1(); + type += reader->GetI1(); ChunkInfo nfo; nfo.version = reader->GetI2() * 10; @@ -906,14 +902,7 @@ void COBImporter::ReadBinaryFile(Scene &out, StreamReaderLE *reader) { ReadCame_Binary(out, *reader, nfo); } else if (type == "Mat1") { ReadMat1_Binary(out, *reader, nfo); - } - /* else if (type == "Bone") { - ReadBone_Binary(out,*reader,nfo); - } - else if (type == "Chan") { - ReadChan_Binary(out,*reader,nfo); - }*/ - else if (type == "Unit") { + } else if (type == "Unit") { ReadUnit_Binary(out, *reader, nfo); } else if (type == "OLay") { // ignore layer index silently. @@ -923,8 +912,9 @@ void COBImporter::ReadBinaryFile(Scene &out, StreamReaderLE *reader) { return UnsupportedChunk_Binary(*reader, nfo, type.c_str()); } else if (type == "END ") { return; - } else + } else { UnsupportedChunk_Binary(*reader, nfo, type.c_str()); + } } } diff --git a/code/AssetLib/Collada/ColladaHelper.h b/code/AssetLib/Collada/ColladaHelper.h index bfd57918e..a7f90bbb3 100644 --- a/code/AssetLib/Collada/ColladaHelper.h +++ b/code/AssetLib/Collada/ColladaHelper.h @@ -48,7 +48,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #include -#include +#include #include #include #include @@ -206,7 +206,8 @@ struct SemanticMappingTable { std::string mMatName; /// List of semantic map commands, grouped by effect semantic name - std::map mMap; + using InputSemanticMap = std::map; + InputSemanticMap mMap; /// For std::find bool operator==(const std::string &s) const { diff --git a/code/AssetLib/Collada/ColladaLoader.cpp b/code/AssetLib/Collada/ColladaLoader.cpp index 99cb8c305..939f72c9c 100644 --- a/code/AssetLib/Collada/ColladaLoader.cpp +++ b/code/AssetLib/Collada/ColladaLoader.cpp @@ -63,6 +63,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
namespace Assimp { using namespace Assimp::Formatter; +using namespace Assimp::Collada; static const aiImporterDesc desc = { "Collada Importer", @@ -271,7 +272,7 @@ aiNode *ColladaLoader::BuildHierarchy(const ColladaParser &pParser, const Collad node->mTransformation = pParser.CalculateResultTransform(pNode->mTransforms); // now resolve node instances - std::vector instances; + std::vector instances; ResolveNodeInstances(pParser, pNode, instances); // add children. first the *real* ones @@ -298,8 +299,8 @@ aiNode *ColladaLoader::BuildHierarchy(const ColladaParser &pParser, const Collad // ------------------------------------------------------------------------------------------------ // Resolve node instances -void ColladaLoader::ResolveNodeInstances(const ColladaParser &pParser, const Collada::Node *pNode, - std::vector &resolved) { +void ColladaLoader::ResolveNodeInstances(const ColladaParser &pParser, const Node *pNode, + std::vector &resolved) { // reserve enough storage resolved.reserve(pNode->mNodeInstances.size()); @@ -307,7 +308,7 @@ void ColladaLoader::ResolveNodeInstances(const ColladaParser &pParser, const Col for (const auto &nodeInst : pNode->mNodeInstances) { // find the corresponding node in the library const ColladaParser::NodeLibrary::const_iterator itt = pParser.mNodeLibrary.find(nodeInst.mNode); - const Collada::Node *nd = itt == pParser.mNodeLibrary.end() ? nullptr : (*itt).second; + const Node *nd = itt == pParser.mNodeLibrary.end() ? nullptr : (*itt).second; // FIX for http://sourceforge.net/tracker/?func=detail&aid=3054873&group_id=226462&atid=1067632 // need to check for both name and ID to catch all. To avoid breaking valid files, @@ -326,13 +327,13 @@ void ColladaLoader::ResolveNodeInstances(const ColladaParser &pParser, const Col // ------------------------------------------------------------------------------------------------ // Resolve UV channels -void ColladaLoader::ApplyVertexToEffectSemanticMapping(Collada::Sampler &sampler, const Collada::SemanticMappingTable &table) { - std::map::const_iterator it = table.mMap.find(sampler.mUVChannel); +void ColladaLoader::ApplyVertexToEffectSemanticMapping(Sampler &sampler, const SemanticMappingTable &table) { + SemanticMappingTable::InputSemanticMap::const_iterator it = table.mMap.find(sampler.mUVChannel); if (it == table.mMap.end()) { return; } - if (it->second.mType != Collada::IT_Texcoord) { + if (it->second.mType != IT_Texcoord) { ASSIMP_LOG_ERROR("Collada: Unexpected effect input mapping"); } @@ -341,8 +342,8 @@ void ColladaLoader::ApplyVertexToEffectSemanticMapping(Collada::Sampler &sampler // ------------------------------------------------------------------------------------------------ // Builds lights for the given node and references them -void ColladaLoader::BuildLightsForNode(const ColladaParser &pParser, const Collada::Node *pNode, aiNode *pTarget) { - for (const Collada::LightInstance &lid : pNode->mLights) { +void ColladaLoader::BuildLightsForNode(const ColladaParser &pParser, const Node *pNode, aiNode *pTarget) { + for (const LightInstance &lid : pNode->mLights) { // find the referred light ColladaParser::LightLibrary::const_iterator srcLightIt = pParser.mLightLibrary.find(lid.mLight); if (srcLightIt == pParser.mLightLibrary.end()) { @@ -406,8 +407,8 @@ void ColladaLoader::BuildLightsForNode(const ColladaParser &pParser, const Colla // ------------------------------------------------------------------------------------------------ // Builds cameras for the given node and references them -void 
ColladaLoader::BuildCamerasForNode(const ColladaParser &pParser, const Collada::Node *pNode, aiNode *pTarget) { - for (const Collada::CameraInstance &cid : pNode->mCameras) { +void ColladaLoader::BuildCamerasForNode(const ColladaParser &pParser, const Node *pNode, aiNode *pTarget) { + for (const CameraInstance &cid : pNode->mCameras) { // find the referred light ColladaParser::CameraLibrary::const_iterator srcCameraIt = pParser.mCameraLibrary.find(cid.mCamera); if (srcCameraIt == pParser.mCameraLibrary.end()) { @@ -461,15 +462,15 @@ void ColladaLoader::BuildCamerasForNode(const ColladaParser &pParser, const Coll // ------------------------------------------------------------------------------------------------ // Builds meshes for the given node and references them -void ColladaLoader::BuildMeshesForNode(const ColladaParser &pParser, const Collada::Node *pNode, aiNode *pTarget) { +void ColladaLoader::BuildMeshesForNode(const ColladaParser &pParser, const Node *pNode, aiNode *pTarget) { // accumulated mesh references by this node std::vector newMeshRefs; newMeshRefs.reserve(pNode->mMeshes.size()); // add a mesh for each subgroup in each collada mesh - for (const Collada::MeshInstance &mid : pNode->mMeshes) { - const Collada::Mesh *srcMesh = nullptr; - const Collada::Controller *srcController = nullptr; + for (const MeshInstance &mid : pNode->mMeshes) { + const Mesh *srcMesh = nullptr; + const Controller *srcController = nullptr; // find the referred mesh ColladaParser::MeshLibrary::const_iterator srcMeshIt = pParser.mMeshLibrary.find(mid.mMeshOrController); @@ -503,7 +504,7 @@ void ColladaLoader::BuildMeshesForNode(const ColladaParser &pParser, const Colla // find material assigned to this submesh std::string meshMaterial; - std::map::const_iterator meshMatIt = mid.mMaterials.find(submesh.mMaterial); + std::map::const_iterator meshMatIt = mid.mMaterials.find(submesh.mMaterial); const Collada::SemanticMappingTable *table = nullptr; if (meshMatIt != mid.mMaterials.end()) { @@ -557,7 +558,12 @@ void ColladaLoader::BuildMeshesForNode(const ColladaParser &pParser, const Colla faceStart += submesh.mNumFaces; // assign the material index - dstMesh->mMaterialIndex = matIdx; + std::map::const_iterator subMatIt = mMaterialIndexByName.find(submesh.mMaterial); + if (subMatIt != mMaterialIndexByName.end()) { + dstMesh->mMaterialIndex = static_cast(subMatIt->second); + } else { + dstMesh->mMaterialIndex = matIdx; + } if (dstMesh->mName.length == 0) { dstMesh->mName = mid.mMeshOrController; } @@ -586,15 +592,15 @@ aiMesh *ColladaLoader::findMesh(const std::string &meshid) { return nullptr; } - for (unsigned int i = 0; i < mMeshes.size(); ++i) { - if (std::string(mMeshes[i]->mName.data) == meshid) { - return mMeshes[i]; + for (auto & mMeshe : mMeshes) { + if (std::string(mMeshe->mName.data) == meshid) { + return mMeshe; } } - for (unsigned int i = 0; i < mTargetMeshes.size(); ++i) { - if (std::string(mTargetMeshes[i]->mName.data) == meshid) { - return mTargetMeshes[i]; + for (auto & mTargetMeshe : mTargetMeshes) { + if (std::string(mTargetMeshe->mName.data) == meshid) { + return mTargetMeshe; } } @@ -603,8 +609,8 @@ aiMesh *ColladaLoader::findMesh(const std::string &meshid) { // ------------------------------------------------------------------------------------------------ // Creates a mesh for the given ColladaMesh face subset and returns the newly created mesh -aiMesh *ColladaLoader::CreateMesh(const ColladaParser &pParser, const Collada::Mesh *pSrcMesh, const Collada::SubMesh &pSubMesh, - const 
Collada::Controller *pSrcController, size_t pStartVertex, size_t pStartFace) { +aiMesh *ColladaLoader::CreateMesh(const ColladaParser &pParser, const Mesh *pSrcMesh, const SubMesh &pSubMesh, + const Controller *pSrcController, size_t pStartVertex, size_t pStartFace) { std::unique_ptr dstMesh(new aiMesh); if (useColladaName) { @@ -642,7 +648,7 @@ aiMesh *ColladaLoader::CreateMesh(const ColladaParser &pParser, const Collada::M std::copy(pSrcMesh->mBitangents.begin() + pStartVertex, pSrcMesh->mBitangents.begin() + pStartVertex + numVertices, dstMesh->mBitangents); } - // same for texturecoords, as many as we have + // same for texture coords, as many as we have // empty slots are not allowed, need to pack and adjust UV indexes accordingly for (size_t a = 0, real = 0; a < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++a) { if (pSrcMesh->mTexCoords[a].size() >= pStartVertex + numVertices) { @@ -682,11 +688,11 @@ aiMesh *ColladaLoader::CreateMesh(const ColladaParser &pParser, const Collada::M // create morph target meshes if any std::vector targetMeshes; std::vector targetWeights; - Collada::MorphMethod method = Collada::Normalized; + Collada::MorphMethod method = Normalized; - for (std::map::const_iterator it = pParser.mControllerLibrary.begin(); + for (std::map::const_iterator it = pParser.mControllerLibrary.begin(); it != pParser.mControllerLibrary.end(); ++it) { - const Collada::Controller &c = it->second; + const Controller &c = it->second; const Collada::Mesh *baseMesh = pParser.ResolveLibraryReference(pParser.mMeshLibrary, c.mMeshId); if (c.mType == Collada::Morph && baseMesh->mName == pSrcMesh->mName) { @@ -705,8 +711,8 @@ aiMesh *ColladaLoader::CreateMesh(const ColladaParser &pParser, const Collada::M throw DeadlyImportError("target weight data must not be textual "); } - for (unsigned int i = 0; i < targetData.mStrings.size(); ++i) { - const Collada::Mesh *targetMesh = pParser.ResolveLibraryReference(pParser.mMeshLibrary, targetData.mStrings.at(i)); + for (const auto & mString : targetData.mStrings) { + const Mesh *targetMesh = pParser.ResolveLibraryReference(pParser.mMeshLibrary, mString); aiMesh *aimesh = findMesh(useColladaName ? targetMesh->mName : targetMesh->mId); if (!aimesh) { @@ -718,12 +724,12 @@ aiMesh *ColladaLoader::CreateMesh(const ColladaParser &pParser, const Collada::M } targetMeshes.push_back(aimesh); } - for (unsigned int i = 0; i < weightData.mValues.size(); ++i) { - targetWeights.push_back(weightData.mValues.at(i)); + for (float mValue : weightData.mValues) { + targetWeights.push_back(mValue); } } } - if (targetMeshes.size() > 0 && targetWeights.size() == targetMeshes.size()) { + if (!targetMeshes.empty() && targetWeights.size() == targetMeshes.size()) { std::vector animMeshes; for (unsigned int i = 0; i < targetMeshes.size(); ++i) { aiMesh *targetMesh = targetMeshes.at(i); @@ -733,7 +739,7 @@ aiMesh *ColladaLoader::CreateMesh(const ColladaParser &pParser, const Collada::M animMesh->mName = targetMesh->mName; animMeshes.push_back(animMesh); } - dstMesh->mMethod = (method == Collada::Relative) ? aiMorphingMethod_MORPH_RELATIVE : aiMorphingMethod_MORPH_NORMALIZED; + dstMesh->mMethod = (method == Relative) ? 
aiMorphingMethod_MORPH_RELATIVE : aiMorphingMethod_MORPH_NORMALIZED; dstMesh->mAnimMeshes = new aiAnimMesh *[animMeshes.size()]; dstMesh->mNumAnimMeshes = static_cast(animMeshes.size()); for (unsigned int i = 0; i < animMeshes.size(); ++i) { @@ -757,18 +763,20 @@ aiMesh *ColladaLoader::CreateMesh(const ColladaParser &pParser, const Collada::M const Collada::Accessor &weightsAcc = pParser.ResolveLibraryReference(pParser.mAccessorLibrary, pSrcController->mWeightInputWeights.mAccessor); const Collada::Data &weights = pParser.ResolveLibraryReference(pParser.mDataLibrary, weightsAcc.mSource); - if (!jointNames.mIsStringArray || jointMatrices.mIsStringArray || weights.mIsStringArray) + if (!jointNames.mIsStringArray || jointMatrices.mIsStringArray || weights.mIsStringArray) { throw DeadlyImportError("Data type mismatch while resolving mesh joints"); + } // sanity check: we rely on the vertex weights always coming as pairs of BoneIndex-WeightIndex - if (pSrcController->mWeightInputJoints.mOffset != 0 || pSrcController->mWeightInputWeights.mOffset != 1) + if (pSrcController->mWeightInputJoints.mOffset != 0 || pSrcController->mWeightInputWeights.mOffset != 1) { throw DeadlyImportError("Unsupported vertex_weight addressing scheme. "); + } // create containers to collect the weights for each bone size_t numBones = jointNames.mStrings.size(); std::vector> dstBones(numBones); // build a temporary array of pointers to the start of each vertex's weights - typedef std::vector> IndexPairVector; + using IndexPairVector = std::vector>; std::vector weightStartPerVertex; weightStartPerVertex.resize(pSrcController->mWeightCounts.size(), pSrcController->mWeights.end()); @@ -807,8 +815,8 @@ aiMesh *ColladaLoader::CreateMesh(const ColladaParser &pParser, const Collada::M // count the number of bones which influence vertices of the current submesh size_t numRemainingBones = 0; - for (std::vector>::const_iterator it = dstBones.begin(); it != dstBones.end(); ++it) { - if (it->size() > 0) { + for (const auto & dstBone : dstBones) { + if (!dstBone.empty()) { ++numRemainingBones; } } @@ -867,12 +875,12 @@ aiMesh *ColladaLoader::CreateMesh(const ColladaParser &pParser, const Collada::M // and replace the bone's name by the node's name so that the user can use the standard // find-by-name method to associate nodes with bones. 
const Collada::Node *bnode = FindNode(pParser.mRootNode, bone->mName.data); - if (!bnode) { + if (nullptr == bnode) { bnode = FindNodeBySID(pParser.mRootNode, bone->mName.data); } // assign the name that we would have assigned for the source node - if (bnode) { + if (nullptr != bnode) { bone->mName.Set(FindNameForNode(bnode)); } else { ASSIMP_LOG_WARN_F("ColladaLoader::CreateMesh(): could not find corresponding node for joint \"", bone->mName.data, "\"."); @@ -973,8 +981,8 @@ void ColladaLoader::StoreAnimations(aiScene *pScene, const ColladaParser &pParse std::set animTargets; animTargets.insert(templateAnim->mChannels[0]->mNodeName.C_Str()); bool collectedAnimationsHaveDifferentChannels = true; - for (size_t b = 0; b < collectedAnimIndices.size(); ++b) { - aiAnimation *srcAnimation = mAnims[collectedAnimIndices[b]]; + for (unsigned long long collectedAnimIndice : collectedAnimIndices) { + aiAnimation *srcAnimation = mAnims[(int)collectedAnimIndice]; std::string channelName = std::string(srcAnimation->mChannels[0]->mNodeName.C_Str()); if (animTargets.find(channelName) == animTargets.end()) { animTargets.insert(channelName); @@ -984,8 +992,9 @@ void ColladaLoader::StoreAnimations(aiScene *pScene, const ColladaParser &pParse } } - if (!collectedAnimationsHaveDifferentChannels) + if (!collectedAnimationsHaveDifferentChannels) { continue; + } // if there are other animations which fit the template anim, combine all channels into a single anim if (!collectedAnimIndices.empty()) { @@ -1032,16 +1041,18 @@ void ColladaLoader::StoreAnimations(aiScene *pScene, const ColladaParser &pParse // ------------------------------------------------------------------------------------------------ // Constructs the animations for the given source anim -void ColladaLoader::StoreAnimations(aiScene *pScene, const ColladaParser &pParser, const Collada::Animation *pSrcAnim, const std::string &pPrefix) { +void ColladaLoader::StoreAnimations(aiScene *pScene, const ColladaParser &pParser, const Animation *pSrcAnim, const std::string &pPrefix) { std::string animName = pPrefix.empty() ? 
pSrcAnim->mName : pPrefix + "_" + pSrcAnim->mName; // create nested animations, if given - for (std::vector::const_iterator it = pSrcAnim->mSubAnims.begin(); it != pSrcAnim->mSubAnims.end(); ++it) - StoreAnimations(pScene, pParser, *it, animName); + for (auto mSubAnim : pSrcAnim->mSubAnims) { + StoreAnimations(pScene, pParser, mSubAnim, animName); + } // create animation channels, if any - if (!pSrcAnim->mChannels.empty()) + if (!pSrcAnim->mChannels.empty()) { CreateAnimation(pScene, pParser, pSrcAnim, animName); + } } struct MorphTimeValues { @@ -1057,7 +1068,7 @@ void insertMorphTimeValue(std::vector &values, float time, floa MorphTimeValues::key k; k.mValue = value; k.mWeight = weight; - if (values.size() == 0 || time < values[0].mTime) { + if (values.empty() || time < values[0].mTime) { MorphTimeValues val; val.mTime = time; val.mKeys.push_back(k); @@ -1083,13 +1094,13 @@ void insertMorphTimeValue(std::vector &values, float time, floa return; } } - // should not get here } -float getWeightAtKey(const std::vector &values, int key, unsigned int value) { - for (unsigned int i = 0; i < values[key].mKeys.size(); i++) { - if (values[key].mKeys[i].mValue == value) - return values[key].mKeys[i].mWeight; +static float getWeightAtKey(const std::vector &values, int key, unsigned int value) { + for (auto mKey : values[key].mKeys) { + if (mKey.mValue == value) { + return mKey.mWeight; + } } // no value at key found, try to interpolate if present at other keys. if not, return zero // TODO: interpolation @@ -1098,7 +1109,7 @@ float getWeightAtKey(const std::vector &values, int key, unsign // ------------------------------------------------------------------------------------------------ // Constructs the animation for the given source anim -void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParser, const Collada::Animation *pSrcAnim, const std::string &pName) { +void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParser, const Animation *pSrcAnim, const std::string &pName) { // collect a list of animatable nodes std::vector nodes; CollectNodes(pScene->mRootNode, nodes); @@ -1106,23 +1117,23 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse std::vector anims; std::vector morphAnims; - for (std::vector::const_iterator nit = nodes.begin(); nit != nodes.end(); ++nit) { + for (auto node : nodes) { // find all the collada anim channels which refer to the current node - std::vector entries; - std::string nodeName = (*nit)->mName.data; + std::vector entries; + std::string nodeName = node->mName.data; // find the collada node corresponding to the aiNode - const Collada::Node *srcNode = FindNode(pParser.mRootNode, nodeName); + const Node *srcNode = FindNode(pParser.mRootNode, nodeName); if (!srcNode) { continue; } // now check all channels if they affect the current node std::string targetID, subElement; - for (std::vector::const_iterator cit = pSrcAnim->mChannels.begin(); + for (std::vector::const_iterator cit = pSrcAnim->mChannels.begin(); cit != pSrcAnim->mChannels.end(); ++cit) { - const Collada::AnimationChannel &srcChannel = *cit; - Collada::ChannelEntry entry; + const AnimationChannel &srcChannel = *cit; + ChannelEntry entry; // we expect the animation target to be of type "nodeName/transformID.subElement". 
Ignore all others // find the slash that separates the node name - there should be only one @@ -1137,24 +1148,28 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse entry.mChannel = &(*cit); entry.mTargetId = srcChannel.mTarget.substr(targetPos + pSrcAnim->mName.length(), srcChannel.mTarget.length() - targetPos - pSrcAnim->mName.length()); - if (entry.mTargetId.front() == '-') + if (entry.mTargetId.front() == '-') { entry.mTargetId = entry.mTargetId.substr(1); + } entries.push_back(entry); continue; } - if (srcChannel.mTarget.find('/', slashPos + 1) != std::string::npos) + if (srcChannel.mTarget.find('/', slashPos + 1) != std::string::npos) { continue; + } targetID.clear(); targetID = srcChannel.mTarget.substr(0, slashPos); - if (targetID != srcNode->mID) + if (targetID != srcNode->mID) { continue; + } // find the dot that separates the transformID - there should be only one or zero std::string::size_type dotPos = srcChannel.mTarget.find('.'); if (dotPos != std::string::npos) { - if (srcChannel.mTarget.find('.', dotPos + 1) != std::string::npos) + if (srcChannel.mTarget.find('.', dotPos + 1) != std::string::npos) { continue; + } entry.mTransformId = srcChannel.mTarget.substr(slashPos + 1, dotPos - slashPos - 1); @@ -1171,7 +1186,7 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse else ASSIMP_LOG_WARN_F("Unknown anim subelement <", subElement, ">. Ignoring"); } else { - // no subelement following, transformId is remaining string + // no sub-element following, transformId is remaining string entry.mTransformId = srcChannel.mTarget.substr(slashPos + 1); } @@ -1222,11 +1237,11 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse entry.mTransformIndex = a; if (entry.mTransformIndex == SIZE_MAX) { - if (entry.mTransformId.find("morph-weights") != std::string::npos) { - entry.mTargetId = entry.mTransformId; - entry.mTransformId = ""; - } else + if (entry.mTransformId.find("morph-weights") == std::string::npos) { continue; + } + entry.mTargetId = entry.mTransformId; + entry.mTransformId = ""; } entry.mChannel = &(*cit); @@ -1234,21 +1249,22 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse } // if there's no channel affecting the current node, we skip it - if (entries.empty()) + if (entries.empty()) { continue; + } // resolve the data pointers for all anim channels. 
Find the minimum time while we're at it ai_real startTime = ai_real(1e20), endTime = ai_real(-1e20); - for (std::vector::iterator it = entries.begin(); it != entries.end(); ++it) { - Collada::ChannelEntry &e = *it; + for (ChannelEntry & e : entries) { e.mTimeAccessor = &pParser.ResolveLibraryReference(pParser.mAccessorLibrary, e.mChannel->mSourceTimes); e.mTimeData = &pParser.ResolveLibraryReference(pParser.mDataLibrary, e.mTimeAccessor->mSource); e.mValueAccessor = &pParser.ResolveLibraryReference(pParser.mAccessorLibrary, e.mChannel->mSourceValues); e.mValueData = &pParser.ResolveLibraryReference(pParser.mDataLibrary, e.mValueAccessor->mSource); // time count and value count must match - if (e.mTimeAccessor->mCount != e.mValueAccessor->mCount) + if (e.mTimeAccessor->mCount != e.mValueAccessor->mCount) { throw DeadlyImportError("Time count / value count mismatch in animation channel \"", e.mChannel->mTarget, "\"."); + } if (e.mTimeAccessor->mCount > 0) { // find bounding times @@ -1266,18 +1282,18 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse // and apply them to the transform chain. Then the node's present transformation can be calculated. ai_real time = startTime; while (1) { - for (std::vector::iterator it = entries.begin(); it != entries.end(); ++it) { - Collada::ChannelEntry &e = *it; - + for (ChannelEntry & e : entries) { // find the keyframe behind the current point in time size_t pos = 0; ai_real postTime = 0.0; while (1) { - if (pos >= e.mTimeAccessor->mCount) + if (pos >= e.mTimeAccessor->mCount) { break; + } postTime = ReadFloat(*e.mTimeAccessor, *e.mTimeData, pos, 0); - if (postTime >= time) + if (postTime >= time) { break; + } ++pos; } @@ -1285,8 +1301,9 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse // read values from there ai_real temp[16]; - for (size_t c = 0; c < e.mValueAccessor->mSize; ++c) + for (size_t c = 0; c < e.mValueAccessor->mSize; ++c) { temp[c] = ReadFloat(*e.mValueAccessor, *e.mValueData, pos, c); + } // if not exactly at the key time, interpolate with previous value set if (postTime > time && pos > 0) { @@ -1312,9 +1329,7 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse // find next point in time to evaluate. That's the closest frame larger than the current in any channel ai_real nextTime = ai_real(1e20); - for (std::vector::iterator it = entries.begin(); it != entries.end(); ++it) { - Collada::ChannelEntry &channelElement = *it; - + for (ChannelEntry & channelElement : entries) { // find the next time value larger than the current size_t pos = 0; while (pos < channelElement.mTimeAccessor->mCount) { @@ -1329,7 +1344,7 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse // https://github.com/assimp/assimp/issues/458 // Sub-sample axis-angle channels if the delta between two consecutive // key-frame angles is >= 180 degrees. 
- if (transforms[channelElement.mTransformIndex].mType == Collada::TF_ROTATE && channelElement.mSubElement == 3 && pos > 0 && pos < channelElement.mTimeAccessor->mCount) { + if (transforms[channelElement.mTransformIndex].mType == TF_ROTATE && channelElement.mSubElement == 3 && pos > 0 && pos < channelElement.mTimeAccessor->mCount) { const ai_real cur_key_angle = ReadFloat(*channelElement.mValueAccessor, *channelElement.mValueData, pos, 0); const ai_real last_key_angle = ReadFloat(*channelElement.mValueAccessor, *channelElement.mValueData, pos - 1, 0); const ai_real cur_key_time = ReadFloat(*channelElement.mTimeAccessor, *channelElement.mTimeData, pos, 0); @@ -1347,17 +1362,15 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse } // no more keys on any channel after the current time -> we're done - if (nextTime > 1e19) + if (nextTime > 1e19) { break; + } - // else construct next keyframe at this following time point + // else construct next key-frame at this following time point time = nextTime; } } - // there should be some keyframes, but we aren't that fixated on valid input data - // ai_assert( resultTrafos.size() > 0); - // build an animation channel for the given node out of these trafo keys if (!resultTrafos.empty()) { aiNodeAnim *dstAnim = new aiNodeAnim; @@ -1386,16 +1399,16 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse } if (!entries.empty() && entries.front().mTimeAccessor->mCount > 0) { - std::vector morphChannels; - for (std::vector::iterator it = entries.begin(); it != entries.end(); ++it) { - Collada::ChannelEntry &e = *it; - + std::vector morphChannels; + for (ChannelEntry & e : entries) { // skip non-transform types - if (e.mTargetId.empty()) + if (e.mTargetId.empty()) { continue; + } - if (e.mTargetId.find("morph-weights") != std::string::npos) + if (e.mTargetId.find("morph-weights") != std::string::npos) { morphChannels.push_back(e); + } } if (!morphChannels.empty()) { // either 1) morph weight animation count should contain morph target count channels @@ -1407,13 +1420,14 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse std::vector morphTimeValues; int morphAnimChannelIndex = 0; - for (std::vector::iterator it = morphChannels.begin(); it != morphChannels.end(); ++it) { - Collada::ChannelEntry &e = *it; + for (ChannelEntry & e : morphChannels) { std::string::size_type apos = e.mTargetId.find('('); std::string::size_type bpos = e.mTargetId.find(')'); - if (apos == std::string::npos || bpos == std::string::npos) - // unknown way to specify weight -> ignore this animation + + // If unknown way to specify weight -> ignore this animation + if (apos == std::string::npos || bpos == std::string::npos) { continue; + } // weight target can be in format Weight_M_N, Weight_N, WeightN, or some other way // we ignore the name and just assume the channels are in the right order @@ -1457,13 +1471,13 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse std::copy(morphAnims.begin(), morphAnims.end(), anim->mMorphMeshChannels); } anim->mDuration = 0.0f; - for (size_t a = 0; a < anims.size(); ++a) { - anim->mDuration = std::max(anim->mDuration, anims[a]->mPositionKeys[anims[a]->mNumPositionKeys - 1].mTime); - anim->mDuration = std::max(anim->mDuration, anims[a]->mRotationKeys[anims[a]->mNumRotationKeys - 1].mTime); - anim->mDuration = std::max(anim->mDuration, anims[a]->mScalingKeys[anims[a]->mNumScalingKeys - 1].mTime); + for (auto & a : anims) { + 
anim->mDuration = std::max(anim->mDuration, a->mPositionKeys[a->mNumPositionKeys - 1].mTime); + anim->mDuration = std::max(anim->mDuration, a->mRotationKeys[a->mNumRotationKeys - 1].mTime); + anim->mDuration = std::max(anim->mDuration, a->mScalingKeys[a->mNumScalingKeys - 1].mTime); } - for (size_t a = 0; a < morphAnims.size(); ++a) { - anim->mDuration = std::max(anim->mDuration, morphAnims[a]->mKeys[morphAnims[a]->mNumKeys - 1].mTime); + for (auto & morphAnim : morphAnims) { + anim->mDuration = std::max(anim->mDuration, morphAnim->mKeys[morphAnim->mNumKeys - 1].mTime); } anim->mTicksPerSecond = 1000.0; mAnims.push_back(anim); @@ -1472,10 +1486,12 @@ void ColladaLoader::CreateAnimation(aiScene *pScene, const ColladaParser &pParse // ------------------------------------------------------------------------------------------------ // Add a texture to a material structure -void ColladaLoader::AddTexture(aiMaterial &mat, const ColladaParser &pParser, - const Collada::Effect &effect, - const Collada::Sampler &sampler, - aiTextureType type, unsigned int idx) { +void ColladaLoader::AddTexture(aiMaterial &mat, + const ColladaParser &pParser, + const Effect &effect, + const Sampler &sampler, + aiTextureType type, + unsigned int idx) { // first of all, basic file name const aiString name = FindFilenameForEffectTexture(pParser, effect, sampler.mName); mat.AddProperty(&name, _AI_MATKEY_TEXTURE_BASE, type, idx); @@ -1574,7 +1590,7 @@ void ColladaLoader::FillMaterials(const ColladaParser &pParser, aiScene * /*pSce shadeMode = effect.mDoubleSided; mat.AddProperty(&shadeMode, 1, AI_MATKEY_TWOSIDED); - // wireframe? + // wire-frame? shadeMode = effect.mWireframe; mat.AddProperty(&shadeMode, 1, AI_MATKEY_ENABLE_WIREFRAME); @@ -1652,12 +1668,12 @@ void ColladaLoader::BuildMaterials(ColladaParser &pParser, aiScene * /*pScene*/) for (ColladaParser::MaterialLibrary::const_iterator matIt = pParser.mMaterialLibrary.begin(); matIt != pParser.mMaterialLibrary.end(); ++matIt) { - const Collada::Material &material = matIt->second; + const Material &material = matIt->second; // a material is only a reference to an effect ColladaParser::EffectLibrary::iterator effIt = pParser.mEffectLibrary.find(material.mEffect); if (effIt == pParser.mEffectLibrary.end()) continue; - Collada::Effect &effect = effIt->second; + Effect &effect = effIt->second; // create material aiMaterial *mat = new aiMaterial; @@ -1666,7 +1682,7 @@ void ColladaLoader::BuildMaterials(ColladaParser &pParser, aiScene * /*pScene*/) // store the material mMaterialIndexByName[matIt->first] = newMats.size(); - newMats.push_back(std::pair(&effect, mat)); + newMats.push_back(std::pair(&effect, mat)); } // ScenePreprocessor generates a default material automatically if none is there. 
// All further code here in this loader works well without a valid material so @@ -1674,17 +1690,16 @@ void ColladaLoader::BuildMaterials(ColladaParser &pParser, aiScene * /*pScene*/) } // ------------------------------------------------------------------------------------------------ -// Resolves the texture name for the given effect texture entry -// and loads the texture data +// Resolves the texture name for the given effect texture entry and loads the texture data aiString ColladaLoader::FindFilenameForEffectTexture(const ColladaParser &pParser, - const Collada::Effect &pEffect, const std::string &pName) { + const Effect &pEffect, const std::string &pName) { aiString result; // recurse through the param references until we end up at an image std::string name = pName; while (1) { // the given string is a param entry. Find it - Collada::Effect::ParamLibrary::const_iterator it = pEffect.mParams.find(name); + Effect::ParamLibrary::const_iterator it = pEffect.mParams.find(name); // if not found, we're at the end of the recursion. The resulting string should be the image ID if (it == pEffect.mParams.end()) break; @@ -1712,10 +1727,6 @@ aiString ColladaLoader::FindFilenameForEffectTexture(const ColladaParser &pParse tex->mFilename.Set(imIt->second.mFileName.c_str()); result.Set(imIt->second.mFileName); - // TODO: check the possibility of using the flag "AI_CONFIG_IMPORT_FBX_EMBEDDED_TEXTURES_LEGACY_NAMING" - // result.data[0] = '*'; - // result.length = 1 + ASSIMP_itoa10(result.data + 1, static_cast(MAXLEN - 1), static_cast(mTextures.size())); - // setup format hint if (imIt->second.mEmbeddedFormat.length() >= HINTMAXTEXTURELEN) { ASSIMP_LOG_WARN("Collada: texture format hint is too long, truncating to 3 characters"); @@ -1744,7 +1755,7 @@ aiString ColladaLoader::FindFilenameForEffectTexture(const ColladaParser &pParse // ------------------------------------------------------------------------------------------------ // Reads a float value from an accessor and its data array. -ai_real ColladaLoader::ReadFloat(const Collada::Accessor &pAccessor, const Collada::Data &pData, size_t pIndex, size_t pOffset) const { +ai_real ColladaLoader::ReadFloat(const Accessor &pAccessor, const Data &pData, size_t pIndex, size_t pOffset) const { size_t pos = pAccessor.mStride * pIndex + pAccessor.mOffset + pOffset; ai_assert(pos < pData.mValues.size()); return pData.mValues[pos]; @@ -1752,7 +1763,7 @@ ai_real ColladaLoader::ReadFloat(const Collada::Accessor &pAccessor, const Colla // ------------------------------------------------------------------------------------------------ // Reads a string value from an accessor and its data array. 
-const std::string &ColladaLoader::ReadString(const Collada::Accessor &pAccessor, const Collada::Data &pData, size_t pIndex) const { +const std::string &ColladaLoader::ReadString(const Accessor &pAccessor, const Data &pData, size_t pIndex) const { size_t pos = pAccessor.mStride * pIndex + pAccessor.mOffset; ai_assert(pos < pData.mStrings.size()); return pData.mStrings[pos]; @@ -1769,12 +1780,12 @@ void ColladaLoader::CollectNodes(const aiNode *pNode, std::vectormName == pName || pNode->mID == pName) return pNode; - for (size_t a = 0; a < pNode->mChildren.size(); ++a) { - const Collada::Node *node = FindNode(pNode->mChildren[a], pName); + for (auto a : pNode->mChildren) { + const Collada::Node *node = FindNode(a, pName); if (node) { return node; } @@ -1785,7 +1796,7 @@ const Collada::Node *ColladaLoader::FindNode(const Collada::Node *pNode, const s // ------------------------------------------------------------------------------------------------ // Finds a node in the collada scene by the given SID -const Collada::Node *ColladaLoader::FindNodeBySID(const Collada::Node *pNode, const std::string &pSID) const { +const Node *ColladaLoader::FindNodeBySID(const Node *pNode, const std::string &pSID) const { if (nullptr == pNode) { return nullptr; } @@ -1794,8 +1805,8 @@ const Collada::Node *ColladaLoader::FindNodeBySID(const Collada::Node *pNode, co return pNode; } - for (size_t a = 0; a < pNode->mChildren.size(); ++a) { - const Collada::Node *node = FindNodeBySID(pNode->mChildren[a], pSID); + for (auto a : pNode->mChildren) { + const Collada::Node *node = FindNodeBySID(a, pSID); if (node) { return node; } @@ -1807,7 +1818,7 @@ const Collada::Node *ColladaLoader::FindNodeBySID(const Collada::Node *pNode, co // ------------------------------------------------------------------------------------------------ // Finds a proper unique name for a node derived from the collada-node's properties. // The name must be unique for proper node-bone association. -std::string ColladaLoader::FindNameForNode(const Collada::Node *pNode) { +std::string ColladaLoader::FindNameForNode(const Node *pNode) { // If explicitly requested, just use the collada name. if (useColladaName) { if (!pNode->mName.empty()) { diff --git a/code/AssetLib/Collada/ColladaLoader.h b/code/AssetLib/Collada/ColladaLoader.h index 198b7a215..9e2980e3c 100644 --- a/code/AssetLib/Collada/ColladaLoader.h +++ b/code/AssetLib/Collada/ColladaLoader.h @@ -4,7 +4,7 @@ Open Asset Import Library (assimp) ---------------------------------------------------------------------- -Copyright (c) 2006-2020, assimp team +Copyright (c) 2006-2021, assimp team All rights reserved. @@ -45,8 +45,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifndef AI_COLLADALOADER_H_INC #define AI_COLLADALOADER_H_INC -#include #include "ColladaParser.h" +#include struct aiNode; struct aiCamera; @@ -54,28 +54,24 @@ struct aiLight; struct aiTexture; struct aiAnimation; -namespace Assimp -{ +namespace Assimp { -struct ColladaMeshIndex -{ +struct ColladaMeshIndex { std::string mMeshID; size_t mSubMesh; std::string mMaterial; - ColladaMeshIndex( const std::string& pMeshID, size_t pSubMesh, const std::string& pMaterial) - : mMeshID( pMeshID), mSubMesh( pSubMesh), mMaterial( pMaterial) - { } + ColladaMeshIndex(const std::string &pMeshID, size_t pSubMesh, const std::string &pMaterial) : + mMeshID(pMeshID), mSubMesh(pSubMesh), mMaterial(pMaterial) { + ai_assert(!pMeshID.empty()); + } - bool operator < (const ColladaMeshIndex& p) const - { - if( mMeshID == p.mMeshID) - { - if( mSubMesh == p.mSubMesh) + bool operator<(const ColladaMeshIndex &p) const { + if (mMeshID == p.mMeshID) { + if (mSubMesh == p.mSubMesh) return mMaterial < p.mMaterial; else return mSubMesh < p.mSubMesh; - } else - { + } else { return mMeshID < p.mMeshID; } } @@ -84,105 +80,102 @@ struct ColladaMeshIndex /** Loader class to read Collada scenes. Collada is over-engineered to death, with every new iteration bringing * more useless stuff, so I limited the data to what I think is useful for games. */ -class ColladaLoader : public BaseImporter -{ +class ColladaLoader : public BaseImporter { public: + /// The class constructor. ColladaLoader(); - ~ColladaLoader(); + /// The class destructor. + ~ColladaLoader() override; -public: - /** Returns whether the class can handle the format of the given file. - * See BaseImporter::CanRead() for details. */ - bool CanRead(const std::string& pFile, IOSystem* pIOHandler, bool checkSig) const override; + /// Returns whether the class can handle the format of the given file. + /// @see BaseImporter::CanRead() for more details. + bool CanRead(const std::string &pFile, IOSystem *pIOHandler, bool checkSig) const override; protected: - /** Return importer meta information. - * See #BaseImporter::GetInfo for the details - */ - const aiImporterDesc* GetInfo () const override; + /// See #BaseImporter::GetInfo for the details + const aiImporterDesc *GetInfo() const override; - void SetupProperties(const Importer* pImp) override; + /// See #BaseImporter::SetupProperties for the details + void SetupProperties(const Importer *pImp) override; - /** Imports the given file into the given scene structure. - * See BaseImporter::InternReadFile() for details - */ - void InternReadFile( const std::string& pFile, aiScene* pScene, IOSystem* pIOHandler) override; + /// See #BaseImporter::InternReadFile for the details + void InternReadFile(const std::string &pFile, aiScene *pScene, IOSystem *pIOHandler) override; /** Recursively constructs a scene node for the given parser node and returns it. 
*/ - aiNode* BuildHierarchy( const ColladaParser& pParser, const Collada::Node* pNode); + aiNode *BuildHierarchy(const ColladaParser &pParser, const Collada::Node *pNode); /** Resolve node instances */ - void ResolveNodeInstances( const ColladaParser& pParser, const Collada::Node* pNode, - std::vector& resolved); + void ResolveNodeInstances(const ColladaParser &pParser, const Collada::Node *pNode, + std::vector &resolved); /** Builds meshes for the given node and references them */ - void BuildMeshesForNode( const ColladaParser& pParser, const Collada::Node* pNode, - aiNode* pTarget); - - aiMesh *findMesh(const std::string& meshid); + void BuildMeshesForNode(const ColladaParser &pParser, const Collada::Node *pNode, + aiNode *pTarget); + + aiMesh *findMesh(const std::string &meshid); /** Creates a mesh for the given ColladaMesh face subset and returns the newly created mesh */ - aiMesh* CreateMesh( const ColladaParser& pParser, const Collada::Mesh* pSrcMesh, const Collada::SubMesh& pSubMesh, - const Collada::Controller* pSrcController, size_t pStartVertex, size_t pStartFace); + aiMesh *CreateMesh(const ColladaParser &pParser, const Collada::Mesh *pSrcMesh, const Collada::SubMesh &pSubMesh, + const Collada::Controller *pSrcController, size_t pStartVertex, size_t pStartFace); /** Builds cameras for the given node and references them */ - void BuildCamerasForNode( const ColladaParser& pParser, const Collada::Node* pNode, - aiNode* pTarget); + void BuildCamerasForNode(const ColladaParser &pParser, const Collada::Node *pNode, + aiNode *pTarget); /** Builds lights for the given node and references them */ - void BuildLightsForNode( const ColladaParser& pParser, const Collada::Node* pNode, - aiNode* pTarget); + void BuildLightsForNode(const ColladaParser &pParser, const Collada::Node *pNode, + aiNode *pTarget); /** Stores all meshes in the given scene */ - void StoreSceneMeshes( aiScene* pScene); + void StoreSceneMeshes(aiScene *pScene); /** Stores all materials in the given scene */ - void StoreSceneMaterials( aiScene* pScene); + void StoreSceneMaterials(aiScene *pScene); /** Stores all lights in the given scene */ - void StoreSceneLights( aiScene* pScene); + void StoreSceneLights(aiScene *pScene); /** Stores all cameras in the given scene */ - void StoreSceneCameras( aiScene* pScene); + void StoreSceneCameras(aiScene *pScene); /** Stores all textures in the given scene */ - void StoreSceneTextures( aiScene* pScene); + void StoreSceneTextures(aiScene *pScene); /** Stores all animations * @param pScene target scene to store the anims */ - void StoreAnimations( aiScene* pScene, const ColladaParser& pParser); + void StoreAnimations(aiScene *pScene, const ColladaParser &pParser); /** Stores all animations for the given source anim and its nested child animations * @param pScene target scene to store the anims * @param pSrcAnim the source animation to process * @param pPrefix Prefix to the name in case of nested animations */ - void StoreAnimations( aiScene* pScene, const ColladaParser& pParser, const Collada::Animation* pSrcAnim, const std::string& pPrefix); + void StoreAnimations(aiScene *pScene, const ColladaParser &pParser, const Collada::Animation *pSrcAnim, const std::string &pPrefix); /** Constructs the animation for the given source anim */ - void CreateAnimation( aiScene* pScene, const ColladaParser& pParser, const Collada::Animation* pSrcAnim, const std::string& pName); + void CreateAnimation(aiScene *pScene, const ColladaParser &pParser, const Collada::Animation *pSrcAnim, const std::string 
&pName); /** Constructs materials from the collada material definitions */ - void BuildMaterials( ColladaParser& pParser, aiScene* pScene); + void BuildMaterials(ColladaParser &pParser, aiScene *pScene); /** Fill materials from the collada material definitions */ - void FillMaterials( const ColladaParser& pParser, aiScene* pScene); + void FillMaterials(const ColladaParser &pParser, aiScene *pScene); /** Resolve UV channel mappings*/ - void ApplyVertexToEffectSemanticMapping(Collada::Sampler& sampler, - const Collada::SemanticMappingTable& table); + void ApplyVertexToEffectSemanticMapping(Collada::Sampler &sampler, + const Collada::SemanticMappingTable &table); /** Add a texture and all of its sampling properties to a material*/ - void AddTexture ( aiMaterial& mat, const ColladaParser& pParser, - const Collada::Effect& effect, - const Collada::Sampler& sampler, - aiTextureType type, unsigned int idx = 0); + void AddTexture(aiMaterial &mat, const ColladaParser &pParser, + const Collada::Effect &effect, + const Collada::Sampler &sampler, + aiTextureType type, unsigned int idx = 0); /** Resolves the texture name for the given effect texture entry */ - aiString FindFilenameForEffectTexture( const ColladaParser& pParser, - const Collada::Effect& pEffect, const std::string& pName); + aiString FindFilenameForEffectTexture(const ColladaParser &pParser, + const Collada::Effect &pEffect, const std::string &pName); /** Reads a float value from an accessor and its data array. * @param pAccessor The accessor to use for reading @@ -191,7 +184,7 @@ protected: * @param pOffset Offset into the element, for multipart elements such as vectors or matrices * @return the specified value */ - ai_real ReadFloat( const Collada::Accessor& pAccessor, const Collada::Data& pData, size_t pIndex, size_t pOffset) const; + ai_real ReadFloat(const Collada::Accessor &pAccessor, const Collada::Data &pData, size_t pIndex, size_t pOffset) const; /** Reads a string value from an accessor and its data array. 
* @param pAccessor The accessor to use for reading @@ -199,18 +192,18 @@ protected: * @param pIndex The index of the element to retrieve * @return the specified value */ - const std::string& ReadString( const Collada::Accessor& pAccessor, const Collada::Data& pData, size_t pIndex) const; + const std::string &ReadString(const Collada::Accessor &pAccessor, const Collada::Data &pData, size_t pIndex) const; /** Recursively collects all nodes into the given array */ - void CollectNodes( const aiNode* pNode, std::vector& poNodes) const; + void CollectNodes(const aiNode *pNode, std::vector &poNodes) const; /** Finds a node in the collada scene by the given name */ - const Collada::Node* FindNode( const Collada::Node* pNode, const std::string& pName) const; + const Collada::Node *FindNode(const Collada::Node *pNode, const std::string &pName) const; /** Finds a node in the collada scene by the given SID */ - const Collada::Node* FindNodeBySID( const Collada::Node* pNode, const std::string& pSID) const; + const Collada::Node *FindNodeBySID(const Collada::Node *pNode, const std::string &pSID) const; /** Finds a proper name for a node derived from the collada-node's properties */ - std::string FindNameForNode( const Collada::Node* pNode); + std::string FindNameForNode(const Collada::Node *pNode); protected: /** Filename, for a verbose error message */ @@ -223,25 +216,25 @@ protected: std::map mMaterialIndexByName; /** Accumulated meshes for the target scene */ - std::vector mMeshes; - + std::vector mMeshes; + /** Accumulated morph target meshes */ - std::vector mTargetMeshes; + std::vector mTargetMeshes; /** Temporary material list */ - std::vector > newMats; + std::vector> newMats; /** Temporary camera list */ - std::vector mCameras; + std::vector mCameras; /** Temporary light list */ - std::vector mLights; + std::vector mLights; /** Temporary texture list */ - std::vector mTextures; + std::vector mTextures; /** Accumulated animations for the target scene */ - std::vector mAnims; + std::vector mAnims; bool noSkeletonMesh; bool ignoreUpDirection; diff --git a/code/AssetLib/Collada/ColladaParser.cpp b/code/AssetLib/Collada/ColladaParser.cpp index d84f76340..a12add904 100644 --- a/code/AssetLib/Collada/ColladaParser.cpp +++ b/code/AssetLib/Collada/ColladaParser.cpp @@ -54,6 +54,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
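
ReadFloat() and ReadString(), declared above, address one element of a flat source array through the accessor's stride and offset (the .cpp hunk earlier shows the pos = mStride * pIndex + mOffset computation guarded by an assert). A minimal sketch of that addressing scheme, using hypothetical DemoAccessor/DemoData types in place of Collada::Accessor and Collada::Data, could be:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical accessor: where a logical entry starts and how wide it is.
struct DemoAccessor {
    size_t mOffset = 0; // index of the first element of entry 0
    size_t mStride = 1; // number of array elements per logical entry
};

// Hypothetical data block backing the accessor.
struct DemoData {
    std::vector<float> mValues;
};

// Read component `subOffset` of logical entry `index`.
float ReadDemoFloat(const DemoAccessor &acc, const DemoData &data,
                    size_t index, size_t subOffset) {
    const size_t pos = acc.mStride * index + acc.mOffset + subOffset;
    assert(pos < data.mValues.size()); // same kind of guard as the loader's ai_assert
    return data.mValues[pos];
}
```
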
#include #include #include +#include using namespace Assimp; using namespace Assimp::Collada; @@ -158,9 +159,9 @@ ColladaParser::ColladaParser(IOSystem *pIOHandler, const std::string &pFile) : if (colladaNode.empty()) { return; } - ReadContents(colladaNode); - // read embedded textures + // Read content and embedded textures + ReadContents(colladaNode); if (zip_archive && zip_archive->isOpen()) { ReadEmbeddedTextures(*zip_archive); } @@ -169,11 +170,11 @@ ColladaParser::ColladaParser(IOSystem *pIOHandler, const std::string &pFile) : // ------------------------------------------------------------------------------------------------ // Destructor, private as well ColladaParser::~ColladaParser() { - for (NodeLibrary::iterator it = mNodeLibrary.begin(); it != mNodeLibrary.end(); ++it) { - delete it->second; + for (auto & it : mNodeLibrary) { + delete it.second; } - for (MeshLibrary::iterator it = mMeshLibrary.begin(); it != mMeshLibrary.end(); ++it) { - delete it->second; + for (auto & it : mMeshLibrary) { + delete it.second; } } @@ -289,7 +290,7 @@ void ColladaParser::ReadContents(XmlNode &node) { // Reads the structure of the file void ColladaParser::ReadStructure(XmlNode &node) { for (XmlNode ¤tNode : node.children()) { - const std::string ¤tName = std::string(currentNode.name()); + const std::string ¤tName = currentNode.name(); if (currentName == "asset") { ReadAssetInfo(currentNode); } else if (currentName == "library_animations") { @@ -334,7 +335,7 @@ void ColladaParser::ReadAssetInfo(XmlNode &node) { const std::string ¤tName = currentNode.name(); if (currentName == "unit") { mUnitSize = 1.f; - XmlParser::getFloatAttribute(node, "meter", mUnitSize); + XmlParser::getFloatAttribute(currentNode, "meter", mUnitSize); } else if (currentName == "up_axis") { std::string v; if (!XmlParser::getValueAsString(currentNode, v)) { @@ -407,7 +408,7 @@ void ColladaParser::ReadAnimationClipLibrary(XmlNode &node) { const std::string ¤tName = currentNode.name(); if (currentName == "instance_animation") { std::string url; - readUrlAttribute(node, url); + readUrlAttribute(currentNode, url); clip.second.push_back(url); } @@ -419,8 +420,8 @@ void ColladaParser::ReadAnimationClipLibrary(XmlNode &node) { void ColladaParser::PostProcessControllers() { std::string meshId; - for (ControllerLibrary::iterator it = mControllerLibrary.begin(); it != mControllerLibrary.end(); ++it) { - meshId = it->second.mMeshId; + for (auto & it : mControllerLibrary) { + meshId = it.second.mMeshId; if (meshId.empty()) { continue; } @@ -431,7 +432,7 @@ void ColladaParser::PostProcessControllers() { findItr = mControllerLibrary.find(meshId); } - it->second.mMeshId = meshId; + it.second.mMeshId = meshId; } } @@ -444,22 +445,19 @@ void ColladaParser::PostProcessRootAnimations() { } Animation temp; - for (AnimationClipLibrary::iterator it = mAnimationClipLibrary.begin(); it != mAnimationClipLibrary.end(); ++it) { - std::string clipName = it->first; + for (auto & it : mAnimationClipLibrary) { + std::string clipName = it.first; Animation *clip = new Animation(); clip->mName = clipName; temp.mSubAnims.push_back(clip); - for (std::vector::iterator a = it->second.begin(); a != it->second.end(); ++a) { - std::string animationID = *a; - + for (std::string animationID : it.second) { AnimationLibrary::iterator animation = mAnimationLibrary.find(animationID); if (animation != mAnimationLibrary.end()) { Animation *pSourceAnimation = animation->second; - pSourceAnimation->CollectChannelsRecursively(clip->mChannels); } } @@ -495,7 +493,7 @@ void 
ColladaParser::ReadAnimation(XmlNode &node, Collada::Animation *pParent) { // an element may be a container for grouping sub-elements or an animation channel // this is the channel collection by ID, in case it has channels - using ChannelMap = std::map ; + using ChannelMap = std::map; ChannelMap channels; // this is the anim container in case we're a container Animation *anim = nullptr; @@ -531,17 +529,17 @@ void ColladaParser::ReadAnimation(XmlNode &node, Collada::Animation *pParent) { // have it read into a channel ChannelMap::iterator newChannel = channels.insert(std::make_pair(id, AnimationChannel())).first; ReadAnimationSampler(currentNode, newChannel->second); - } else if (currentName == "channel") { - std::string source_name, target; - XmlParser::getStdStrAttribute(currentNode, "source", source_name); - XmlParser::getStdStrAttribute(currentNode, "target", target); - if (source_name[0] == '#') { - source_name = source_name.substr(1, source_name.size() - 1); - } - ChannelMap::iterator cit = channels.find(source_name); - if (cit != channels.end()) { - cit->second.mTarget = target; - } + } + } else if (currentName == "channel") { + std::string source_name, target; + XmlParser::getStdStrAttribute(currentNode, "source", source_name); + XmlParser::getStdStrAttribute(currentNode, "target", target); + if (source_name[0] == '#') { + source_name = source_name.substr(1, source_name.size() - 1); + } + ChannelMap::iterator cit = channels.find(source_name); + if (cit != channels.end()) { + cit->second.mTarget = target; } } } @@ -554,8 +552,8 @@ void ColladaParser::ReadAnimation(XmlNode &node, Collada::Animation *pParent) { pParent->mSubAnims.push_back(anim); } - for (ChannelMap::const_iterator it = channels.begin(); it != channels.end(); ++it) { - anim->mChannels.push_back(it->second); + for (const auto & channel : channels) { + anim->mChannels.push_back(channel.second); } if (idAttr) { @@ -610,50 +608,62 @@ void ColladaParser::ReadControllerLibrary(XmlNode &node) { if (currentName != "controller") { continue; } - std::string id = node.attribute("id").as_string(); - mControllerLibrary[id] = Controller(); - ReadController(node, mControllerLibrary[id]); + std::string id; + if (XmlParser::getStdStrAttribute(currentNode, "id", id)) { + mControllerLibrary[id] = Controller(); + ReadController(currentNode, mControllerLibrary[id]); + } } } // ------------------------------------------------------------------------------------------------ // Reads a controller into the given mesh structure -void ColladaParser::ReadController(XmlNode &node, Collada::Controller &pController) { +void ColladaParser::ReadController(XmlNode &node, Collada::Controller &controller) { // initial values - pController.mType = Skin; - pController.mMethod = Normalized; - for (XmlNode ¤tNode : node.children()) { + controller.mType = Skin; + controller.mMethod = Normalized; + + XmlNodeIterator xmlIt(node); + xmlIt.collectChildrenPreOrder(node); + XmlNode currentNode; + while (xmlIt.getNext(currentNode)) { + + //for (XmlNode ¤tNode : node.children()) { const std::string ¤tName = currentNode.name(); if (currentName == "morph") { - pController.mType = Morph; - pController.mMeshId = currentNode.attribute("source").as_string(); + controller.mType = Morph; + controller.mMeshId = currentNode.attribute("source").as_string(); int methodIndex = currentNode.attribute("method").as_int(); if (methodIndex > 0) { std::string method; XmlParser::getValueAsString(currentNode, method); if (method == "RELATIVE") { - pController.mMethod = Relative; + 
controller.mMethod = Relative; } } } else if (currentName == "skin") { - pController.mMeshId = currentNode.attribute("source").as_string(); + std::string id; + if (XmlParser::getStdStrAttribute(currentNode, "source", id)) { + controller.mMeshId = id.substr(1, id.size()-1); + } } else if (currentName == "bind_shape_matrix") { std::string v; XmlParser::getValueAsString(currentNode, v); const char *content = v.c_str(); for (unsigned int a = 0; a < 16; a++) { + SkipSpacesAndLineEnd(&content); // read a number - content = fast_atoreal_move(content, pController.mBindShapeMatrix[a]); + content = fast_atoreal_move(content, controller.mBindShapeMatrix[a]); // skip whitespace after it SkipSpacesAndLineEnd(&content); } } else if (currentName == "source") { ReadSource(currentNode); } else if (currentName == "joints") { - ReadControllerJoints(currentNode, pController); + ReadControllerJoints(currentNode, controller); } else if (currentName == "vertex_weights") { - ReadControllerWeights(currentNode, pController); + ReadControllerWeights(currentNode, controller); } else if (currentName == "targets") { for (XmlNode currentChildNode = node.first_child(); currentNode; currentNode = currentNode.next_sibling()) { const std::string ¤tChildName = currentChildNode.name(); @@ -661,9 +671,9 @@ void ColladaParser::ReadController(XmlNode &node, Collada::Controller &pControll const char *semantics = currentChildNode.attribute("semantic").as_string(); const char *source = currentChildNode.attribute("source").as_string(); if (strcmp(semantics, "MORPH_TARGET") == 0) { - pController.mMorphTarget = source + 1; + controller.mMorphTarget = source + 1; } else if (strcmp(semantics, "MORPH_WEIGHT") == 0) { - pController.mMorphWeight = source + 1; + controller.mMorphWeight = source + 1; } } } @@ -701,6 +711,7 @@ void ColladaParser::ReadControllerWeights(XmlNode &node, Collada::Controller &pC // Read vertex count from attributes and resize the array accordingly int vertexCount=0; XmlParser::getIntAttribute(node, "count", vertexCount); + pController.mWeightCounts.resize(vertexCount); for (XmlNode ¤tNode : node.children()) { const std::string ¤tName = currentNode.name(); @@ -726,7 +737,7 @@ void ColladaParser::ReadControllerWeights(XmlNode &node, Collada::Controller &pC throw DeadlyImportError("Unknown semantic \"", attrSemantic, "\" in data element"); } } else if (currentName == "vcount" && vertexCount > 0) { - const char *text = currentNode.value(); + const char *text = currentNode.text().as_string(); size_t numWeights = 0; for (std::vector::iterator it = pController.mWeightCounts.begin(); it != pController.mWeightCounts.end(); ++it) { if (*text == 0) { @@ -763,18 +774,15 @@ void ColladaParser::ReadControllerWeights(XmlNode &node, Collada::Controller &pC // ------------------------------------------------------------------------------------------------ // Reads the image library contents void ColladaParser::ReadImageLibrary(XmlNode &node) { - if (node.empty()) { - return; - } - for (XmlNode ¤tNode : node.children()) { const std::string ¤tName = currentNode.name(); if (currentName == "image") { - std::string id = currentNode.attribute("id").as_string(); - mImageLibrary[id] = Image(); - - // read on from there - ReadImage(currentNode, mImageLibrary[id]); + std::string id; + if (XmlParser::getStdStrAttribute( currentNode, "id", id )) { + mImageLibrary[id] = Image(); + // read on from there + ReadImage(currentNode, mImageLibrary[id]); + } } } } @@ -793,7 +801,7 @@ void ColladaParser::ReadImage(XmlNode &node, Collada::Image &pImage) 
{ if (!currentNode.empty()) { // element content is filename - hopefully const char *sz = currentNode.text().as_string(); - if (sz) { + if (nullptr != sz) { aiString filepath(sz); UriDecodePath(filepath); pImage.mFileName = filepath.C_Str(); @@ -843,10 +851,6 @@ void ColladaParser::ReadImage(XmlNode &node, Collada::Image &pImage) { // ------------------------------------------------------------------------------------------------ // Reads the material library void ColladaParser::ReadMaterialLibrary(XmlNode &node) { - if (node.empty()) { - return; - } - std::map names; for (XmlNode ¤tNode : node.children()) { std::string id = currentNode.attribute("id").as_string(); @@ -873,10 +877,6 @@ void ColladaParser::ReadMaterialLibrary(XmlNode &node) { // ------------------------------------------------------------------------------------------------ // Reads the light library void ColladaParser::ReadLightLibrary(XmlNode &node) { - if (node.empty()) { - return; - } - for (XmlNode ¤tNode : node.children()) { const std::string ¤tName = currentNode.name(); if (currentName == "light") { @@ -891,10 +891,6 @@ void ColladaParser::ReadLightLibrary(XmlNode &node) { // ------------------------------------------------------------------------------------------------ // Reads the camera library void ColladaParser::ReadCameraLibrary(XmlNode &node) { - if (node.empty()) { - return; - } - for (XmlNode ¤tNode : node.children()) { const std::string ¤tName = currentNode.name(); if (currentName == "camera") { @@ -1738,14 +1734,16 @@ size_t ColladaParser::ReadPrimitives(XmlNode &node, Mesh &pMesh, std::vector indices; - if (expectedPointCount > 0) + if (expectedPointCount > 0) { indices.reserve(expectedPointCount * numOffsets); + } - if (pNumPrimitives > 0) // It is possible to not contain any indices - { + // It is possible to not contain any indices + if (pNumPrimitives > 0) { std::string v; XmlParser::getValueAsString(node, v); const char *content = v.c_str(); + SkipSpacesAndLineEnd(&content); while (*content != 0) { // read a value. // Hack: (thom) Some exporters put negative indices sometimes. We just try to carry on anyways. @@ -1772,21 +1770,24 @@ size_t ColladaParser::ReadPrimitives(XmlNode &node, Mesh &pMesh, std::vector::iterator it = pMesh.mPerVertexData.begin(); it != pMesh.mPerVertexData.end(); ++it) { InputChannel &input = *it; - if (input.mResolved) + if (input.mResolved) { continue; + } // find accessor input.mResolved = &ResolveLibraryReference(mAccessorLibrary, input.mAccessor); // resolve accessor's data pointer as well, if necessary const Accessor *acc = input.mResolved; - if (!acc->mData) + if (!acc->mData) { acc->mData = &ResolveLibraryReference(mDataLibrary, acc->mSource); + } } // and the same for the per-index channels for (std::vector::iterator it = pPerIndexChannels.begin(); it != pPerIndexChannels.end(); ++it) { InputChannel &input = *it; - if (input.mResolved) + if (input.mResolved) { continue; + } // ignore vertex pointer, it doesn't refer to an accessor if (input.mType == IT_Vertex) { @@ -1801,8 +1802,9 @@ size_t ColladaParser::ReadPrimitives(XmlNode &node, Mesh &pMesh, std::vectormData) + if (!acc->mData) { acc->mData = &ResolveLibraryReference(mDataLibrary, acc->mSource); + } } // For continued primitives, the given count does not come all in one
<p>, but only one primitive per <p>
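
The ReadPrimitives() hunk above walks a whitespace-separated index list and, as its in-code comment notes, tolerates the negative indices some exporters emit. A stand-alone sketch of that parsing step (a hypothetical helper, not the importer's actual code path; clamping negatives to zero is assumed here as one reasonable way to "carry on") could look like:

```cpp
#include <cstdlib>
#include <string>
#include <vector>

// Parse a whitespace-separated list of indices such as the text content of a
// COLLADA <p> element. Negative values are clamped to 0 so parsing can continue.
std::vector<size_t> ParseIndexList(const std::string &text) {
    std::vector<size_t> indices;
    const char *cur = text.c_str();
    char *end = nullptr;
    for (;;) {
        const long value = std::strtol(cur, &end, 10); // skips leading whitespace
        if (end == cur) {
            break; // no further number found
        }
        indices.push_back(value < 0 ? size_t(0) : static_cast<size_t>(value));
        cur = end;
    }
    return indices;
}
```

Reserving expectedPointCount * numOffsets entries up front, as the hunk does, avoids repeated reallocation for large meshes.
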
@@ -1884,11 +1886,13 @@ void ColladaParser::CopyVertex(size_t currentVertex, size_t numOffsets, size_t n ai_assert((baseOffset + numOffsets - 1) < indices.size()); // extract per-vertex channels using the global per-vertex offset - for (std::vector::iterator it = pMesh.mPerVertexData.begin(); it != pMesh.mPerVertexData.end(); ++it) + for (std::vector::iterator it = pMesh.mPerVertexData.begin(); it != pMesh.mPerVertexData.end(); ++it) { ExtractDataObjectFromChannel(*it, indices[baseOffset + perVertexOffset], pMesh); + } // and extract per-index channels using there specified offset - for (std::vector::iterator it = pPerIndexChannels.begin(); it != pPerIndexChannels.end(); ++it) + for (std::vector::iterator it = pPerIndexChannels.begin(); it != pPerIndexChannels.end(); ++it) { ExtractDataObjectFromChannel(*it, indices[baseOffset + it->mOffset], pMesh); + } // store the vertex-data index for later assignment of bone vertex weights pMesh.mFacePosIndices.push_back(indices[baseOffset + perVertexOffset]); @@ -1912,8 +1916,9 @@ void ColladaParser::ReadPrimTriStrips(size_t numOffsets, size_t perVertexOffset, // Extracts a single object from an input channel and stores it in the appropriate mesh data array void ColladaParser::ExtractDataObjectFromChannel(const InputChannel &pInput, size_t pLocalIndex, Mesh &pMesh) { // ignore vertex referrer - we handle them that separate - if (pInput.mType == IT_Vertex) + if (pInput.mType == IT_Vertex) { return; + } const Accessor &acc = *pInput.mResolved; if (pLocalIndex >= acc.mCount) { @@ -1926,86 +1931,93 @@ void ColladaParser::ExtractDataObjectFromChannel(const InputChannel &pInput, siz // assemble according to the accessors component sub-offset list. We don't care, yet, // what kind of object exactly we're extracting here ai_real obj[4]; - for (size_t c = 0; c < 4; ++c) + for (size_t c = 0; c < 4; ++c) { obj[c] = dataObject[acc.mSubOffset[c]]; + } // now we reinterpret it according to the type we're reading here switch (pInput.mType) { - case IT_Position: // ignore all position streams except 0 - there can be only one position - if (pInput.mIndex == 0) - pMesh.mPositions.push_back(aiVector3D(obj[0], obj[1], obj[2])); - else - ASSIMP_LOG_ERROR("Collada: just one vertex position stream supported"); - break; - case IT_Normal: - // pad to current vertex count if necessary - if (pMesh.mNormals.size() < pMesh.mPositions.size() - 1) - pMesh.mNormals.insert(pMesh.mNormals.end(), pMesh.mPositions.size() - pMesh.mNormals.size() - 1, aiVector3D(0, 1, 0)); - - // ignore all normal streams except 0 - there can be only one normal - if (pInput.mIndex == 0) - pMesh.mNormals.push_back(aiVector3D(obj[0], obj[1], obj[2])); - else - ASSIMP_LOG_ERROR("Collada: just one vertex normal stream supported"); - break; - case IT_Tangent: - // pad to current vertex count if necessary - if (pMesh.mTangents.size() < pMesh.mPositions.size() - 1) - pMesh.mTangents.insert(pMesh.mTangents.end(), pMesh.mPositions.size() - pMesh.mTangents.size() - 1, aiVector3D(1, 0, 0)); - - // ignore all tangent streams except 0 - there can be only one tangent - if (pInput.mIndex == 0) - pMesh.mTangents.push_back(aiVector3D(obj[0], obj[1], obj[2])); - else - ASSIMP_LOG_ERROR("Collada: just one vertex tangent stream supported"); - break; - case IT_Bitangent: - // pad to current vertex count if necessary - if (pMesh.mBitangents.size() < pMesh.mPositions.size() - 1) - pMesh.mBitangents.insert(pMesh.mBitangents.end(), pMesh.mPositions.size() - pMesh.mBitangents.size() - 1, aiVector3D(0, 0, 1)); - - // ignore all 
bitangent streams except 0 - there can be only one bitangent - if (pInput.mIndex == 0) - pMesh.mBitangents.push_back(aiVector3D(obj[0], obj[1], obj[2])); - else - ASSIMP_LOG_ERROR("Collada: just one vertex bitangent stream supported"); - break; - case IT_Texcoord: - // up to 4 texture coord sets are fine, ignore the others - if (pInput.mIndex < AI_MAX_NUMBER_OF_TEXTURECOORDS) { - // pad to current vertex count if necessary - if (pMesh.mTexCoords[pInput.mIndex].size() < pMesh.mPositions.size() - 1) - pMesh.mTexCoords[pInput.mIndex].insert(pMesh.mTexCoords[pInput.mIndex].end(), - pMesh.mPositions.size() - pMesh.mTexCoords[pInput.mIndex].size() - 1, aiVector3D(0, 0, 0)); - - pMesh.mTexCoords[pInput.mIndex].push_back(aiVector3D(obj[0], obj[1], obj[2])); - if (0 != acc.mSubOffset[2] || 0 != acc.mSubOffset[3]) /* hack ... consider cleaner solution */ - pMesh.mNumUVComponents[pInput.mIndex] = 3; - } else { - ASSIMP_LOG_ERROR("Collada: too many texture coordinate sets. Skipping."); - } - break; - case IT_Color: - // up to 4 color sets are fine, ignore the others - if (pInput.mIndex < AI_MAX_NUMBER_OF_COLOR_SETS) { - // pad to current vertex count if necessary - if (pMesh.mColors[pInput.mIndex].size() < pMesh.mPositions.size() - 1) - pMesh.mColors[pInput.mIndex].insert(pMesh.mColors[pInput.mIndex].end(), - pMesh.mPositions.size() - pMesh.mColors[pInput.mIndex].size() - 1, aiColor4D(0, 0, 0, 1)); - - aiColor4D result(0, 0, 0, 1); - for (size_t i = 0; i < pInput.mResolved->mSize; ++i) { - result[static_cast(i)] = obj[pInput.mResolved->mSubOffset[i]]; + case IT_Position: // ignore all position streams except 0 - there can be only one position + if (pInput.mIndex == 0) { + pMesh.mPositions.push_back(aiVector3D(obj[0], obj[1], obj[2])); + } else { + ASSIMP_LOG_ERROR("Collada: just one vertex position stream supported"); } - pMesh.mColors[pInput.mIndex].push_back(result); - } else { - ASSIMP_LOG_ERROR("Collada: too many vertex color sets. 
Skipping."); - } + break; + case IT_Normal: + // pad to current vertex count if necessary + if (pMesh.mNormals.size() < pMesh.mPositions.size() - 1) + pMesh.mNormals.insert(pMesh.mNormals.end(), pMesh.mPositions.size() - pMesh.mNormals.size() - 1, aiVector3D(0, 1, 0)); - break; - default: - // IT_Invalid and IT_Vertex - ai_assert(false && "shouldn't ever get here"); + // ignore all normal streams except 0 - there can be only one normal + if (pInput.mIndex == 0) { + pMesh.mNormals.push_back(aiVector3D(obj[0], obj[1], obj[2])); + } else { + ASSIMP_LOG_ERROR("Collada: just one vertex normal stream supported"); + } + break; + case IT_Tangent: + // pad to current vertex count if necessary + if (pMesh.mTangents.size() < pMesh.mPositions.size() - 1) + pMesh.mTangents.insert(pMesh.mTangents.end(), pMesh.mPositions.size() - pMesh.mTangents.size() - 1, aiVector3D(1, 0, 0)); + + // ignore all tangent streams except 0 - there can be only one tangent + if (pInput.mIndex == 0) { + pMesh.mTangents.push_back(aiVector3D(obj[0], obj[1], obj[2])); + } else { + ASSIMP_LOG_ERROR("Collada: just one vertex tangent stream supported"); + } + break; + case IT_Bitangent: + // pad to current vertex count if necessary + if (pMesh.mBitangents.size() < pMesh.mPositions.size() - 1) { + pMesh.mBitangents.insert(pMesh.mBitangents.end(), pMesh.mPositions.size() - pMesh.mBitangents.size() - 1, aiVector3D(0, 0, 1)); + } + + // ignore all bitangent streams except 0 - there can be only one bitangent + if (pInput.mIndex == 0) { + pMesh.mBitangents.push_back(aiVector3D(obj[0], obj[1], obj[2])); + } else { + ASSIMP_LOG_ERROR("Collada: just one vertex bitangent stream supported"); + } + break; + case IT_Texcoord: + // up to 4 texture coord sets are fine, ignore the others + if (pInput.mIndex < AI_MAX_NUMBER_OF_TEXTURECOORDS) { + // pad to current vertex count if necessary + if (pMesh.mTexCoords[pInput.mIndex].size() < pMesh.mPositions.size() - 1) + pMesh.mTexCoords[pInput.mIndex].insert(pMesh.mTexCoords[pInput.mIndex].end(), + pMesh.mPositions.size() - pMesh.mTexCoords[pInput.mIndex].size() - 1, aiVector3D(0, 0, 0)); + + pMesh.mTexCoords[pInput.mIndex].push_back(aiVector3D(obj[0], obj[1], obj[2])); + if (0 != acc.mSubOffset[2] || 0 != acc.mSubOffset[3]) { + pMesh.mNumUVComponents[pInput.mIndex] = 3; + } + } else { + ASSIMP_LOG_ERROR("Collada: too many texture coordinate sets. Skipping."); + } + break; + case IT_Color: + // up to 4 color sets are fine, ignore the others + if (pInput.mIndex < AI_MAX_NUMBER_OF_COLOR_SETS) { + // pad to current vertex count if necessary + if (pMesh.mColors[pInput.mIndex].size() < pMesh.mPositions.size() - 1) + pMesh.mColors[pInput.mIndex].insert(pMesh.mColors[pInput.mIndex].end(), + pMesh.mPositions.size() - pMesh.mColors[pInput.mIndex].size() - 1, aiColor4D(0, 0, 0, 1)); + + aiColor4D result(0, 0, 0, 1); + for (size_t i = 0; i < pInput.mResolved->mSize; ++i) { + result[static_cast(i)] = obj[pInput.mResolved->mSubOffset[i]]; + } + pMesh.mColors[pInput.mIndex].push_back(result); + } else { + ASSIMP_LOG_ERROR("Collada: too many vertex color sets. Skipping."); + } + + break; + default: + // IT_Invalid and IT_Vertex + ai_assert(false && "shouldn't ever get here"); } } diff --git a/code/AssetLib/FBX/FBXMaterial.cpp b/code/AssetLib/FBX/FBXMaterial.cpp index 9fe4ce5be..6997388b8 100644 --- a/code/AssetLib/FBX/FBXMaterial.cpp +++ b/code/AssetLib/FBX/FBXMaterial.cpp @@ -54,18 +54,16 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include #include -#include // std::transform #include "FBXUtil.h" namespace Assimp { namespace FBX { - using namespace Util; +using namespace Util; // ------------------------------------------------------------------------------------------------ -Material::Material(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: Object(id,element,name) -{ +Material::Material(uint64_t id, const Element& element, const Document& doc, const std::string& name) : + Object(id,element,name) { const Scope& sc = GetRequiredScope(element); const Element* const ShadingModel = sc["ShadingModel"]; @@ -77,23 +75,21 @@ Material::Material(uint64_t id, const Element& element, const Document& doc, con if(ShadingModel) { shading = ParseTokenAsString(GetRequiredToken(*ShadingModel,0)); - } - else { + } else { DOMWarning("shading mode not specified, assuming phong",&element); shading = "phong"; } - std::string templateName; - // lower-case shading because Blender (for example) writes "Phong" - std::transform(shading.data(), shading.data() + shading.size(), std::addressof(shading[0]), Assimp::ToLower); + for (size_t i = 0; i < shading.length(); ++i) { + shading[i] = static_cast(tolower(shading[i])); + } + std::string templateName; if(shading == "phong") { templateName = "Material.FbxSurfacePhong"; - } - else if(shading == "lambert") { + } else if(shading == "lambert") { templateName = "Material.FbxSurfaceLambert"; - } - else { + } else { DOMWarning("shading mode not recognized: " + shading,&element); } @@ -102,20 +98,19 @@ Material::Material(uint64_t id, const Element& element, const Document& doc, con // resolve texture links const std::vector& conns = doc.GetConnectionsByDestinationSequenced(ID()); for(const Connection* con : conns) { - // texture link to properties, not objects - if (!con->PropertyName().length()) { + if ( 0 == con->PropertyName().length()) { continue; } const Object* const ob = con->SourceObject(); - if(!ob) { + if(nullptr == ob) { DOMWarning("failed to read source object for texture link, ignoring",&element); continue; } const Texture* const tex = dynamic_cast(ob); - if(!tex) { + if(nullptr == tex) { const LayeredTexture* const layeredTexture = dynamic_cast(ob); if(!layeredTexture) { DOMWarning("source object for texture link is not a texture or layered texture, ignoring",&element); @@ -128,9 +123,7 @@ Material::Material(uint64_t id, const Element& element, const Document& doc, con layeredTextures[prop] = layeredTexture; ((LayeredTexture*)layeredTexture)->fillTexture(doc); - } - else - { + } else { const std::string& prop = con->PropertyName(); if (textures.find(prop) != textures.end()) { DOMWarning("duplicate texture link: " + prop,&element); @@ -138,23 +131,20 @@ Material::Material(uint64_t id, const Element& element, const Document& doc, con textures[prop] = tex; } - } } // ------------------------------------------------------------------------------------------------ -Material::~Material() -{ +Material::~Material() { + // empty } - // ------------------------------------------------------------------------------------------------ -Texture::Texture(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: Object(id,element,name) -, uvScaling(1.0f,1.0f) -, media(0) -{ +Texture::Texture(uint64_t id, const Element& element, const Document& doc, const std::string& name) : + Object(id,element,name), + uvScaling(1.0f,1.0f), + media(0) { const Scope& sc = GetRequiredScope(element); const Element* const Type = sc["Type"]; @@ -194,8 +184,7 @@ 
Texture::Texture(uint64_t id, const Element& element, const Document& doc, const crop[1] = ParseTokenAsInt(GetRequiredToken(*Cropping,1)); crop[2] = ParseTokenAsInt(GetRequiredToken(*Cropping,2)); crop[3] = ParseTokenAsInt(GetRequiredToken(*Cropping,3)); - } - else { + } else { // vc8 doesn't support the crop() syntax in initialization lists // (and vc9 WARNS about the new (i.e. compliant) behaviour). crop[0] = crop[1] = crop[2] = crop[3] = 0; @@ -226,7 +215,7 @@ Texture::Texture(uint64_t id, const Element& element, const Document& doc, const const std::vector& conns = doc.GetConnectionsByDestinationSequenced(ID()); for(const Connection* con : conns) { const Object* const ob = con->SourceObject(); - if(!ob) { + if (nullptr == ob) { DOMWarning("failed to read source object for texture link, ignoring",&element); continue; } @@ -240,46 +229,38 @@ Texture::Texture(uint64_t id, const Element& element, const Document& doc, const } -Texture::~Texture() -{ - +Texture::~Texture() { + // empty } -LayeredTexture::LayeredTexture(uint64_t id, const Element& element, const Document& /*doc*/, const std::string& name) -: Object(id,element,name) -,blendMode(BlendMode_Modulate) -,alpha(1) -{ +LayeredTexture::LayeredTexture(uint64_t id, const Element& element, const Document& /*doc*/, const std::string& name) : + Object(id,element,name), + blendMode(BlendMode_Modulate), + alpha(1) { const Scope& sc = GetRequiredScope(element); const Element* const BlendModes = sc["BlendModes"]; const Element* const Alphas = sc["Alphas"]; - - if(BlendModes!=0) - { + if (nullptr != BlendModes) { blendMode = (BlendMode)ParseTokenAsInt(GetRequiredToken(*BlendModes,0)); } - if(Alphas!=0) - { + if (nullptr != Alphas) { alpha = ParseTokenAsFloat(GetRequiredToken(*Alphas,0)); } } -LayeredTexture::~LayeredTexture() -{ - +LayeredTexture::~LayeredTexture() { + // empty } -void LayeredTexture::fillTexture(const Document& doc) -{ +void LayeredTexture::fillTexture(const Document& doc) { const std::vector& conns = doc.GetConnectionsByDestinationSequenced(ID()); - for(size_t i = 0; i < conns.size();++i) - { + for(size_t i = 0; i < conns.size();++i) { const Connection* con = conns.at(i); const Object* const ob = con->SourceObject(); - if(!ob) { + if (nullptr == ob) { DOMWarning("failed to read source object for texture link, ignoring",&element); continue; } @@ -290,13 +271,11 @@ void LayeredTexture::fillTexture(const Document& doc) } } - // ------------------------------------------------------------------------------------------------ -Video::Video(uint64_t id, const Element& element, const Document& doc, const std::string& name) -: Object(id,element,name) -, contentLength(0) -, content(0) -{ +Video::Video(uint64_t id, const Element& element, const Document& doc, const std::string& name) : + Object(id,element,name), + contentLength(0), + content(0) { const Scope& sc = GetRequiredScope(element); const Element* const Type = sc["Type"]; @@ -324,52 +303,43 @@ Video::Video(uint64_t id, const Element& element, const Document& doc, const std if (!token.IsBinary()) { if (*data != '"') { DOMError("embedded content is not surrounded by quotation marks", &element); - } - else { + } else { size_t targetLength = 0; auto numTokens = Content->Tokens().size(); // First time compute size (it could be large like 64Gb and it is good to allocate it once) - for (uint32_t tokenIdx = 0; tokenIdx < numTokens; ++tokenIdx) - { + for (uint32_t tokenIdx = 0; tokenIdx < numTokens; ++tokenIdx) { const Token& dataToken = GetRequiredToken(*Content, tokenIdx); size_t 
tokenLength = dataToken.end() - dataToken.begin() - 2; // ignore double quotes const char* base64data = dataToken.begin() + 1; const size_t outLength = Util::ComputeDecodedSizeBase64(base64data, tokenLength); - if (outLength == 0) - { + if (outLength == 0) { DOMError("Corrupted embedded content found", &element); } targetLength += outLength; } - if (targetLength == 0) - { + if (targetLength == 0) { DOMError("Corrupted embedded content found", &element); } content = new uint8_t[targetLength]; contentLength = static_cast(targetLength); size_t dst_offset = 0; - for (uint32_t tokenIdx = 0; tokenIdx < numTokens; ++tokenIdx) - { + for (uint32_t tokenIdx = 0; tokenIdx < numTokens; ++tokenIdx) { const Token& dataToken = GetRequiredToken(*Content, tokenIdx); size_t tokenLength = dataToken.end() - dataToken.begin() - 2; // ignore double quotes const char* base64data = dataToken.begin() + 1; dst_offset += Util::DecodeBase64(base64data, tokenLength, content + dst_offset, targetLength - dst_offset); } - if (targetLength != dst_offset) - { + if (targetLength != dst_offset) { delete[] content; contentLength = 0; DOMError("Corrupted embedded content found", &element); } } - } - else if (static_cast(token.end() - data) < 5) { + } else if (static_cast(token.end() - data) < 5) { DOMError("binary data array is too short, need five (5) bytes for type signature and element count", &element); - } - else if (*data != 'R') { + } else if (*data != 'R') { DOMWarning("video content is not raw binary data, ignoring", &element); - } - else { + } else { // read number of elements uint32_t len = 0; ::memcpy(&len, data + 1, sizeof(len)); @@ -380,8 +350,7 @@ Video::Video(uint64_t id, const Element& element, const Document& doc, const std content = new uint8_t[len]; ::memcpy(content, data + 5, len); } - } catch (const runtime_error& runtimeError) - { + } catch (const runtime_error& runtimeError) { //we don't need the content data for contents that has already been loaded ASSIMP_LOG_VERBOSE_DEBUG_F("Caught exception in FBXMaterial (likely because content was already loaded): ", runtimeError.what()); @@ -392,14 +361,11 @@ Video::Video(uint64_t id, const Element& element, const Document& doc, const std } -Video::~Video() -{ - if(content) { - delete[] content; - } +Video::~Video() { + delete[] content; } } //!FBX } //!Assimp -#endif +#endif // ASSIMP_BUILD_NO_FBX_IMPORTER diff --git a/code/AssetLib/IFC/IFCGeometry.cpp b/code/AssetLib/IFC/IFCGeometry.cpp index 7e8a06bbb..d6d069fc4 100644 --- a/code/AssetLib/IFC/IFCGeometry.cpp +++ b/code/AssetLib/IFC/IFCGeometry.cpp @@ -656,7 +656,7 @@ void ProcessExtrudedArea(const Schema_2x3::IfcExtrudedAreaSolid& solid, const Te } } - if( openings && ((sides_with_openings == 1 && sides_with_openings) || (sides_with_v_openings == 2 && sides_with_v_openings)) ) { + if( openings && (sides_with_openings == 1 || sides_with_v_openings == 2 ) ) { IFCImporter::LogWarn("failed to resolve all openings, presumably their topology is not supported by Assimp"); } diff --git a/code/AssetLib/IFC/IFCOpenings.cpp b/code/AssetLib/IFC/IFCOpenings.cpp index e15691957..4bf74c0d3 100644 --- a/code/AssetLib/IFC/IFCOpenings.cpp +++ b/code/AssetLib/IFC/IFCOpenings.cpp @@ -1189,20 +1189,9 @@ bool GenerateOpenings(std::vector& openings, TempMesh* profile_data = opening.profileMesh.get(); bool is_2d_source = false; if (opening.profileMesh2D && norm_extrusion_dir.SquareLength() > 0) { - - if(std::fabs(norm_extrusion_dir * wall_extrusion_axis_norm) < 0.1) { - // horizontal extrusion - if (std::fabs(norm_extrusion_dir * 
nor) > 0.9) { - profile_data = opening.profileMesh2D.get(); - is_2d_source = true; - } - } - else { - // vertical extrusion - if (std::fabs(norm_extrusion_dir * nor) > 0.9) { - profile_data = opening.profileMesh2D.get(); - is_2d_source = true; - } + if (std::fabs(norm_extrusion_dir * nor) > 0.9) { + profile_data = opening.profileMesh2D.get(); + is_2d_source = true; } } std::vector profile_verts = profile_data->mVerts; diff --git a/code/AssetLib/M3D/m3d.h b/code/AssetLib/M3D/m3d.h index c25c633ba..dfc30aec3 100644 --- a/code/AssetLib/M3D/m3d.h +++ b/code/AssetLib/M3D/m3d.h @@ -1642,7 +1642,7 @@ static int _m3dstbi__expand_png_palette(_m3dstbi__png *a, unsigned char *palette static int _m3dstbi__parse_png_file(_m3dstbi__png *z, int scan, int req_comp) { unsigned char palette[1024], pal_img_n = 0; unsigned char has_trans = 0, tc[3] = {}; - _m3dstbi__uint16 tc16[3]; + _m3dstbi__uint16 tc16[3] = {}; _m3dstbi__uint32 ioff = 0, idata_limit = 0, i, pal_len = 0; int first = 1, k, interlace = 0, color = 0; _m3dstbi__context *s = z->s; diff --git a/code/AssetLib/Obj/ObjFileMtlImporter.cpp b/code/AssetLib/Obj/ObjFileMtlImporter.cpp index 283735912..bf1b70c90 100644 --- a/code/AssetLib/Obj/ObjFileMtlImporter.cpp +++ b/code/AssetLib/Obj/ObjFileMtlImporter.cpp @@ -122,8 +122,8 @@ void ObjFileMtlImporter::load() { { ++m_DataIt; getColorRGBA(&m_pModel->m_pCurrentMaterial->ambient); - } else if (*m_DataIt == 'd') // Diffuse color - { + } else if (*m_DataIt == 'd') { + // Diffuse color ++m_DataIt; getColorRGBA(&m_pModel->m_pCurrentMaterial->diffuse); } else if (*m_DataIt == 's') { @@ -144,7 +144,9 @@ void ObjFileMtlImporter::load() { } else if (*m_DataIt == 'r') { // Material transmission alpha value ++m_DataIt; - getFloatValue(m_pModel->m_pCurrentMaterial->alpha); + ai_real d; + getFloatValue(d); + m_pModel->m_pCurrentMaterial->alpha = static_cast(1.0) - d; } m_DataIt = skipLine(m_DataIt, m_DataItEnd, m_uiLine); } break; diff --git a/code/AssetLib/STL/STLExporter.cpp b/code/AssetLib/STL/STLExporter.cpp index fd4c41033..890ee166f 100644 --- a/code/AssetLib/STL/STLExporter.cpp +++ b/code/AssetLib/STL/STLExporter.cpp @@ -147,7 +147,7 @@ STLExporter::STLExporter(const char* _filename, const aiScene* pScene, bool expo for(unsigned int i = 0; i < pScene->mNumMeshes; ++i) { WriteMesh(pScene->mMeshes[ i ]); } - mOutput << EndSolidToken << name << endl; + mOutput << EndSolidToken << " " << name << endl; } } diff --git a/code/AssetLib/glTF/glTFCommon.cpp b/code/AssetLib/glTF/glTFCommon.cpp index 01ba31209..8a4b8927d 100644 --- a/code/AssetLib/glTF/glTFCommon.cpp +++ b/code/AssetLib/glTF/glTFCommon.cpp @@ -54,7 +54,7 @@ size_t DecodeBase64(const char *in, size_t inLength, uint8_t *&out) { } if (inLength < 4) { - out = 0; + out = nullptr; return 0; } diff --git a/code/AssetLib/glTF/glTFCommon.h b/code/AssetLib/glTF/glTFCommon.h index 6d402b0e3..cd5b552f5 100644 --- a/code/AssetLib/glTF/glTFCommon.h +++ b/code/AssetLib/glTF/glTFCommon.h @@ -107,7 +107,6 @@ public: f(file) {} ~IOStream() { fclose(f); - f = 0; } size_t Read(void *b, size_t sz, size_t n) { return fread(b, sz, n, f); } diff --git a/code/AssetLib/glTF2/glTF2Asset.h b/code/AssetLib/glTF2/glTF2Asset.h index 94cbef2cd..95bd5ad4b 100644 --- a/code/AssetLib/glTF2/glTF2Asset.h +++ b/code/AssetLib/glTF2/glTF2Asset.h @@ -376,87 +376,6 @@ struct Object { // Classes for each glTF top-level object type // -//! A typed view into a BufferView. A BufferView contains raw binary data. -//! An accessor provides a typed view into a BufferView or a subset of a BufferView -//! 
similar to how WebGL's vertexAttribPointer() defines an attribute in a buffer. -struct Accessor : public Object { - struct Sparse; - - Ref bufferView; //!< The ID of the bufferView. (required) - size_t byteOffset; //!< The offset relative to the start of the bufferView in bytes. (required) - ComponentType componentType; //!< The datatype of components in the attribute. (required) - size_t count; //!< The number of attributes referenced by this accessor. (required) - AttribType::Value type; //!< Specifies if the attribute is a scalar, vector, or matrix. (required) - std::vector max; //!< Maximum value of each component in this attribute. - std::vector min; //!< Minimum value of each component in this attribute. - std::unique_ptr sparse; - - unsigned int GetNumComponents(); - unsigned int GetBytesPerComponent(); - unsigned int GetElementSize(); - - inline uint8_t *GetPointer(); - - template - void ExtractData(T *&outData); - - void WriteData(size_t count, const void *src_buffer, size_t src_stride); - void WriteSparseValues(size_t count, const void *src_data, size_t src_dataStride); - void WriteSparseIndices(size_t count, const void *src_idx, size_t src_idxStride); - - //! Helper class to iterate the data - class Indexer { - friend struct Accessor; - - // This field is reported as not used, making it protectd is the easiest way to work around it without going to the bottom of what the problem is: - // ../code/glTF2/glTF2Asset.h:392:19: error: private field 'accessor' is not used [-Werror,-Wunused-private-field] - protected: - Accessor &accessor; - - private: - uint8_t *data; - size_t elemSize, stride; - - Indexer(Accessor &acc); - - public: - //! Accesses the i-th value as defined by the accessor - template - T GetValue(int i); - - //! Accesses the i-th value as defined by the accessor - inline unsigned int GetUInt(int i) { - return GetValue(i); - } - - inline bool IsValid() const { - return data != 0; - } - }; - - inline Indexer GetIndexer() { - return Indexer(*this); - } - - Accessor() {} - void Read(Value &obj, Asset &r); - - //sparse - struct Sparse { - size_t count; - ComponentType indicesType; - Ref indices; - size_t indicesByteOffset; - Ref values; - size_t valuesByteOffset; - - std::vector data; //!< Actual data, which may be defaulted to an array of zeros or the original data, with the sparse buffer view applied on top of it. - - void PopulateData(size_t numBytes, uint8_t *bytes); - void PatchData(unsigned int elementSize); - }; -}; - //! A buffer points to binary geometry, animation, or skins. struct Buffer : public Object { /********************* Types *********************/ @@ -594,6 +513,90 @@ struct BufferView : public Object { uint8_t *GetPointer(size_t accOffset); }; +//! A typed view into a BufferView. A BufferView contains raw binary data. +//! An accessor provides a typed view into a BufferView or a subset of a BufferView +//! similar to how WebGL's vertexAttribPointer() defines an attribute in a buffer. +struct Accessor : public Object { + struct Sparse; + + Ref bufferView; //!< The ID of the bufferView. (required) + size_t byteOffset; //!< The offset relative to the start of the bufferView in bytes. (required) + ComponentType componentType; //!< The datatype of components in the attribute. (required) + size_t count; //!< The number of attributes referenced by this accessor. (required) + AttribType::Value type; //!< Specifies if the attribute is a scalar, vector, or matrix. (required) + std::vector max; //!< Maximum value of each component in this attribute. 
+ std::vector min; //!< Minimum value of each component in this attribute. + std::unique_ptr sparse; + std::unique_ptr decodedBuffer; // Packed decoded data, returned instead of original bufferView if present + + unsigned int GetNumComponents(); + unsigned int GetBytesPerComponent(); + unsigned int GetElementSize(); + + inline uint8_t *GetPointer(); + inline size_t GetStride(); + inline size_t GetMaxByteSize(); + + template + void ExtractData(T *&outData); + + void WriteData(size_t count, const void *src_buffer, size_t src_stride); + void WriteSparseValues(size_t count, const void *src_data, size_t src_dataStride); + void WriteSparseIndices(size_t count, const void *src_idx, size_t src_idxStride); + + //! Helper class to iterate the data + class Indexer { + friend struct Accessor; + + // This field is reported as not used, making it protectd is the easiest way to work around it without going to the bottom of what the problem is: + // ../code/glTF2/glTF2Asset.h:392:19: error: private field 'accessor' is not used [-Werror,-Wunused-private-field] + protected: + Accessor &accessor; + + private: + uint8_t *data; + size_t elemSize, stride; + + Indexer(Accessor &acc); + + public: + //! Accesses the i-th value as defined by the accessor + template + T GetValue(int i); + + //! Accesses the i-th value as defined by the accessor + inline unsigned int GetUInt(int i) { + return GetValue(i); + } + + inline bool IsValid() const { + return data != nullptr; + } + }; + + inline Indexer GetIndexer() { + return Indexer(*this); + } + + Accessor() {} + void Read(Value &obj, Asset &r); + + //sparse + struct Sparse { + size_t count; + ComponentType indicesType; + Ref indices; + size_t indicesByteOffset; + Ref values; + size_t valuesByteOffset; + + std::vector data; //!< Actual data, which may be defaulted to an array of zeros or the original data, with the sparse buffer view applied on top of it. + + void PopulateData(size_t numBytes, uint8_t *bytes); + void PatchData(unsigned int elementSize); + }; +}; + struct Camera : public Object { enum Type { Perspective, @@ -846,7 +849,7 @@ struct CustomExtension : public Object { CustomExtension() = default; - CustomExtension(const CustomExtension& other) + CustomExtension(const CustomExtension &other) : Object(other) , mStringValue(other.mStringValue) , mDoubleValue(other.mDoubleValue) @@ -1092,6 +1095,7 @@ public: bool KHR_materials_sheen; bool KHR_materials_clearcoat; bool KHR_materials_transmission; + bool KHR_draco_mesh_compression; } extensionsUsed; //! Keeps info about the required extensions @@ -1100,7 +1104,7 @@ public: } extensionsRequired; AssetMetadata asset; - Value* extras = nullptr; + Value *extras = nullptr; // Dictionaries for each type of object @@ -1122,7 +1126,7 @@ public: Ref scene; public: - Asset(IOSystem *io = 0) : + Asset(IOSystem *io = nullptr) : mIOSystem(io), asset(), accessors(*this, "accessors"), diff --git a/code/AssetLib/glTF2/glTF2Asset.inl b/code/AssetLib/glTF2/glTF2Asset.inl index 0e265efef..b234fd0bf 100644 --- a/code/AssetLib/glTF2/glTF2Asset.inl +++ b/code/AssetLib/glTF2/glTF2Asset.inl @@ -42,9 +42,40 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "AssetLib/glTF/glTFCommon.h" +#include #include #include -#include + +#ifdef ASSIMP_ENABLE_DRACO + +// Google draco library headers spew many warnings. 
Bad Google, no cookie +#if _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4018) // Signed/unsigned mismatch +#pragma warning(disable : 4804) // Unsafe use of type 'bool' +#elif defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wsign-compare" +#elif defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wbool-compare" +#pragma GCC diagnostic ignored "-Wsign-compare" +#endif + +#include "draco/compression/decode.h" +#include "draco/core/decoder_buffer.h" + +#if _MSC_VER +#pragma warning(pop) +#elif defined(__clang__) +#pragma clang diagnostic pop +#elif defined(__GNUC__) +#pragma GCC diagnostic pop +#endif +#ifndef DRACO_MESH_COMPRESSION_SUPPORTED +#error glTF: KHR_draco_mesh_compression: draco library must have DRACO_MESH_COMPRESSION_SUPPORTED +#endif +#endif using namespace Assimp; @@ -146,35 +177,155 @@ inline static T MemberOrDefault(Value &obj, const char *id, T defaultValue) { inline Value *FindMember(Value &val, const char *id) { Value::MemberIterator it = val.FindMember(id); - return (it != val.MemberEnd()) ? &it->value : 0; + return (it != val.MemberEnd()) ? &it->value : nullptr; } inline Value *FindString(Value &val, const char *id) { Value::MemberIterator it = val.FindMember(id); - return (it != val.MemberEnd() && it->value.IsString()) ? &it->value : 0; + return (it != val.MemberEnd() && it->value.IsString()) ? &it->value : nullptr; } inline Value *FindNumber(Value &val, const char *id) { Value::MemberIterator it = val.FindMember(id); - return (it != val.MemberEnd() && it->value.IsNumber()) ? &it->value : 0; + return (it != val.MemberEnd() && it->value.IsNumber()) ? &it->value : nullptr; } inline Value *FindUInt(Value &val, const char *id) { Value::MemberIterator it = val.FindMember(id); - return (it != val.MemberEnd() && it->value.IsUint()) ? &it->value : 0; + return (it != val.MemberEnd() && it->value.IsUint()) ? &it->value : nullptr; } inline Value *FindArray(Value &val, const char *id) { Value::MemberIterator it = val.FindMember(id); - return (it != val.MemberEnd() && it->value.IsArray()) ? &it->value : 0; + return (it != val.MemberEnd() && it->value.IsArray()) ? &it->value : nullptr; } inline Value *FindObject(Value &val, const char *id) { Value::MemberIterator it = val.FindMember(id); - return (it != val.MemberEnd() && it->value.IsObject()) ? &it->value : 0; + return (it != val.MemberEnd() && it->value.IsObject()) ? 
&it->value : nullptr; +} + +inline Value *FindExtension(Value &val, const char *extensionId) { + if (Value *extensionList = FindObject(val, "extensions")) { + if (Value *extension = FindObject(*extensionList, extensionId)) { + return extension; + } + } + return nullptr; } } // namespace +#ifdef ASSIMP_ENABLE_DRACO + +template +inline void CopyFaceIndex_Draco(Buffer &decodedIndexBuffer, const draco::Mesh &draco_mesh) { + const size_t faceStride = sizeof(T) * 3; + for (draco::FaceIndex f(0); f < draco_mesh.num_faces(); ++f) { + const draco::Mesh::Face &face = draco_mesh.face(f); + T indices[3] = { static_cast(face[0].value()), static_cast(face[1].value()), static_cast(face[2].value()) }; + memcpy(decodedIndexBuffer.GetPointer() + (f.value() * faceStride), &indices[0], faceStride); + } +} + +inline void SetDecodedIndexBuffer_Draco(const draco::Mesh &dracoMesh, Mesh::Primitive &prim) { + if (!prim.indices || dracoMesh.num_faces() == 0) + return; + + // Create a decoded Index buffer (if there is one) + size_t componentBytes = prim.indices->GetBytesPerComponent(); + + std::unique_ptr decodedIndexBuffer(new Buffer()); + decodedIndexBuffer->Grow(dracoMesh.num_faces() * 3 * componentBytes); + + // If accessor uses the same size as draco implementation, copy the draco buffer directly + + // Usually uint32_t but shouldn't assume + if (sizeof(dracoMesh.face(draco::FaceIndex(0))[0]) == componentBytes) { + memcpy(decodedIndexBuffer->GetPointer(), &dracoMesh.face(draco::FaceIndex(0))[0], decodedIndexBuffer->byteLength); + return; + } + + // Not same size, convert + switch (componentBytes) { + case sizeof(uint32_t): + CopyFaceIndex_Draco(*decodedIndexBuffer, dracoMesh); + break; + case sizeof(uint16_t): + CopyFaceIndex_Draco(*decodedIndexBuffer, dracoMesh); + break; + case sizeof(uint8_t): + CopyFaceIndex_Draco(*decodedIndexBuffer, dracoMesh); + break; + default: + ai_assert(false); + break; + } + + // Assign this alternate data buffer to the accessor + prim.indices->decodedBuffer.swap(decodedIndexBuffer); +} + +template +static bool GetAttributeForAllPoints_Draco(const draco::Mesh &dracoMesh, + const draco::PointAttribute &dracoAttribute, + Buffer &outBuffer) { + size_t byteOffset = 0; + T values[4] = { 0, 0, 0, 0 }; + for (draco::PointIndex i(0); i < dracoMesh.num_points(); ++i) { + const draco::AttributeValueIndex val_index = dracoAttribute.mapped_index(i); + if (!dracoAttribute.ConvertValue(val_index, dracoAttribute.num_components(), values)) { + return false; + } + + memcpy(outBuffer.GetPointer() + byteOffset, &values[0], sizeof(T) * dracoAttribute.num_components()); + byteOffset += sizeof(T) * dracoAttribute.num_components(); + } + + return true; +} + +inline void SetDecodedAttributeBuffer_Draco(const draco::Mesh &dracoMesh, uint32_t dracoAttribId, Accessor &accessor) { + // Create decoded buffer + const draco::PointAttribute *pDracoAttribute = dracoMesh.GetAttributeByUniqueId(dracoAttribId); + if (pDracoAttribute == nullptr) { + throw DeadlyImportError("GLTF: Invalid draco attribute id: ", dracoAttribId); + } + + size_t componentBytes = accessor.GetBytesPerComponent(); + + std::unique_ptr decodedAttribBuffer(new Buffer()); + decodedAttribBuffer->Grow(dracoMesh.num_points() * pDracoAttribute->num_components() * componentBytes); + + switch (accessor.componentType) { + case ComponentType_BYTE: + GetAttributeForAllPoints_Draco(dracoMesh, *pDracoAttribute, *decodedAttribBuffer); + break; + case ComponentType_UNSIGNED_BYTE: + GetAttributeForAllPoints_Draco(dracoMesh, *pDracoAttribute, 
*decodedAttribBuffer); + break; + case ComponentType_SHORT: + GetAttributeForAllPoints_Draco(dracoMesh, *pDracoAttribute, *decodedAttribBuffer); + break; + case ComponentType_UNSIGNED_SHORT: + GetAttributeForAllPoints_Draco(dracoMesh, *pDracoAttribute, *decodedAttribBuffer); + break; + case ComponentType_UNSIGNED_INT: + GetAttributeForAllPoints_Draco(dracoMesh, *pDracoAttribute, *decodedAttribBuffer); + break; + case ComponentType_FLOAT: + GetAttributeForAllPoints_Draco(dracoMesh, *pDracoAttribute, *decodedAttribBuffer); + break; + default: + ai_assert(false); + break; + } + + // Assign this alternate data buffer to the accessor + accessor.decodedBuffer.swap(decodedAttribBuffer); +} + +#endif // ASSIMP_ENABLE_DRACO + // // LazyDict methods // @@ -197,7 +348,7 @@ inline LazyDict::~LazyDict() { template inline void LazyDict::AttachToDocument(Document &doc) { - Value *container = 0; + Value *container = nullptr; if (mExtId) { if (Value *exts = FindObject(doc, "extensions")) { @@ -214,7 +365,7 @@ inline void LazyDict::AttachToDocument(Document &doc) { template inline void LazyDict::DetachFromDocument() { - mDict = 0; + mDict = nullptr; } template @@ -382,18 +533,18 @@ inline void Buffer::Read(Value &obj, Asset &r) { glTFCommon::Util::DataURI dataURI; if (ParseDataURI(uri, it->GetStringLength(), dataURI)) { if (dataURI.base64) { - uint8_t *data = 0; + uint8_t *data = nullptr; this->byteLength = glTFCommon::Util::DecodeBase64(dataURI.data, dataURI.dataLength, data); this->mData.reset(data, std::default_delete()); if (statedLength > 0 && this->byteLength != statedLength) { throw DeadlyImportError("GLTF: buffer \"", id, "\", expected ", to_string(statedLength), - " bytes, but found ", to_string(dataURI.dataLength)); + " bytes, but found ", to_string(dataURI.dataLength)); } } else { // assume raw data if (statedLength != dataURI.dataLength) { throw DeadlyImportError("GLTF: buffer \"", id, "\", expected ", to_string(statedLength), - " bytes, but found ", to_string(dataURI.dataLength)); + " bytes, but found ", to_string(dataURI.dataLength)); } this->mData.reset(new uint8_t[dataURI.dataLength], std::default_delete()); @@ -401,10 +552,7 @@ inline void Buffer::Read(Value &obj, Asset &r) { } } else { // Local file if (byteLength > 0) { - std::string dir = !r.mCurrentAssetDir.empty() ? ( - r.mCurrentAssetDir.back() == '/' ? - r.mCurrentAssetDir : r.mCurrentAssetDir + '/' - ) : ""; + std::string dir = !r.mCurrentAssetDir.empty() ? (r.mCurrentAssetDir.back() == '/' ? 
r.mCurrentAssetDir : r.mCurrentAssetDir + '/') : ""; IOStream *file = r.OpenFile(dir + uri, "rb"); if (file) { @@ -575,9 +723,9 @@ inline void BufferView::Read(Value &obj, Asset &r) { } inline uint8_t *BufferView::GetPointer(size_t accOffset) { - if (!buffer) return 0; + if (!buffer) return nullptr; uint8_t *basePtr = buffer->GetPointer(); - if (!basePtr) return 0; + if (!basePtr) return nullptr; size_t offset = accOffset + byteOffset; if (buffer->EncodedRegion_Current != nullptr) { @@ -709,12 +857,15 @@ inline unsigned int Accessor::GetElementSize() { } inline uint8_t *Accessor::GetPointer() { + if (decodedBuffer) + return decodedBuffer->GetPointer(); + if (sparse) return sparse->data.data(); - if (!bufferView || !bufferView->buffer) return 0; + if (!bufferView || !bufferView->buffer) return nullptr; uint8_t *basePtr = bufferView->buffer->GetPointer(); - if (!basePtr) return 0; + if (!basePtr) return nullptr; size_t offset = byteOffset + bufferView->byteOffset; @@ -730,6 +881,22 @@ inline uint8_t *Accessor::GetPointer() { return basePtr + offset; } +inline size_t Accessor::GetStride() { + // Decoded buffer is always packed + if (decodedBuffer) + return GetElementSize(); + + // Sparse and normal bufferView + return (bufferView && bufferView->byteStride ? bufferView->byteStride : GetElementSize()); +} + +inline size_t Accessor::GetMaxByteSize() { + if (decodedBuffer) + return decodedBuffer->byteLength; + + return (bufferView ? bufferView->byteLength : sparse->data.size()); +} + namespace { inline void CopyData(size_t count, const uint8_t *src, size_t src_stride, @@ -761,7 +928,7 @@ void Accessor::ExtractData(T *&outData) { const size_t elemSize = GetElementSize(); const size_t totalSize = elemSize * count; - const size_t stride = bufferView && bufferView->byteStride ? bufferView->byteStride : elemSize; + const size_t stride = GetStride(); const size_t targetElemSize = sizeof(T); @@ -769,8 +936,8 @@ void Accessor::ExtractData(T *&outData) { throw DeadlyImportError("GLTF: elemSize ", elemSize, " > targetElemSize ", targetElemSize, " in ", getContextForErrorMessages(id, name)); } - const size_t maxSize = (bufferView ? bufferView->byteLength : sparse->data.size()); - if (count*stride > maxSize) { + const size_t maxSize = GetMaxByteSize(); + if (count * stride > maxSize) { throw DeadlyImportError("GLTF: count*stride ", (count * stride), " > maxSize ", maxSize, " in ", getContextForErrorMessages(id, name)); } @@ -828,14 +995,14 @@ inline Accessor::Indexer::Indexer(Accessor &acc) : accessor(acc), data(acc.GetPointer()), elemSize(acc.GetElementSize()), - stride(acc.bufferView && acc.bufferView->byteStride ? acc.bufferView->byteStride : elemSize) { + stride(acc.GetStride()) { } //! Accesses the i-th value as defined by the accessor template T Accessor::Indexer::GetValue(int i) { ai_assert(data); - ai_assert(i * stride < accessor.bufferView->byteLength); + ai_assert(i * stride < accessor.GetMaxByteSize()); // Ensure that the memcpy doesn't overwrite the local. 
const size_t sizeToCopy = std::min(elemSize, sizeof(T)); T value = T(); @@ -872,8 +1039,7 @@ inline void Image::Read(Value &obj, Asset &r) { if (Value *mtype = FindString(obj, "mimeType")) { this->mimeType = mtype->GetString(); } - if (!this->bufferView || this->mimeType.empty()) - { + if (!this->bufferView || this->mimeType.empty()) { throw DeadlyImportError("GLTF2: ", getContextForErrorMessages(id, name), " does not have a URI, so it must have a valid bufferView and mimetype"); } @@ -884,10 +1050,8 @@ inline void Image::Read(Value &obj, Asset &r) { this->mData.reset(new uint8_t[this->mDataLength]); memcpy(this->mData.get(), buffer->GetPointer() + this->bufferView->byteOffset, this->mDataLength); - } - else - { - throw DeadlyImportError("GLTF2: ", getContextForErrorMessages(id, name), " should have either a URI of a bufferView and mimetype" ); + } else { + throw DeadlyImportError("GLTF2: ", getContextForErrorMessages(id, name), " should have either a URI of a bufferView and mimetype"); } } } @@ -946,28 +1110,26 @@ inline void Texture::Read(Value &obj, Asset &r) { namespace { inline void SetTextureProperties(Asset &r, Value *prop, TextureInfo &out) { if (r.extensionsUsed.KHR_texture_transform) { - if (Value *extensions = FindObject(*prop, "extensions")) { + if (Value *pKHR_texture_transform = FindExtension(*prop, "KHR_texture_transform")) { out.textureTransformSupported = true; - if (Value *pKHR_texture_transform = FindObject(*extensions, "KHR_texture_transform")) { - if (Value *array = FindArray(*pKHR_texture_transform, "offset")) { - out.TextureTransformExt_t.offset[0] = (*array)[0].GetFloat(); - out.TextureTransformExt_t.offset[1] = (*array)[1].GetFloat(); - } else { - out.TextureTransformExt_t.offset[0] = 0; - out.TextureTransformExt_t.offset[1] = 0; - } + if (Value *array = FindArray(*pKHR_texture_transform, "offset")) { + out.TextureTransformExt_t.offset[0] = (*array)[0].GetFloat(); + out.TextureTransformExt_t.offset[1] = (*array)[1].GetFloat(); + } else { + out.TextureTransformExt_t.offset[0] = 0; + out.TextureTransformExt_t.offset[1] = 0; + } - if (!ReadMember(*pKHR_texture_transform, "rotation", out.TextureTransformExt_t.rotation)) { - out.TextureTransformExt_t.rotation = 0; - } + if (!ReadMember(*pKHR_texture_transform, "rotation", out.TextureTransformExt_t.rotation)) { + out.TextureTransformExt_t.rotation = 0; + } - if (Value *array = FindArray(*pKHR_texture_transform, "scale")) { - out.TextureTransformExt_t.scale[0] = (*array)[0].GetFloat(); - out.TextureTransformExt_t.scale[1] = (*array)[1].GetFloat(); - } else { - out.TextureTransformExt_t.scale[0] = 1; - out.TextureTransformExt_t.scale[1] = 1; - } + if (Value *array = FindArray(*pKHR_texture_transform, "scale")) { + out.TextureTransformExt_t.scale[0] = (*array)[0].GetFloat(); + out.TextureTransformExt_t.scale[1] = (*array)[1].GetFloat(); + } else { + out.TextureTransformExt_t.scale[0] = 1; + out.TextureTransformExt_t.scale[1] = 1; } } } @@ -1043,8 +1205,7 @@ inline void Material::Read(Value &material, Asset &r) { } } - if (r.extensionsUsed.KHR_texture_transform) { - } + // Extension KHR_texture_transform is handled in ReadTextureProperty if (r.extensionsUsed.KHR_materials_sheen) { if (Value *curMaterialSheen = FindObject(*extensions, "KHR_materials_sheen")) { @@ -1106,12 +1267,12 @@ void SetVector(vec3 &v, const float (&in)[3]) { inline void Material::SetDefaults() { //pbr materials SetVector(pbrMetallicRoughness.baseColorFactor, defaultBaseColor); - pbrMetallicRoughness.metallicFactor = 1.0; - 
pbrMetallicRoughness.roughnessFactor = 1.0; + pbrMetallicRoughness.metallicFactor = 1.0f; + pbrMetallicRoughness.roughnessFactor = 1.0f; SetVector(emissiveFactor, defaultEmissiveFactor); alphaMode = "OPAQUE"; - alphaCutoff = 0.5; + alphaCutoff = 0.5f; doubleSided = false; unlit = false; } @@ -1120,7 +1281,7 @@ inline void PbrSpecularGlossiness::SetDefaults() { //pbrSpecularGlossiness properties SetVector(diffuseFactor, defaultDiffuseFactor); SetVector(specularFactor, defaultSpecularFactor); - glossinessFactor = 1.0; + glossinessFactor = 1.0f; } inline void MaterialSheen::SetDefaults() { @@ -1192,6 +1353,14 @@ inline void Mesh::Read(Value &pJSON_Object, Asset &pAsset_Root) { Primitive &prim = this->primitives[i]; prim.mode = MemberOrDefault(primitive, "mode", PrimitiveMode_TRIANGLES); + if (Value *indices = FindUInt(primitive, "indices")) { + prim.indices = pAsset_Root.accessors.Retrieve(indices->GetUint()); + } + + if (Value *material = FindUInt(primitive, "material")) { + prim.material = pAsset_Root.materials.Retrieve(material->GetUint()); + } + if (Value *attrs = FindObject(primitive, "attributes")) { for (Value::MemberIterator it = attrs->MemberBegin(); it != attrs->MemberEnd(); ++it) { if (!it->value.IsUint()) continue; @@ -1200,11 +1369,12 @@ inline void Mesh::Read(Value &pJSON_Object, Asset &pAsset_Root) { // and WEIGHT.Attribute semantics can be of the form[semantic]_[set_index], e.g., TEXCOORD_0, TEXCOORD_1, etc. int undPos = 0; - Mesh::AccessorList *vec = 0; + Mesh::AccessorList *vec = nullptr; if (GetAttribVector(prim, attr, vec, undPos)) { size_t idx = (attr[undPos] == '_') ? atoi(attr + undPos + 1) : 0; if ((*vec).size() != idx) { - throw DeadlyImportError("GLTF: Invalid attribute: ", attr, ". All indices for indexed attribute semantics must start with 0 and be continuous positive integers: TEXCOORD_0, TEXCOORD_1, etc."); + throw DeadlyImportError("GLTF: Invalid attribute in mesh: ", name, " primitive: ", i, "attrib: ", attr, + ". All indices for indexed attribute semantics must start with 0 and be continuous positive integers: TEXCOORD_0, TEXCOORD_1, etc."); } (*vec).resize(idx + 1); (*vec)[idx] = pAsset_Root.accessors.Retrieve(it->value.GetUint()); @@ -1212,6 +1382,69 @@ inline void Mesh::Read(Value &pJSON_Object, Asset &pAsset_Root) { } } +#ifdef ASSIMP_ENABLE_DRACO + // KHR_draco_mesh_compression spec: Draco can only be used for glTF Triangles or Triangle Strips + if (pAsset_Root.extensionsUsed.KHR_draco_mesh_compression && (prim.mode == PrimitiveMode_TRIANGLES || prim.mode == PrimitiveMode_TRIANGLE_STRIP)) { + // Look for draco mesh compression extension and bufferView + // Skip if any missing + if (Value *dracoExt = FindExtension(primitive, "KHR_draco_mesh_compression")) { + if (Value *bufView = FindUInt(*dracoExt, "bufferView")) { + // Attempt to load indices and attributes using draco compression + auto bufferView = pAsset_Root.bufferViews.Retrieve(bufView->GetUint()); + // Attempt to perform the draco decode on the buffer data + const char *bufferViewData = reinterpret_cast(bufferView->buffer->GetPointer() + bufferView->byteOffset); + draco::DecoderBuffer decoderBuffer; + decoderBuffer.Init(bufferViewData, bufferView->byteLength); + draco::Decoder decoder; + auto decodeResult = decoder.DecodeMeshFromBuffer(&decoderBuffer); + if (!decodeResult.ok()) { + // A corrupt Draco isn't actually fatal if the primitive data is also provided in a standard buffer, but does anyone do that? 
+ throw DeadlyImportError("GLTF: Invalid Draco mesh compression in mesh: ", name, " primitive: ", i, ": ", decodeResult.status().error_msg_string()); + } + + // Now we have a draco mesh + const std::unique_ptr &pDracoMesh = decodeResult.value(); + + // Redirect the accessors to the decoded data + + // Indices + SetDecodedIndexBuffer_Draco(*pDracoMesh, prim); + + // Vertex attributes + if (Value *attrs = FindObject(*dracoExt, "attributes")) { + for (Value::MemberIterator it = attrs->MemberBegin(); it != attrs->MemberEnd(); ++it) { + if (!it->value.IsUint()) continue; + const char *attr = it->name.GetString(); + + int undPos = 0; + Mesh::AccessorList *vec = nullptr; + if (GetAttribVector(prim, attr, vec, undPos)) { + size_t idx = (attr[undPos] == '_') ? atoi(attr + undPos + 1) : 0; + if (idx >= (*vec).size()) { + throw DeadlyImportError("GLTF: Invalid draco attribute in mesh: ", name, " primitive: ", i, " attrib: ", attr, + ". All indices for indexed attribute semantics must start with 0 and be continuous positive integers: TEXCOORD_0, TEXCOORD_1, etc."); + } + + if (!(*vec)[idx]) { + throw DeadlyImportError("GLTF: Invalid draco attribute in mesh: ", name, " primitive: ", i, " attrib: ", attr, + ". All draco-encoded attributes must also define an accessor."); + } + + Accessor &attribAccessor = *(*vec)[idx]; + if (attribAccessor.count == 0) + throw DeadlyImportError("GLTF: Invalid draco attribute in mesh: ", name, " primitive: ", i, " attrib: ", attr); + + // Redirect this accessor to the appropriate Draco vertex attribute data + const uint32_t dracoAttribId = it->value.GetUint(); + SetDecodedAttributeBuffer_Draco(*pDracoMesh, dracoAttribId, attribAccessor); + } + } + } + } + } + } +#endif + Value *targetsArray = FindArray(primitive, "targets"); if (nullptr != targetsArray) { prim.targets.resize(targetsArray->Size()); @@ -1227,7 +1460,7 @@ inline void Mesh::Read(Value &pJSON_Object, Asset &pAsset_Root) { const char *attr = it->name.GetString(); // Valid attribute semantics include POSITION, NORMAL, TANGENT int undPos = 0; - Mesh::AccessorList *vec = 0; + Mesh::AccessorList *vec = nullptr; if (GetAttribTargetVector(prim, j, attr, vec, undPos)) { size_t idx = (attr[undPos] == '_') ? 
atoi(attr + undPos + 1) : 0; if ((*vec).size() <= idx) { @@ -1238,14 +1471,6 @@ inline void Mesh::Read(Value &pJSON_Object, Asset &pAsset_Root) { } } } - - if (Value *indices = FindUInt(primitive, "indices")) { - prim.indices = pAsset_Root.accessors.Retrieve(indices->GetUint()); - } - - if (Value *material = FindUInt(primitive, "material")) { - prim.material = pAsset_Root.materials.Retrieve(material->GetUint()); - } } } @@ -1331,25 +1556,22 @@ inline void Light::Read(Value &obj, Asset & /*r*/) { } } -inline CustomExtension ReadExtensions(const char *name, Value& obj) { +inline CustomExtension ReadExtensions(const char *name, Value &obj) { CustomExtension ret; ret.name = name; if (obj.IsObject()) { ret.mValues.isPresent = true; for (auto it = obj.MemberBegin(); it != obj.MemberEnd(); ++it) { - auto& val = it->value; + auto &val = it->value; ret.mValues.value.push_back(ReadExtensions(it->name.GetString(), val)); } - } - else if (obj.IsArray()) { + } else if (obj.IsArray()) { ret.mValues.value.reserve(obj.Size()); ret.mValues.isPresent = true; - for (unsigned int i = 0; i < obj.Size(); ++i) - { + for (unsigned int i = 0; i < obj.Size(); ++i) { ret.mValues.value.push_back(ReadExtensions(name, obj[i])); } - } - else if (obj.IsNumber()) { + } else if (obj.IsNumber()) { if (obj.IsUint64()) { ret.mUint64Value.value = obj.GetUint64(); ret.mUint64Value.isPresent = true; @@ -1360,12 +1582,10 @@ inline CustomExtension ReadExtensions(const char *name, Value& obj) { ret.mDoubleValue.value = obj.GetDouble(); ret.mDoubleValue.isPresent = true; } - } - else if (obj.IsString()) { + } else if (obj.IsString()) { ReadValue(obj, ret.mStringValue); ret.mStringValue.isPresent = true; - } - else if (obj.IsBool()) { + } else if (obj.IsBool()) { ret.mBoolValue.value = obj.GetBool(); ret.mBoolValue.isPresent = true; } @@ -1411,7 +1631,7 @@ inline void Node::Read(Value &obj, Asset &r) { } } - // Do not retrieve a skin here, just take a reference, to avoid infinite recursion + // Do not retrieve a skin here, just take a reference, to avoid infinite recursion // Skins will be properly loaded later Value *curSkin = FindUInt(obj, "skin"); if (nullptr != curSkin) { @@ -1641,7 +1861,7 @@ inline void Asset::Load(const std::string &pFile, bool isBinary) { if (0 != strncmp(pFile.c_str(), AI_MEMORYIO_MAGIC_FILENAME, AI_MEMORYIO_MAGIC_FILENAME_LENGTH)) { mCurrentAssetDir = glTFCommon::getCurrentAssetDir(pFile); } - + shared_ptr stream(OpenFile(pFile.c_str(), "rb", true)); if (!stream) { throw DeadlyImportError("GLTF: Could not open file for reading"); @@ -1693,10 +1913,12 @@ inline void Asset::Load(const std::string &pFile, bool isBinary) { ReadExtensionsUsed(doc); ReadExtensionsRequired(doc); - // Currently Draco is not supported +#ifndef ASSIMP_ENABLE_DRACO + // Is Draco required? 
if (extensionsRequired.KHR_draco_mesh_compression) { - throw DeadlyImportError("GLTF: Draco mesh compression not currently supported."); + throw DeadlyImportError("GLTF: Draco mesh compression not supported."); } +#endif // Prepare the dictionaries for (size_t i = 0; i < mDicts.size(); ++i) { @@ -1784,6 +2006,7 @@ inline void Asset::ReadExtensionsUsed(Document &doc) { CHECK_EXT(KHR_materials_sheen); CHECK_EXT(KHR_materials_clearcoat); CHECK_EXT(KHR_materials_transmission); + CHECK_EXT(KHR_draco_mesh_compression); #undef CHECK_EXT } @@ -1792,12 +2015,12 @@ inline IOStream *Asset::OpenFile(std::string path, const char *mode, bool /*abso #ifdef ASSIMP_API return mIOSystem->Open(path, mode); #else - if (path.size() < 2) return 0; + if (path.size() < 2) return nullptr; if (!absolute && path[1] != ':' && path[0] != '/') { // relative? path = mCurrentAssetDir + path; } FILE *f = fopen(path.c_str(), mode); - return f ? new IOStream(f) : 0; + return f ? new IOStream(f) : nullptr; #endif } @@ -1831,7 +2054,7 @@ inline std::string Asset::FindUniqueID(const std::string &str, const char *suffi } #if _MSC_VER -# pragma warning(pop) +#pragma warning(pop) #endif // _MSC_VER } // namespace glTF2 diff --git a/code/AssetLib/glTF2/glTF2AssetWriter.inl b/code/AssetLib/glTF2/glTF2AssetWriter.inl index 65fddaec5..ab30e418a 100644 --- a/code/AssetLib/glTF2/glTF2AssetWriter.inl +++ b/code/AssetLib/glTF2/glTF2AssetWriter.inl @@ -571,7 +571,6 @@ namespace glTF2 { inline void Write(Value& obj, Node& n, AssetWriter& w) { - if (n.matrix.isPresent) { Value val; obj.AddMember("matrix", MakeValue(val, n.matrix.value, w.mAl).Move(), w.mAl); @@ -597,14 +596,13 @@ namespace glTF2 { obj.AddMember("mesh", n.meshes[0]->index, w.mAl); } - AddRefsVector(obj, "skeletons", n.skeletons, w.mAl); - if (n.skin) { obj.AddMember("skin", n.skin->index, w.mAl); } - - if (!n.jointName.empty()) { - obj.AddMember("jointName", n.jointName, w.mAl); + + //gltf2 spec does not support "skeletons" under node + if(n.skeletons.size()) { + AddRefsVector(obj, "skeletons", n.skeletons, w.mAl); } } diff --git a/code/AssetLib/glTF2/glTF2Exporter.cpp b/code/AssetLib/glTF2/glTF2Exporter.cpp index 565117ddb..e34125258 100644 --- a/code/AssetLib/glTF2/glTF2Exporter.cpp +++ b/code/AssetLib/glTF2/glTF2Exporter.cpp @@ -1095,6 +1095,7 @@ void glTF2Exporter::ExportMeshes() //---------------------------------------- // Finish the skin // Create the Accessor for skinRef->inverseBindMatrices + bool bAddCustomizedProperty = this->mProperties->HasPropertyBool("GLTF2_CUSTOMIZE_PROPERTY"); if (createSkin) { mat4* invBindMatrixData = new mat4[inverseBindMatricesData.size()]; for ( unsigned int idx_joint = 0; idx_joint < inverseBindMatricesData.size(); ++idx_joint) { @@ -1110,7 +1111,7 @@ void glTF2Exporter::ExportMeshes() // Identity Matrix =====> skinRef->bindShapeMatrix // Temporary. Hard-coded identity matrix here - skinRef->bindShapeMatrix.isPresent = true; + skinRef->bindShapeMatrix.isPresent = bAddCustomizedProperty; IdentityMatrix4(skinRef->bindShapeMatrix.value); // Find nodes that contain a mesh with bones and add "skeletons" and "skin" attributes to those nodes. 
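Editor's note on the hunk above: the exporter now gates the non-spec `bindShapeMatrix` (and, further down in this patch, the per-node `"skeletons"` array) behind a `"GLTF2_CUSTOMIZE_PROPERTY"` export property. The following is a minimal, hedged usage sketch of how a caller could opt back into that legacy output; it is not part of the patch, and the helper name and output file name are illustrative only.

```cpp
#include <assimp/Exporter.hpp>
#include <assimp/scene.h>

// Sketch only: enable the legacy glTF2 skeleton output gated by this patch.
bool ExportWithLegacySkeletons(const aiScene *scene) {
    Assimp::ExportProperties props;
    // When true, the exporter keeps writing bindShapeMatrix and per-node
    // "skeletons" arrays, which the core glTF 2.0 spec does not define.
    props.SetPropertyBool("GLTF2_CUSTOMIZE_PROPERTY", true);

    Assimp::Exporter exporter;
    return exporter.Export(scene, "gltf2", "model.gltf",
                           0u /* no extra post-processing */, &props) == AI_SUCCESS;
}
```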
@@ -1131,7 +1132,8 @@ void glTF2Exporter::ExportMeshes() std::string meshID = mesh->id; FindMeshNode(rootNode, meshNode, meshID); Ref rootJoint = FindSkeletonRootJoint(skinRef); - meshNode->skeletons.push_back(rootJoint); + if(bAddCustomizedProperty) + meshNode->skeletons.push_back(rootJoint); meshNode->skin = skinRef; } delete[] invBindMatrixData; @@ -1229,7 +1231,7 @@ unsigned int glTF2Exporter::ExportNode(const aiNode* n, Ref& parent) node->name = name; if (!n->mTransformation.IsIdentity()) { - if (mScene->mNumAnimations > 0) { + if (mScene->mNumAnimations > 0 || (mProperties && mProperties->HasPropertyBool("GLTF2_NODE_IN_TRS"))) { aiQuaternion quaternion; n->mTransformation.Decompose(*reinterpret_cast(&node->scale.value), quaternion, *reinterpret_cast(&node->translation.value)); @@ -1386,6 +1388,7 @@ void glTF2Exporter::ExportAnimations() nameAnim = anim->mName.C_Str(); } Ref animRef = mAsset->animations.Create(nameAnim); + animRef->name = nameAnim; for (unsigned int channelIndex = 0; channelIndex < anim->mNumChannels; ++channelIndex) { const aiNodeAnim* nodeChannel = anim->mChannels[channelIndex]; diff --git a/code/AssetLib/glTF2/glTF2Importer.cpp b/code/AssetLib/glTF2/glTF2Importer.cpp index ac3b7144e..78ee446e2 100644 --- a/code/AssetLib/glTF2/glTF2Importer.cpp +++ b/code/AssetLib/glTF2/glTF2Importer.cpp @@ -383,6 +383,22 @@ static inline bool CheckValidFacesIndices(aiFace *faces, unsigned nFaces, unsign } #endif // ASSIMP_BUILD_DEBUG +template +aiColor4D* GetVertexColorsForType(glTF2::Ref input) { + float max = std::numeric_limits::max(); + aiColor4t* colors; + input->ExtractData(colors); + auto output = new aiColor4D[input->count]; + for (size_t i = 0; i < input->count; i++) { + output[i] = aiColor4D( + colors[i].r / max, colors[i].g / max, + colors[i].b / max, colors[i].a / max + ); + } + delete[] colors; + return output; +} + void glTF2Importer::ImportMeshes(glTF2::Asset &r) { ASSIMP_LOG_DEBUG_F("Importing ", r.meshes.Size(), " meshes"); std::vector> meshes; @@ -436,24 +452,32 @@ void glTF2Importer::ImportMeshes(glTF2::Asset &r) { } if (attr.normal.size() > 0 && attr.normal[0]) { - attr.normal[0]->ExtractData(aim->mNormals); + if (attr.normal[0]->count != aim->mNumVertices) { + DefaultLogger::get()->warn("Normal count in mesh \"" + mesh.name + "\" does not match the vertex count, normals ignored."); + } else { + attr.normal[0]->ExtractData(aim->mNormals); - // only extract tangents if normals are present - if (attr.tangent.size() > 0 && attr.tangent[0]) { - // generate bitangents from normals and tangents according to spec - Tangent *tangents = nullptr; + // only extract tangents if normals are present + if (attr.tangent.size() > 0 && attr.tangent[0]) { + if (attr.tangent[0]->count != aim->mNumVertices) { + DefaultLogger::get()->warn("Tangent count in mesh \"" + mesh.name + "\" does not match the vertex count, tangents ignored."); + } else { + // generate bitangents from normals and tangents according to spec + Tangent *tangents = nullptr; - attr.tangent[0]->ExtractData(tangents); + attr.tangent[0]->ExtractData(tangents); - aim->mTangents = new aiVector3D[aim->mNumVertices]; - aim->mBitangents = new aiVector3D[aim->mNumVertices]; + aim->mTangents = new aiVector3D[aim->mNumVertices]; + aim->mBitangents = new aiVector3D[aim->mNumVertices]; - for (unsigned int i = 0; i < aim->mNumVertices; ++i) { - aim->mTangents[i] = tangents[i].xyz; - aim->mBitangents[i] = (aim->mNormals[i] ^ tangents[i].xyz) * tangents[i].w; + for (unsigned int i = 0; i < aim->mNumVertices; ++i) { + 
aim->mTangents[i] = tangents[i].xyz; + aim->mBitangents[i] = (aim->mNormals[i] ^ tangents[i].xyz) * tangents[i].w; + } + + delete[] tangents; + } } - - delete[] tangents; } } @@ -463,7 +487,17 @@ void glTF2Importer::ImportMeshes(glTF2::Asset &r) { "\" does not match the vertex count"); continue; } - attr.color[c]->ExtractData(aim->mColors[c]); + + auto componentType = attr.color[c]->componentType; + if (componentType == glTF2::ComponentType_FLOAT) { + attr.color[c]->ExtractData(aim->mColors[c]); + } else { + if (componentType == glTF2::ComponentType_UNSIGNED_BYTE) { + aim->mColors[c] = GetVertexColorsForType(attr.color[c]); + } else if (componentType == glTF2::ComponentType_UNSIGNED_SHORT) { + aim->mColors[c] = GetVertexColorsForType(attr.color[c]); + } + } } for (size_t tc = 0; tc < attr.texcoord.size() && tc < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++tc) { if (!attr.texcoord[tc]) { diff --git a/code/CMakeLists.txt b/code/CMakeLists.txt index bbcad86e5..dc5e0523a 100644 --- a/code/CMakeLists.txt +++ b/code/CMakeLists.txt @@ -43,7 +43,7 @@ # 3) Add libassimp using the file lists (eliminates duplication of file names between # source groups and library command) # -cmake_minimum_required( VERSION 3.0 ) +cmake_minimum_required( VERSION 3.10 ) SET( HEADER_PATH ../include/assimp ) if(NOT ANDROID AND ASSIMP_ANDROID_JNIIOSYSTEM) @@ -665,6 +665,10 @@ if (NOT ASSIMP_NO_EXPORT) AssetLib/3MF/D3MFExporter.h AssetLib/3MF/D3MFExporter.cpp) + ADD_ASSIMP_EXPORTER( PBRT + Pbrt/PbrtExporter.h + Pbrt/PbrtExporter.cpp) + ADD_ASSIMP_EXPORTER( ASSJSON AssetLib/Assjson/cencode.c AssetLib/Assjson/cencode.h @@ -1117,6 +1121,11 @@ IF (ASSIMP_BUILD_NONFREE_C4D_IMPORTER) INCLUDE_DIRECTORIES(${C4D_INCLUDES}) ENDIF () +IF (ASSIMP_BUILD_DRACO) + INCLUDE_DIRECTORIES(${draco_INCLUDE_DIRS}) + ADD_DEFINITIONS( -DASSIMP_ENABLE_DRACO ) +ENDIF() + ADD_LIBRARY( assimp ${assimp_src} ) ADD_LIBRARY(assimp::assimp ALIAS assimp) @@ -1148,8 +1157,15 @@ IF(ASSIMP_HUNTER_ENABLED) zip::zip pugixml ) + + if (ASSIMP_BUILD_DRACO) + target_link_libraries(assimp PUBLIC ${draco_LIBRARIES}) + endif() ELSE() - TARGET_LINK_LIBRARIES(assimp ${ZLIB_LIBRARIES} ${OPENDDL_PARSER_LIBRARIES} ) + TARGET_LINK_LIBRARIES(assimp ${ZLIB_LIBRARIES} ${OPENDDL_PARSER_LIBRARIES}) + if (ASSIMP_BUILD_DRACO) + target_link_libraries(assimp ${draco_LIBRARIES}) + endif() ENDIF() if(ASSIMP_ANDROID_JNIIOSYSTEM) diff --git a/code/Common/Exporter.cpp b/code/Common/Exporter.cpp index 207b93fc7..5f78a897c 100644 --- a/code/Common/Exporter.cpp +++ b/code/Common/Exporter.cpp @@ -138,6 +138,9 @@ void ExportSceneM3DA(const char*, IOSystem*, const aiScene*, const ExportPropert #ifndef ASSIMP_BUILD_NO_ASSJSON_EXPORTER void ExportAssimp2Json(const char* , IOSystem*, const aiScene* , const Assimp::ExportProperties*); #endif +#ifndef ASSIMP_BUILD_NO_PBRT_EXPORTER +void ExportScenePbrt(const char*, IOSystem*, const aiScene*, const ExportProperties*); +#endif static void setupExporterArray(std::vector &exporters) { (void)exporters; @@ -221,6 +224,10 @@ static void setupExporterArray(std::vector &exporte exporters.push_back(Exporter::ExportFormatEntry("3mf", "The 3MF-File-Format", "3mf", &ExportScene3MF, 0)); #endif +#ifndef ASSIMP_BUILD_NO_PBRT_EXPORTER + exporters.push_back(Exporter::ExportFormatEntry("pbrt", "pbrt-v4 scene description file", "pbrt", &ExportScenePbrt, aiProcess_Triangulate | aiProcess_SortByPType)); +#endif + #ifndef ASSIMP_BUILD_NO_ASSJSON_EXPORTER exporters.push_back(Exporter::ExportFormatEntry("assjson", "Assimp JSON Document", "json", &ExportAssimp2Json, 0)); #endif 
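Editor's note on the `Exporter.cpp` hunk above: the new pbrt backend is registered under the format id `"pbrt"` with `aiProcess_Triangulate | aiProcess_SortByPType` as its default preprocessing. A minimal, hedged sketch of driving it through the public API follows; it is not part of the patch, and the input/output file names and helper function are hypothetical.

```cpp
#include <assimp/Exporter.hpp>
#include <assimp/Importer.hpp>
#include <assimp/postprocess.h>
#include <assimp/scene.h>

// Sketch only: load any supported scene and write it out as a pbrt-v4 file.
bool ConvertToPbrt(const char *inputFile) {
    Assimp::Importer importer;
    const aiScene *scene = importer.ReadFile(inputFile, aiProcess_Triangulate);
    if (!scene) {
        return false;
    }

    Assimp::Exporter exporter;
    // "pbrt" is the format id registered by this patch; the registration entry
    // also requests aiProcess_Triangulate | aiProcess_SortByPType preprocessing.
    return exporter.Export(scene, "pbrt", "scene.pbrt") == AI_SUCCESS;
}
```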
diff --git a/code/Common/Importer.cpp b/code/Common/Importer.cpp index 38eb63f40..ccd664d91 100644 --- a/code/Common/Importer.cpp +++ b/code/Common/Importer.cpp @@ -78,6 +78,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #include +#include #include #include #include diff --git a/code/Common/Importer.h b/code/Common/Importer.h index eb70bc38f..d07a67030 100644 --- a/code/Common/Importer.h +++ b/code/Common/Importer.h @@ -44,6 +44,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef INCLUDED_AI_IMPORTER_H #define INCLUDED_AI_IMPORTER_H +#include #include #include #include diff --git a/code/Pbrt/PbrtExporter.cpp b/code/Pbrt/PbrtExporter.cpp new file mode 100644 index 000000000..f1e4e46d8 --- /dev/null +++ b/code/Pbrt/PbrtExporter.cpp @@ -0,0 +1,949 @@ +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2020, assimp team + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above +copyright notice, this list of conditions and the +following disclaimer. + +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the +following disclaimer in the documentation and/or other +materials provided with the distribution. + +* Neither the name of the assimp team, nor the names of its +contributors may be used to endorse or promote products +derived from this software without specific prior +written permission of the assimp team. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- +*/ + +/* TODO: + +Material improvements: +- don't export embedded textures that we're not going to use +- diffuse roughness +- what is with the uv mapping, uv transform not coming through?? +- metal? glass? mirror? detect these better? + - eta/k from RGB? +- emissive textures: warn at least + +Other: +- use aiProcess_GenUVCoords if needed to handle spherical/planar uv mapping? +- don't build up a big string in memory but write directly to a file +- aiProcess_Triangulate meshes to get triangles only? 
+- animation (allow specifying a time) + + */ + +#ifndef ASSIMP_BUILD_NO_EXPORT +#ifndef ASSIMP_BUILD_NO_PBRT_EXPORTER + +#include "PbrtExporter.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define STB_IMAGE_IMPLEMENTATION +#include "stb_image.h" + +using namespace Assimp; + +namespace Assimp { + +void ExportScenePbrt ( + const char* pFile, + IOSystem* pIOSystem, + const aiScene* pScene, + const ExportProperties* /*pProperties*/ +){ + std::string path = DefaultIOSystem::absolutePath(std::string(pFile)); + std::string file = DefaultIOSystem::completeBaseName(std::string(pFile)); + + // initialize the exporter + PbrtExporter exporter(pScene, pIOSystem, path, file); +} + +} // end of namespace Assimp + +// Constructor +PbrtExporter::PbrtExporter ( + const aiScene* pScene, IOSystem* pIOSystem, + const std::string path, const std::string file) +: mScene(pScene), + mIOSystem(pIOSystem), + mPath(path), + mFile(file) +{ + // Export embedded textures. + if (mScene->mNumTextures > 0) + if (!mIOSystem->CreateDirectory("textures")) + throw DeadlyExportError("Could not create textures/ directory."); + for (unsigned int i = 0; i < mScene->mNumTextures; ++i) { + aiTexture* tex = mScene->mTextures[i]; + std::string fn = CleanTextureFilename(tex->mFilename, false); + std::cerr << "Writing embedded texture: " << tex->mFilename.C_Str() << " -> " + << fn << "\n"; + + std::unique_ptr outfile(mIOSystem->Open(fn, "wb")); + if (!outfile) { + throw DeadlyExportError("could not open output texture file: " + fn); + } + if (tex->mHeight == 0) { + // It's binary data + outfile->Write(tex->pcData, tex->mWidth, 1); + } else { + std::cerr << fn << ": TODO handle uncompressed embedded textures.\n"; + } + } + +#if 0 + // Debugging: print the full node hierarchy + std::function visitNode; + visitNode = [&](aiNode* node, int depth) { + for (int i = 0; i < depth; ++i) std::cerr << " "; + std::cerr << node->mName.C_Str() << "\n"; + for (int i = 0; i < node->mNumChildren; ++i) + visitNode(node->mChildren[i], depth + 1); + }; + visitNode(mScene->mRootNode, 0); +#endif + + mOutput.precision(ASSIMP_AI_REAL_TEXT_PRECISION); + + // Write everything out + WriteMetaData(); + WriteCameras(); + WriteWorldDefinition(); + + // And write the file to disk... 
+ std::unique_ptr outfile(mIOSystem->Open(mPath,"wt")); + if (!outfile) { + throw DeadlyExportError("could not open output .pbrt file: " + std::string(mFile)); + } + outfile->Write(mOutput.str().c_str(), mOutput.str().length(), 1); +} + +// Destructor +PbrtExporter::~PbrtExporter() { + // Empty +} + +void PbrtExporter::WriteMetaData() { + mOutput << "#############################\n"; + mOutput << "# Scene metadata:\n"; + + aiMetadata* pMetaData = mScene->mMetaData; + for (unsigned int i = 0; i < pMetaData->mNumProperties; i++) { + mOutput << "# - "; + mOutput << pMetaData->mKeys[i].C_Str() << " :"; + switch(pMetaData->mValues[i].mType) { + case AI_BOOL : { + mOutput << " "; + if (*static_cast(pMetaData->mValues[i].mData)) + mOutput << "TRUE\n"; + else + mOutput << "FALSE\n"; + break; + } + case AI_INT32 : { + mOutput << " " << + *static_cast(pMetaData->mValues[i].mData) << + std::endl; + break; + } + case AI_UINT64 : + mOutput << " " << + *static_cast(pMetaData->mValues[i].mData) << + std::endl; + break; + case AI_FLOAT : + mOutput << " " << + *static_cast(pMetaData->mValues[i].mData) << + std::endl; + break; + case AI_DOUBLE : + mOutput << " " << + *static_cast(pMetaData->mValues[i].mData) << + std::endl; + break; + case AI_AISTRING : { + aiString* value = + static_cast(pMetaData->mValues[i].mData); + std::string svalue = value->C_Str(); + std::size_t found = svalue.find_first_of("\n"); + mOutput << "\n"; + while (found != std::string::npos) { + mOutput << "# " << svalue.substr(0, found) << "\n"; + svalue = svalue.substr(found + 1); + found = svalue.find_first_of("\n"); + } + mOutput << "# " << svalue << "\n"; + break; + } + case AI_AIVECTOR3D : + // TODO + mOutput << " Vector3D (unable to print)\n"; + break; + default: + // AI_META_MAX and FORCE_32BIT + mOutput << " META_MAX or FORCE_32Bit (unable to print)\n"; + break; + } + } +} + +void PbrtExporter::WriteCameras() { + mOutput << "\n"; + mOutput << "###############################\n"; + mOutput << "# Cameras (" << mScene->mNumCameras << ") total\n\n"; + + if (mScene->mNumCameras == 0) { + std::cerr << "Warning: No cameras found in scene file.\n"; + return; + } + + if (mScene->mNumCameras > 1) { + std::cerr << "Multiple cameras found in scene file; defaulting to first one specified.\n"; + } + + for (unsigned int i = 0; i < mScene->mNumCameras; i++) { + WriteCamera(i); + } +} + +aiMatrix4x4 PbrtExporter::GetNodeTransform(const aiString &name) const { + aiMatrix4x4 m; + auto node = mScene->mRootNode->FindNode(name); + if (!node) { + std::cerr << '"' << name.C_Str() << "\": node not found in scene tree.\n"; + throw DeadlyExportError("Could not find node"); + } + else { + while (node) { + m = node->mTransformation * m; + node = node->mParent; + } + } + return m; +} + +std::string PbrtExporter::TransformAsString(const aiMatrix4x4 &m) { + // Transpose on the way out to match pbrt's expected layout (sanity + // check: the translation component should be the last 3 entries + // before a '1' as the final entry in the matrix, assuming it's + // non-projective.) 
+ std::stringstream s; + s << m.a1 << " " << m.b1 << " " << m.c1 << " " << m.d1 << " " + << m.a2 << " " << m.b2 << " " << m.c2 << " " << m.d2 << " " + << m.a3 << " " << m.b3 << " " << m.c3 << " " << m.d3 << " " + << m.a4 << " " << m.b4 << " " << m.c4 << " " << m.d4; + return s.str(); +} + +void PbrtExporter::WriteCamera(int i) { + auto camera = mScene->mCameras[i]; + bool cameraActive = i == 0; + + mOutput << "# - Camera " << i+1 << ": " + << camera->mName.C_Str() << "\n"; + + // Get camera aspect ratio + float aspect = camera->mAspect; + if (aspect == 0) { + aspect = 4.f/3.f; + mOutput << "# - Aspect ratio : 1.33333 (no aspect found, defaulting to 4/3)\n"; + } else { + mOutput << "# - Aspect ratio : " << aspect << "\n"; + } + + // Get Film xres and yres + int xres = 1920; + int yres = (int)round(xres/aspect); + + // Print Film for this camera + if (!cameraActive) + mOutput << "# "; + mOutput << "Film \"rgb\" \"string filename\" \"" << mFile << ".exr\"\n"; + if (!cameraActive) + mOutput << "# "; + mOutput << " \"integer xresolution\" [" << xres << "]\n"; + if (!cameraActive) + mOutput << "# "; + mOutput << " \"integer yresolution\" [" << yres << "]\n"; + + // Get camera fov + float hfov = AI_RAD_TO_DEG(camera->mHorizontalFOV); + float fov = (aspect >= 1.0) ? hfov : (hfov * aspect); + if (fov < 5) { + std::cerr << fov << ": suspiciously low field of view specified by camera. Setting to 45 degrees.\n"; + fov = 45; + } + + // Get camera transform + aiMatrix4x4 worldFromCamera = GetNodeTransform(camera->mName); + + // Print Camera LookAt + auto position = worldFromCamera * camera->mPosition; + auto lookAt = worldFromCamera * (camera->mPosition + camera->mLookAt); + aiMatrix3x3 worldFromCamera3(worldFromCamera); + auto up = worldFromCamera3 * camera->mUp; + up.Normalize(); + + if (!cameraActive) + mOutput << "# "; + mOutput << "Scale -1 1 1\n"; // right handed -> left handed + if (!cameraActive) + mOutput << "# "; + mOutput << "LookAt " + << position.x << " " << position.y << " " << position.z << "\n"; + if (!cameraActive) + mOutput << "# "; + mOutput << " " + << lookAt.x << " " << lookAt.y << " " << lookAt.z << "\n"; + if (!cameraActive) + mOutput << "# "; + mOutput << " " + << up.x << " " << up.y << " " << up.z << "\n"; + + // Print camera descriptor + if (!cameraActive) + mOutput << "# "; + mOutput << "Camera \"perspective\" \"float fov\" " << "[" << fov << "]\n\n"; +} + +void PbrtExporter::WriteWorldDefinition() { + // Figure out which meshes are referenced multiple times; those will be + // emitted as object instances and the rest will be emitted directly. 
+ std::map meshUses; + std::function visitNode; + visitNode = [&](aiNode* node) { + for (unsigned int i = 0; i < node->mNumMeshes; ++i) + ++meshUses[node->mMeshes[i]]; + for (unsigned int i = 0; i < node->mNumChildren; ++i) + visitNode(node->mChildren[i]); + }; + visitNode(mScene->mRootNode); + int nInstanced = 0, nUnused = 0; + for (const auto &u : meshUses) { + if (u.second == 0) ++nUnused; + else if (u.second > 1) ++nInstanced; + } + std::cerr << nInstanced << " / " << mScene->mNumMeshes << " meshes instanced.\n"; + if (nUnused) + std::cerr << nUnused << " meshes defined but not used in scene.\n"; + + mOutput << "WorldBegin\n"; + + WriteLights(); + WriteTextures(); + WriteMaterials(); + + // Object instance definitions + mOutput << "# Object instance definitions\n\n"; + for (const auto &mu : meshUses) { + if (mu.second > 1) { + WriteInstanceDefinition(mu.first); + } + } + + mOutput << "# Geometry\n\n"; + aiMatrix4x4 worldFromObject; + WriteGeometricObjects(mScene->mRootNode, worldFromObject, meshUses); +} + +void PbrtExporter::WriteTextures() { + mOutput << "###################\n"; + mOutput << "# Textures\n\n"; + + C_STRUCT aiString path; + aiTextureMapping mapping; + unsigned int uvIndex; + ai_real blend; + aiTextureOp op; + aiTextureMapMode mapMode[3]; + + // For every material in the scene, + for (unsigned int m = 0 ; m < mScene->mNumMaterials; m++) { + auto material = mScene->mMaterials[m]; + // Parse through all texture types, + for (int tt = 1; tt <= aiTextureType_UNKNOWN; tt++) { + int ttCount = material->GetTextureCount(aiTextureType(tt)); + // ... and get every texture + for (int t = 0; t < ttCount; t++) { + // TODO write out texture specifics + // TODO UV transforms may be material specific + // so those may need to be baked into unique tex name + if (material->GetTexture(aiTextureType(tt), t, &path, &mapping, + &uvIndex, &blend, &op, mapMode) != AI_SUCCESS) { + std::cerr << "Error getting texture! " << m << " " << tt << " " << t << "\n"; + continue; + } + + std::string filename = CleanTextureFilename(path); + + if (uvIndex != 0) + std::cerr << "Warning: texture \"" << filename << "\" uses uv set #" << + uvIndex << " but the pbrt converter only exports uv set 0.\n"; +#if 0 + if (op != aiTextureOp_Multiply) + std::cerr << "Warning: unexpected texture op " << (int)op << + " encountered for texture \"" << + filename << "\". The resulting scene may have issues...\n"; + if (blend != 1) + std::cerr << "Blend value of " << blend << " found for texture \"" << filename + << "\" but not handled in converter.\n"; +#endif + + std::string mappingString; +#if 0 + if (mapMode[0] != mapMode[1]) + std::cerr << "Different texture boundary mode for u and v for texture \"" << + filename << "\". 
Using u for both.\n"; + switch (mapMode[0]) { + case aiTextureMapMode_Wrap: + // pbrt's default + break; + case aiTextureMapMode_Clamp: + mappingString = "\"string wrap\" \"clamp\""; + break; + case aiTextureMapMode_Decal: + std::cerr << "Decal texture boundary mode not supported by pbrt for texture \"" << + filename << "\"\n"; + break; + case aiTextureMapMode_Mirror: + std::cerr << "Mirror texture boundary mode not supported by pbrt for texture \"" << + filename << "\"\n"; + break; + default: + std::cerr << "Unexpected map mode " << (int)mapMode[0] << " for texture \"" << + filename << "\"\n"; + //throw DeadlyExportError("Unexpected aiTextureMapMode"); + } +#endif + +#if 0 + aiUVTransform uvTransform; + if (material->Get(AI_MATKEY_TEXTURE(tt, t), uvTransform) == AI_SUCCESS) { + mOutput << "# UV transform " << uvTransform.mTranslation.x << " " + << uvTransform.mTranslation.y << " " << uvTransform.mScaling.x << " " + << uvTransform.mScaling.y << " " << uvTransform.mRotation << "\n"; + } +#endif + + std::string texName, texType, texOptions; + if (aiTextureType(tt) == aiTextureType_SHININESS || + aiTextureType(tt) == aiTextureType_OPACITY || + aiTextureType(tt) == aiTextureType_HEIGHT || + aiTextureType(tt) == aiTextureType_DISPLACEMENT || + aiTextureType(tt) == aiTextureType_METALNESS || + aiTextureType(tt) == aiTextureType_DIFFUSE_ROUGHNESS) { + texType = "float"; + texName = std::string("float:") + RemoveSuffix(filename); + + if (aiTextureType(tt) == aiTextureType_SHININESS) { + texOptions = " \"bool invert\" true\n"; + texName += "_Roughness"; + } + } else if (aiTextureType(tt) == aiTextureType_DIFFUSE || + aiTextureType(tt) == aiTextureType_BASE_COLOR) { + texType = "spectrum"; + texName = std::string("rgb:") + RemoveSuffix(filename); + } + + // Don't export textures we're not actually going to use... + if (texName.empty()) + continue; + + if (mTextureSet.find(texName) == mTextureSet.end()) { + mOutput << "Texture \"" << texName << "\" \"" << texType << "\" \"imagemap\"\n" + << texOptions + << " \"string filename\" \"" << filename << "\" " << mappingString << '\n'; + mTextureSet.insert(texName); + } + + // Also emit a float version for use with alpha testing... + if ((aiTextureType(tt) == aiTextureType_DIFFUSE || + aiTextureType(tt) == aiTextureType_BASE_COLOR) && + TextureHasAlphaMask(filename)) { + texType = "float"; + texName = std::string("alpha:") + filename; + if (mTextureSet.find(texName) == mTextureSet.end()) { + mOutput << "Texture \"" << texName << "\" \"" << texType << "\" \"imagemap\"\n" + << " \"string filename\" \"" << filename << "\" " << mappingString << '\n'; + mTextureSet.insert(texName); + } + } + } + } + } +} + +bool PbrtExporter::TextureHasAlphaMask(const std::string &filename) { + // TODO: STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp); + // quick return if it's 3 + + int xSize, ySize, nComponents; + unsigned char *data = stbi_load(filename.c_str(), &xSize, &ySize, &nComponents, 0); + if (!data) { + std::cerr << filename << ": unable to load texture and check for alpha mask in texture. 
" + "Geometry will not be alpha masked with this texture.\n"; + return false; + } + + bool hasMask = false; + switch (nComponents) { + case 1: + for (int i = 0; i < xSize * ySize; ++i) + if (data[i] != 255) { + hasMask = true; + break; + } + break; + case 2: + for (int y = 0; y < ySize; ++y) + for (int x = 0; x < xSize; ++x) + if (data[2 * (x + y * xSize) + 1] != 255) { + hasMask = true; + break; + } + break; + case 3: + break; + case 4: + for (int y = 0; y < ySize; ++y) + for (int x = 0; x < xSize; ++x) + if (data[4 * (x + y * xSize) + 3] != 255) { + hasMask = true; + break; + } + break; + default: + std::cerr << filename << ": unexpected number of image channels, " << + nComponents << ".\n"; + } + + stbi_image_free(data); + return hasMask; +} + +void PbrtExporter::WriteMaterials() { + mOutput << "\n"; + mOutput << "####################\n"; + mOutput << "# Materials (" << mScene->mNumMaterials << ") total\n\n"; + + for (unsigned int i = 0; i < mScene->mNumMaterials; i++) { + WriteMaterial(i); + } + mOutput << "\n\n"; +} + +void PbrtExporter::WriteMaterial(int m) { + aiMaterial* material = mScene->mMaterials[m]; + + // get material name + auto materialName = material->GetName(); + mOutput << std::endl << "# - Material " << m+1 << ": " << materialName.C_Str() << "\n"; + + // Print out number of properties + mOutput << "# - Number of Material Properties: " << material->mNumProperties << "\n"; + + // Print out texture type counts + mOutput << "# - Non-Zero Texture Type Counts: "; + for (int i = 1; i <= aiTextureType_UNKNOWN; i++) { + int count = material->GetTextureCount(aiTextureType(i)); + if (count > 0) + mOutput << TextureTypeToString(aiTextureType(i)) << ": " << count << " "; + } + mOutput << "\n"; + + auto White = [](aiColor3D c) { return c.r == 1 && c.g == 1 && c.b == 1; }; + auto Black = [](aiColor3D c) { return c.r == 0 && c.g == 0 && c.b == 0; }; + + aiColor3D diffuse, specular, transparency; + bool constantDiffuse = (material->Get(AI_MATKEY_COLOR_DIFFUSE, diffuse) == AI_SUCCESS && + !White(diffuse)); + bool constantSpecular = (material->Get(AI_MATKEY_COLOR_SPECULAR, specular) == AI_SUCCESS && + !White(specular)); + bool constantTransparency = (material->Get(AI_MATKEY_COLOR_TRANSPARENT, transparency) == AI_SUCCESS && + !Black(transparency)); + + float opacity, shininess, shininessStrength, eta; + bool constantOpacity = (material->Get(AI_MATKEY_OPACITY, opacity) == AI_SUCCESS && + opacity != 0); + bool constantShininess = material->Get(AI_MATKEY_SHININESS, shininess) == AI_SUCCESS; + bool constantShininessStrength = material->Get(AI_MATKEY_SHININESS_STRENGTH, shininessStrength) == AI_SUCCESS; + bool constantEta = (material->Get(AI_MATKEY_REFRACTI, eta) == AI_SUCCESS && + eta != 1); + + mOutput << "# - Constants: diffuse " << constantDiffuse << " specular " << constantSpecular << + " transprency " << constantTransparency << " opacity " << constantOpacity << + " shininess " << constantShininess << " shininess strength " << constantShininessStrength << + " eta " << constantEta << "\n"; + + aiString roughnessMap; + if (material->Get(AI_MATKEY_TEXTURE_SHININESS(0), roughnessMap) == AI_SUCCESS) { + std::string roughnessTexture = std::string("float:") + + RemoveSuffix(CleanTextureFilename(roughnessMap)) + "_Roughness"; + mOutput << "MakeNamedMaterial \"" << materialName.C_Str() << "\"" + << " \"string type\" \"coateddiffuse\"\n" + << " \"texture roughness\" \"" << roughnessTexture << "\"\n"; + } else if (constantShininess) { + // Assume plastic for now at least + float roughness = 
std::max(0.f, 1.f - shininess); + mOutput << "MakeNamedMaterial \"" << materialName.C_Str() << "\"" + << " \"string type\" \"coateddiffuse\"\n" + << " \"float roughness\" " << roughness << "\n"; + } else + // Diffuse + mOutput << "MakeNamedMaterial \"" << materialName.C_Str() << "\"" + << " \"string type\" \"diffuse\"\n"; + + aiString diffuseTexture; + if (material->Get(AI_MATKEY_TEXTURE_DIFFUSE(0), diffuseTexture) == AI_SUCCESS) + mOutput << " \"texture reflectance\" \"rgb:" << RemoveSuffix(CleanTextureFilename(diffuseTexture)) << "\"\n"; + else + mOutput << " \"rgb reflectance\" [ " << diffuse.r << " " << diffuse.g << + " " << diffuse.b << " ]\n"; + + aiString displacementTexture, normalMap; + if (material->Get(AI_MATKEY_TEXTURE_NORMALS(0), displacementTexture) == AI_SUCCESS) + mOutput << " \"string normalmap\" \"" << CleanTextureFilename(displacementTexture) << "\"\n"; + else if (material->Get(AI_MATKEY_TEXTURE_HEIGHT(0), displacementTexture) == AI_SUCCESS) + mOutput << " \"texture displacement\" \"float:" << + RemoveSuffix(CleanTextureFilename(displacementTexture)) << "\"\n"; + else if (material->Get(AI_MATKEY_TEXTURE_DISPLACEMENT(0), displacementTexture) == AI_SUCCESS) + mOutput << " \"texture displacement\" \"float:" << + RemoveSuffix(CleanTextureFilename(displacementTexture)) << "\"\n"; +} + +std::string PbrtExporter::CleanTextureFilename(const aiString &f, bool rewriteExtension) const { + std::string fn = f.C_Str(); + // Remove directory name + size_t offset = fn.find_last_of("/\\"); + if (offset != std::string::npos) { + fn.erase(0, offset + 1); + } + + // Expect all textures in textures + fn = std::string("textures") + mIOSystem->getOsSeparator() + fn; + + // Rewrite extension for unsupported file formats. + if (rewriteExtension) { + offset = fn.rfind('.'); + if (offset != std::string::npos) { + std::string extension = fn; + extension.erase(0, offset + 1); + std::transform(extension.begin(), extension.end(), extension.begin(), + [](unsigned char c) { return (char)std::tolower(c); }); + + if (extension != "tga" && extension != "exr" && extension != "png" && + extension != "pfm" && extension != "hdr") { + std::string orig = fn; + fn.erase(offset + 1); + fn += "png"; + + // Does it already exist? Warn if not. + std::ifstream filestream(fn); + if (!filestream.good()) + std::cerr << orig << ": must convert this texture to PNG.\n"; + } + } + } + + return fn; +} + +std::string PbrtExporter::RemoveSuffix(std::string filename) { + size_t offset = filename.rfind('.'); + if (offset != std::string::npos) + filename.erase(offset); + return filename; +} + +void PbrtExporter::WriteLights() { + mOutput << "\n"; + mOutput << "#################\n"; + mOutput << "# Lights\n\n"; + if (mScene->mNumLights == 0) { + // Skip the default light if no cameras and this is flat up geometry + if (mScene->mNumCameras > 0) { + std::cerr << "No lights specified. 
Using default infinite light.\n"; + + mOutput << "AttributeBegin\n"; + mOutput << " # default light\n"; + mOutput << " LightSource \"infinite\" \"blackbody L\" [6000 1]\n"; + + mOutput << "AttributeEnd\n\n"; + } + } else { + for (unsigned int i = 0; i < mScene->mNumLights; ++i) { + const aiLight *light = mScene->mLights[i]; + + mOutput << "# Light " << light->mName.C_Str() << "\n"; + mOutput << "AttributeBegin\n"; + + aiMatrix4x4 worldFromLight = GetNodeTransform(light->mName); + mOutput << " Transform [ " << TransformAsString(worldFromLight) << " ]\n"; + + aiColor3D color = light->mColorDiffuse + light->mColorSpecular; + if (light->mAttenuationConstant != 0) + color = color * (ai_real)(1. / light->mAttenuationConstant); + + switch (light->mType) { + case aiLightSource_DIRECTIONAL: { + mOutput << " LightSource \"distant\"\n"; + mOutput << " \"point3 from\" [ " << light->mPosition.x << " " << + light->mPosition.y << " " << light->mPosition.z << " ]\n"; + aiVector3D to = light->mPosition + light->mDirection; + mOutput << " \"point3 to\" [ " << to.x << " " << to.y << " " << to.z << " ]\n"; + mOutput << " \"rgb L\" [ " << color.r << " " << color.g << " " << color.b << " ]\n"; + break; + } case aiLightSource_POINT: + mOutput << " LightSource \"distant\"\n"; + mOutput << " \"point3 from\" [ " << light->mPosition.x << " " << + light->mPosition.y << " " << light->mPosition.z << " ]\n"; + mOutput << " \"rgb L\" [ " << color.r << " " << color.g << " " << color.b << " ]\n"; + break; + case aiLightSource_SPOT: { + mOutput << " LightSource \"spot\"\n"; + mOutput << " \"point3 from\" [ " << light->mPosition.x << " " << + light->mPosition.y << " " << light->mPosition.z << " ]\n"; + aiVector3D to = light->mPosition + light->mDirection; + mOutput << " \"point3 to\" [ " << to.x << " " << to.y << " " << to.z << " ]\n"; + mOutput << " \"rgb L\" [ " << color.r << " " << color.g << " " << color.b << " ]\n"; + mOutput << " \"float coneangle\" [ " << AI_RAD_TO_DEG(light->mAngleOuterCone) << " ]\n"; + mOutput << " \"float conedeltaangle\" [ " << AI_RAD_TO_DEG(light->mAngleOuterCone - + light->mAngleInnerCone) << " ]\n"; + break; + } case aiLightSource_AMBIENT: + mOutput << "# ignored ambient light source\n"; + break; + case aiLightSource_AREA: { + aiVector3D left = light->mDirection ^ light->mUp; + // rectangle. 
center at position, direction is normal vector + float dLeft = light->mSize.x / 2, dUp = light->mSize.y / 2; + aiVector3D vertices[4] = { + light->mPosition - dLeft * left - dUp * light->mUp, + light->mPosition + dLeft * left - dUp * light->mUp, + light->mPosition - dLeft * left + dUp * light->mUp, + light->mPosition + dLeft * left + dUp * light->mUp }; + mOutput << " AreaLightSource \"diffuse\"\n"; + mOutput << " \"rgb L\" [ " << color.r << " " << color.g << " " << color.b << " ]\n"; + mOutput << " Shape \"bilinearmesh\"\n"; + mOutput << " \"point3 p\" [ "; + for (int j = 0; j < 4; ++j) + mOutput << vertices[j].x << " " << vertices[j].y << " " << vertices[j].z; + mOutput << " ]\n"; + mOutput << " \"integer indices\" [ 0 1 2 3 ]\n"; + break; + } default: + mOutput << "# ignored undefined light source type\n"; + break; + } + mOutput << "AttributeEnd\n\n"; + } + } +} + +void PbrtExporter::WriteMesh(aiMesh* mesh) { + mOutput << "# - Mesh: "; + if (mesh->mName == aiString("")) + mOutput << "\n"; + else + mOutput << mesh->mName.C_Str() << "\n"; + + mOutput << "AttributeBegin\n"; + aiMaterial* material = mScene->mMaterials[mesh->mMaterialIndex]; + mOutput << " NamedMaterial \"" << material->GetName().C_Str() << "\"\n"; + + // Handle area lights + aiColor3D emission; + if (material->Get(AI_MATKEY_COLOR_EMISSIVE, emission) == AI_SUCCESS && + (emission.r > 0 || emission.g > 0 || emission.b > 0)) + mOutput << " AreaLightSource \"diffuse\" \"rgb L\" [ " << emission.r << + " " << emission.g << " " << emission.b << " ]\n"; + + // Check if any types other than tri + if ( (mesh->mPrimitiveTypes & aiPrimitiveType_POINT) + || (mesh->mPrimitiveTypes & aiPrimitiveType_LINE) + || (mesh->mPrimitiveTypes & aiPrimitiveType_POLYGON)) { + std::cerr << "Error: ignoring point / line / polygon mesh " << mesh->mName.C_Str() << ".\n"; + return; + } + + // Alpha mask + std::string alpha; + aiString opacityTexture; + if (material->Get(AI_MATKEY_TEXTURE_OPACITY(0), opacityTexture) == AI_SUCCESS || + material->Get(AI_MATKEY_TEXTURE_DIFFUSE(0), opacityTexture) == AI_SUCCESS) { + // material->Get(AI_MATKEY_TEXTURE_BASE_COLOR(0), opacityTexture) == AI_SUCCESS) + std::string texName = std::string("alpha:") + CleanTextureFilename(opacityTexture); + if (mTextureSet.find(texName) != mTextureSet.end()) + alpha = std::string(" \"texture alpha\" \"") + texName + "\"\n"; + } else { + float opacity = 1; + if (material->Get(AI_MATKEY_OPACITY, opacity) == AI_SUCCESS && opacity < 1) + alpha = std::string(" \"float alpha\" [ ") + std::to_string(opacity) + " ]\n"; + } + + // Output the shape specification + mOutput << "Shape \"trianglemesh\"\n" << + alpha << + " \"integer indices\" ["; + + // Start with faces (which hold indices) + for (unsigned int i = 0; i < mesh->mNumFaces; i++) { + auto face = mesh->mFaces[i]; + if (face.mNumIndices != 3) throw DeadlyExportError("oh no not a tri!"); + + for (unsigned int j = 0; j < face.mNumIndices; j++) { + mOutput << face.mIndices[j] << " "; + } + if ((i % 7) == 6) mOutput << "\n "; + } + mOutput << "]\n"; + + // Then go to vertices + mOutput << " \"point3 P\" ["; + for (unsigned int i = 0; i < mesh->mNumVertices; i++) { + auto vector = mesh->mVertices[i]; + mOutput << vector.x << " " << vector.y << " " << vector.z << " "; + if ((i % 4) == 3) mOutput << "\n "; + } + mOutput << "]\n"; + + // Normals (if present) + if (mesh->mNormals) { + mOutput << " \"normal N\" ["; + for (unsigned int i = 0; i < mesh->mNumVertices; i++) { + auto normal = mesh->mNormals[i]; + mOutput << normal.x << " " << normal.y << 
" " << normal.z << " "; + if ((i % 4) == 3) mOutput << "\n "; + } + mOutput << "]\n"; + } + + // Tangents (if present) + if (mesh->mTangents) { + mOutput << " \"vector3 S\" ["; + for (unsigned int i = 0; i < mesh->mNumVertices; i++) { + auto tangent = mesh->mTangents[i]; + mOutput << tangent.x << " " << tangent.y << " " << tangent.z << " "; + if ((i % 4) == 3) mOutput << "\n "; + } + mOutput << "]\n"; + } + + // Texture Coords (if present) + // Find the first set of 2D texture coordinates.. + for (int i = 0; i < AI_MAX_NUMBER_OF_TEXTURECOORDS; ++i) { + if (mesh->mNumUVComponents[i] == 2) { + // assert(mesh->mTextureCoords[i] != nullptr); + aiVector3D* uv = mesh->mTextureCoords[i]; + mOutput << " \"point2 uv\" ["; + for (unsigned int j = 0; j < mesh->mNumVertices; ++j) { + mOutput << uv[j].x << " " << uv[j].y << " "; + if ((j % 6) == 5) mOutput << "\n "; + } + mOutput << "]\n"; + break; + } + } + // TODO: issue warning if there are additional UV sets? + + mOutput << "AttributeEnd\n"; +} + +void PbrtExporter::WriteInstanceDefinition(int i) { + aiMesh* mesh = mScene->mMeshes[i]; + + mOutput << "ObjectBegin \""; + if (mesh->mName == aiString("")) + mOutput << "mesh_" << i+1 << "\"\n"; + else + mOutput << mesh->mName.C_Str() << "_" << i+1 << "\"\n"; + + WriteMesh(mesh); + + mOutput << "ObjectEnd\n"; +} + +void PbrtExporter::WriteGeometricObjects(aiNode* node, aiMatrix4x4 worldFromObject, + std::map &meshUses) { + // Sometimes interior nodes have degenerate matrices?? + if (node->mTransformation.Determinant() != 0) + worldFromObject = worldFromObject * node->mTransformation; + + if (node->mNumMeshes > 0) { + mOutput << "AttributeBegin\n"; + + mOutput << " Transform [ " << TransformAsString(worldFromObject) << "]\n"; + + for (unsigned int i = 0; i < node->mNumMeshes; i++) { + aiMesh* mesh = mScene->mMeshes[node->mMeshes[i]]; + if (meshUses[node->mMeshes[i]] == 1) { + // If it's only used once in the scene, emit it directly as + // a triangle mesh. + mOutput << " # " << mesh->mName.C_Str(); + WriteMesh(mesh); + } else { + // If it's used multiple times, there will be an object + // instance for it, so emit a reference to that. + mOutput << " ObjectInstance \""; + if (mesh->mName == aiString("")) + mOutput << "mesh_" << node->mMeshes[i] + 1 << "\"\n"; + else + mOutput << mesh->mName.C_Str() << "_" << node->mMeshes[i] + 1 << "\"\n"; + } + } + mOutput << "AttributeEnd\n\n"; + } + + // Recurse through children + for (unsigned int i = 0; i < node->mNumChildren; i++) { + WriteGeometricObjects(node->mChildren[i], worldFromObject, meshUses); + } +} + +#endif // ASSIMP_BUILD_NO_PBRT_EXPORTER +#endif // ASSIMP_BUILD_NO_EXPORT diff --git a/code/Pbrt/PbrtExporter.h b/code/Pbrt/PbrtExporter.h new file mode 100644 index 000000000..31f6f1853 --- /dev/null +++ b/code/Pbrt/PbrtExporter.h @@ -0,0 +1,135 @@ +/* +Open Asset Import Library (assimp) +---------------------------------------------------------------------- + +Copyright (c) 2006-2020, assimp team + +All rights reserved. + +Redistribution and use of this software in source and binary forms, +with or without modification, are permitted provided that the +following conditions are met: + +* Redistributions of source code must retain the above +copyright notice, this list of conditions and the +following disclaimer. + +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the +following disclaimer in the documentation and/or other +materials provided with the distribution. 
+
+* Neither the name of the assimp team, nor the names of its
+contributors may be used to endorse or promote products
+derived from this software without specific prior
+written permission of the assimp team.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+*/
+
+/** @file PbrtExporter.h
+* Declares the exporter class to write a scene to a pbrt file
+*/
+#ifndef AI_PBRTEXPORTER_H_INC
+#define AI_PBRTEXPORTER_H_INC
+
+#ifndef ASSIMP_BUILD_NO_PBRT_EXPORTER
+
+#include <assimp/types.h>
+
+#include <map>
+#include <set>
+#include <sstream>
+#include <string>
+
+struct aiScene;
+struct aiNode;
+struct aiMaterial;
+struct aiMesh;
+
+namespace Assimp {
+
+class IOSystem;
+class IOStream;
+class ExportProperties;
+
+// ---------------------------------------------------------------------
+/** Helper class to export a given scene to a Pbrt file. */
+// ---------------------------------------------------------------------
+class PbrtExporter
+{
+public:
+    /// Constructor for a specific scene to export
+    PbrtExporter(const aiScene* pScene, IOSystem* pIOSystem,
+            const std::string path, const std::string file);
+
+    /// Destructor
+    virtual ~PbrtExporter();
+
+private:
+    // the scene to export
+    const aiScene* mScene;
+
+    /// Stringstream to write all output into
+    std::stringstream mOutput;
+
+    /// The IOSystem for output
+    IOSystem* mIOSystem;
+
+    /// Path of the directory where the scene will be exported
+    const std::string mPath;
+
+    /// Name of the file (without extension) where the scene will be exported
+    const std::string mFile;
+
+private:
+    // A private set to keep track of which textures have been declared
+    std::set<std::string> mTextureSet;
+
+    aiMatrix4x4 GetNodeTransform(const aiString& name) const;
+    static std::string TransformAsString(const aiMatrix4x4& m);
+
+    static std::string RemoveSuffix(std::string filename);
+    std::string CleanTextureFilename(const aiString &f, bool rewriteExtension = true) const;
+
+    void WriteMetaData();
+
+    void WriteWorldDefinition();
+
+    void WriteCameras();
+    void WriteCamera(int i);
+
+    void WriteLights();
+
+    void WriteTextures();
+    static bool TextureHasAlphaMask(const std::string &filename);
+
+    void WriteMaterials();
+    void WriteMaterial(int i);
+
+    void WriteMesh(aiMesh* mesh);
+
+    void WriteInstanceDefinition(int i);
+    void WriteGeometricObjects(aiNode* node, aiMatrix4x4 parentTransform,
+            std::map<int, int> &meshUses);
+};
+
+} // namespace Assimp
+
+#endif // ASSIMP_BUILD_NO_PBRT_EXPORTER
+
+#endif // AI_PBRTEXPORTER_H_INC
diff --git a/code/Pbrt/stb_image.h b/code/Pbrt/stb_image.h
new file mode 100644
index 000000000..65a205f6e
--- /dev/null
+++ b/code/Pbrt/stb_image.h
@@ -0,0 +1,7756 @@
+/* stb_image - v2.26 - public domain image loader - http://nothings.org/stb
+                                  no warranty implied; use at your own risk
+
+   Do 
this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. + And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + +LICENSE + + See end of file for license information. + +RECENT REVISION HISTORY: + + 2.26 (2020-07-13) many minor fixes + 2.25 (2020-02-02) fix warnings + 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically + 2.23 (2019-08-11) fix clang static analysis warning + 2.22 (2019-03-04) gif fixes, fix warnings + 2.21 (2019-02-25) fix typo in comment + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + + See end of file for full revision history. 
+ + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes Mikhail Morozov (1-bit BMP) + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine + John-Mark Allen + Carmelo J Fdez-Aguera + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski + Phil Jordan Dave Moore Roy Eltham + Hayaki Saito Nathan Reed Won Chun + Luke Graham Johan Duparc Nick Verigakis the Horde3D community + Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Laurent Gomila Cort Stratton github:snagar + Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex + Cass Everitt Ryamond Barbiero github:grim210 + Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw + Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus + Josh Tobin Matthew Gregan github:poppolopoppo + Julian Raschke Gregory Mullen Christian Floisand github:darealshinji + Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 + Brad Weinberger Matvey Cherevko [reserved] + Luca Sas Alexander Veselov Zack Middleton [reserved] + Ryan C. Gordon [reserved] [reserved] + DO NOT ADD YOUR NAME HERE + + To add your name to the credits, pick a random blank space in the middle and fill it. + 80% of merge conflicts on stb PRs are due to people adding their name at the end + of the credits. +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data) +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. 
The number of +// components N is 'desired_channels' if desired_channels is non-zero, or +// *channels_in_file otherwise. If desired_channels is non-zero, +// *channels_in_file has the number of components that _would_ have been +// output otherwise. E.g. if you set desired_channels to 4, you will always +// get RGBA output, but you can check *channels_in_file to see if it's trivially +// opaque because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *channels_in_file will be unchanged. The function +// stbi_failure_reason() can be queried for an extremely brief, end-user +// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS +// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// =========================================================================== +// +// UNICODE: +// +// If compiling for Windows and you wish to use Unicode filenames, compile +// with +// #define STBI_WINDOWS_UTF8 +// and pass utf8-encoded filenames. Call stbi_convert_wchar_to_utf8 to convert +// Windows wchar_t filenames to utf8. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy-to-use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// provide more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small source code footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. 
On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image supports loading HDR images in general, and currently the Radiance +// .HDR file format specifically. You can still load any file through the existing +// interface; if you attempt to load an HDR file, it will be automatically remapped +// to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). +// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// By default we convert iphone-formatted PNGs back to RGB, even though +// they are internally encoded differently. You can disable this conversion +// by calling stbi_convert_iphone_png_to_rgb(0), in which case +// you will always just get the native iphone "format" through (which +// is BGR stored in RGB). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. 
+// +// STBI_NO_JPEG +// STBI_NO_PNG +// STBI_NO_BMP +// STBI_NO_PSD +// STBI_NO_TGA +// STBI_NO_GIF +// STBI_NO_HDR +// STBI_NO_PIC +// STBI_NO_PNM (.ppm and .pgm) +// +// - You can request *only* certain decoders and suppress all other ones +// (this will be more forward-compatible, as addition of new decoders +// doesn't require you to disable them explicitly): +// +// STBI_ONLY_JPEG +// STBI_ONLY_PNG +// STBI_ONLY_BMP +// STBI_ONLY_PSD +// STBI_ONLY_TGA +// STBI_ONLY_GIF +// STBI_ONLY_HDR +// STBI_ONLY_PIC +// STBI_ONLY_PNM (.ppm and .pgm) +// +// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still +// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB +// +// - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater +// than that size (in either width or height) without further processing. +// This is to let programs in the wild set an upper bound to prevent +// denial-of-service attacks on untrusted data, as one could generate a +// valid image of gigantic dimensions and force stb_image to allocate a +// huge block of memory and spend disproportionate time decoding it. By +// default this is set to (1 << 24), which is 16777216, but that's still +// very big. + +#ifndef STBI_NO_STDIO +#include +#endif // STBI_NO_STDIO + +#define STBI_VERSION 1 + +enum +{ + STBI_default = 0, // only used for desired_channels + + STBI_grey = 1, + STBI_grey_alpha = 2, + STBI_rgb = 3, + STBI_rgb_alpha = 4 +}; + +#include +typedef unsigned char stbi_uc; +typedef unsigned short stbi_us; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef STBIDEF +#ifdef STB_IMAGE_STATIC +#define STBIDEF static +#else +#define STBIDEF extern +#endif +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// PRIMARY API - works on images of any type +// + +// +// load image by filename, open file, or memory buffer +// + +typedef struct +{ + int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. 
return number of bytes actually read + void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative + int (*eof) (void *user); // returns nonzero if we are at end of file/data +} stbi_io_callbacks; + +//////////////////////////////////// +// +// 8-bits-per-channel interface +// + +STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +// for stbi_load_from_file, file pointer is left pointing immediately after image +#endif + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +#endif + +#ifdef STBI_WINDOWS_UTF8 +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); +#endif + +//////////////////////////////////// +// +// 16-bits-per-channel interface +// + +STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +#endif + +//////////////////////////////////// +// +// float-per-channel interface +// +#ifndef STBI_NO_LINEAR + STBIDEF float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + + #ifndef STBI_NO_STDIO + STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + #endif +#endif + +#ifndef STBI_NO_HDR + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); +#endif // STBI_NO_HDR + +#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// on most compilers (and ALL modern mainstream compilers) this is threadsafe +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully 
decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit (char const *filename); +STBIDEF int stbi_is_16_bit_from_file(FILE *f); +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. results are undefined if the unpremultiply overflow. +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + +// indicate whether we should process iphone images back to canonical format, +// or just pass them through "as-is" +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + +// flip the image vertically, so the first pixel in the output array is the bottom left +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + +// as above, but only applies to images loaded on the thread that calls the function +// this function is only available if your compiler supports thread-local variables; +// calling it will fail to link if your compiler doesn't +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); + +// ZLIB client - used by PNG, available for other purposes + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); +STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + +STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ + || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ + || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \ + || defined(STBI_ONLY_ZLIB) + #ifndef STBI_ONLY_JPEG + #define STBI_NO_JPEG + #endif + #ifndef STBI_ONLY_PNG + #define STBI_NO_PNG + #endif + #ifndef STBI_ONLY_BMP + #define STBI_NO_BMP + #endif + #ifndef STBI_ONLY_PSD + #define STBI_NO_PSD + #endif + #ifndef STBI_ONLY_TGA + #define STBI_NO_TGA + #endif + #ifndef STBI_ONLY_GIF + #define STBI_NO_GIF + #endif + #ifndef STBI_ONLY_HDR + #define STBI_NO_HDR + #endif + #ifndef STBI_ONLY_PIC + #define STBI_NO_PIC + #endif + #ifndef STBI_ONLY_PNM + #define STBI_NO_PNM + #endif +#endif + +#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB) +#define STBI_NO_ZLIB +#endif + + +#include +#include // ptrdiff_t on osx +#include +#include +#include + +#if !defined(STBI_NO_LINEAR) || 
!defined(STBI_NO_HDR) +#include // ldexp, pow +#endif + +#ifndef STBI_NO_STDIO +#include +#endif + +#ifndef STBI_ASSERT +#include +#define STBI_ASSERT(x) assert(x) +#endif + +#ifdef __cplusplus +#define STBI_EXTERN extern "C" +#else +#define STBI_EXTERN extern +#endif + + +#ifndef _MSC_VER + #ifdef __cplusplus + #define stbi_inline inline + #else + #define stbi_inline + #endif +#else + #define stbi_inline __forceinline +#endif + +#ifndef STBI_NO_THREAD_LOCALS + #if defined(__cplusplus) && __cplusplus >= 201103L + #define STBI_THREAD_LOCAL thread_local + #elif defined(__GNUC__) && __GNUC__ < 5 + #define STBI_THREAD_LOCAL __thread + #elif defined(_MSC_VER) + #define STBI_THREAD_LOCAL __declspec(thread) + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__) + #define STBI_THREAD_LOCAL _Thread_local + #endif + + #ifndef STBI_THREAD_LOCAL + #if defined(__GNUC__) + #define STBI_THREAD_LOCAL __thread + #endif + #endif +#endif + +#ifdef _MSC_VER +typedef unsigned short stbi__uint16; +typedef signed short stbi__int16; +typedef unsigned int stbi__uint32; +typedef signed int stbi__int32; +#else +#include +typedef uint16_t stbi__uint16; +typedef int16_t stbi__int16; +typedef uint32_t stbi__uint32; +typedef int32_t stbi__int32; +#endif + +// should produce compiler error if size is wrong +typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1]; + +#ifdef _MSC_VER +#define STBI_NOTUSED(v) (void)(v) +#else +#define STBI_NOTUSED(v) (void)sizeof(v) +#endif + +#ifdef _MSC_VER +#define STBI_HAS_LROTL +#endif + +#ifdef STBI_HAS_LROTL + #define stbi_lrot(x,y) _lrotl(x,y) +#else + #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y)))) +#endif + +#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) +// ok +#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED) +// ok +#else +#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)." +#endif + +#ifndef STBI_MALLOC +#define STBI_MALLOC(sz) malloc(sz) +#define STBI_REALLOC(p,newsz) realloc(p,newsz) +#define STBI_FREE(p) free(p) +#endif + +#ifndef STBI_REALLOC_SIZED +#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) +#endif + +// x86/x64 detection +#if defined(__x86_64__) || defined(_M_X64) +#define STBI__X64_TARGET +#elif defined(__i386) || defined(_M_IX86) +#define STBI__X86_TARGET +#endif + +#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) +// gcc doesn't support sse2 intrinsics unless you compile with -msse2, +// which in turn means it gets to use SSE2 everywhere. This is unfortunate, +// but previous attempts to provide the SSE2 functions with runtime +// detection caused numerous issues. The way architecture extensions are +// exposed in GCC/Clang is, sadly, not really suited for one-file libs. +// New behavior: if compiled with -msse2, we use SSE2 without any +// detection; if not, we don't use it at all. +#define STBI_NO_SIMD +#endif + +#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) +// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET +// +// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the +// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. 
+// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not +// simultaneously enabling "-mstackrealign". +// +// See https://github.com/nothings/stb/issues/81 for more information. +// +// So default to no SSE2 on 32-bit MinGW. If you've read this far and added +// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. +#define STBI_NO_SIMD +#endif + +#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) +#define STBI_SSE2 +#include + +#ifdef _MSC_VER + +#if _MSC_VER >= 1400 // not VC6 +#include // __cpuid +static int stbi__cpuid3(void) +{ + int info[4]; + __cpuid(info,1); + return info[3]; +} +#else +static int stbi__cpuid3(void) +{ + int res; + __asm { + mov eax,1 + cpuid + mov res,edx + } + return res; +} +#endif + +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name + +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) +{ + int info3 = stbi__cpuid3(); + return ((info3 >> 26) & 1) != 0; +} +#endif + +#else // assume GCC-style if not VC++ +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) + +#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2) +static int stbi__sse2_available(void) +{ + // If we're even attempting to compile this on GCC/Clang, that means + // -msse2 is on, which means the compiler is allowed to use SSE2 + // instructions at will, and so are we. + return 1; +} +#endif + +#endif +#endif + +// ARM NEON +#if defined(STBI_NO_SIMD) && defined(STBI_NEON) +#undef STBI_NEON +#endif + +#ifdef STBI_NEON +#include +// assume GCC or Clang on ARM targets +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) +#endif + +#ifndef STBI_SIMD_ALIGN +#define STBI_SIMD_ALIGN(type, name) type name +#endif + +#ifndef STBI_MAX_DIMENSIONS +#define STBI_MAX_DIMENSIONS (1 << 24) +#endif + +/////////////////////////////////////////////// +// +// stbi__context struct and start_xxx functions + +// stbi__context structure is our basic context used by all images, so it +// contains all the IO context, plus some basic image information +typedef struct +{ + stbi__uint32 img_x, img_y; + int img_n, img_out_n; + + stbi_io_callbacks io; + void *io_user_data; + + int read_from_callbacks; + int buflen; + stbi_uc buffer_start[128]; + int callback_already_read; + + stbi_uc *img_buffer, *img_buffer_end; + stbi_uc *img_buffer_original, *img_buffer_original_end; +} stbi__context; + + +static void stbi__refill_buffer(stbi__context *s); + +// initialize a memory-decode context +static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) +{ + s->io.read = NULL; + s->read_from_callbacks = 0; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer; + s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len; +} + +// initialize a callback-based context +static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) +{ + s->io = *c; + s->io_user_data = user; + s->buflen = sizeof(s->buffer_start); + s->read_from_callbacks = 1; + s->callback_already_read = 0; + s->img_buffer = s->img_buffer_original = s->buffer_start; + stbi__refill_buffer(s); + s->img_buffer_original_end = s->img_buffer_end; +} + +#ifndef STBI_NO_STDIO + +static int stbi__stdio_read(void *user, char *data, int size) +{ + return (int) fread(data,1,size,(FILE*) user); +} + +static void stbi__stdio_skip(void *user, int n) +{ + int ch; + fseek((FILE*) user, n, SEEK_CUR); + ch = fgetc((FILE*) user); /* 
have to read a byte to reset feof()'s flag */ + if (ch != EOF) { + ungetc(ch, (FILE *) user); /* push byte back onto stream if valid. */ + } +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*) user) || ferror((FILE *) user); +} + +static stbi_io_callbacks stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__png_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int 
*comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +static +#ifdef STBI_THREAD_LOCAL +STBI_THREAD_LOCAL +#endif +const char *stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +#ifndef STBI_NO_FAILURE_STRINGS +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} +#endif + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. +static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX/b; +} + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} +#endif + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} +#endif + +#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR) +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + return stbi__malloc(a*b + add); +} +#endif + +static void *stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} +#endif + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define 
stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load_global = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_global = flag_true_if_should_flip; +} + +#ifndef STBI_THREAD_LOCAL +#define stbi__vertically_flip_on_load stbi__vertically_flip_on_load_global +#else +static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local, stbi__vertically_flip_on_load_set; + +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_local = flag_true_if_should_flip; + stbi__vertically_flip_on_load_set = 1; +} + +#define stbi__vertically_flip_on_load (stbi__vertically_flip_on_load_set \ + ? stbi__vertically_flip_on_load_local \ + : stbi__vertically_flip_on_load_global) +#endif // STBI_THREAD_LOCAL + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNG + if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #else + STBI_NOTUSED(bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! 
+ if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } +} + +#ifndef STBI_NO_GIF +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } +} +#endif + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 8) { + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 16) { + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? 
*comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR) +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } +} +#endif + +#ifndef STBI_NO_STDIO + +#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) +STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); +#endif + +#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) +{ + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); +} +#endif + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8) + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename))) + return 0; + + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode))) + return 0; + +#if _MSC_VER >= 1400 + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; +#else + f = _wfopen(wFilename, wMode); +#endif + +#elif defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f,x,y,comp,req_comp); + fclose(f); + 
return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_mem(&s,buffer,len); + + result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); + if (stbi__vertically_flip_on_load) { + stbi__vertical_flip_slices( result, *x, *y, *z, *comp ); + } + + return result; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data,x,y,comp,req_comp); + return hdr_data; + } + #endif + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s,f); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! 
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + long pos = ftell(f); + int res; + stbi__context s; + stbi__start_file(&s,f); + res = stbi__hdr_test(&s); + fseek(f, pos, SEEK_SET); + return res; + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + s->callback_already_read += (int) (s->img_buffer - s->img_buffer_original); + if (n == 0) { + // at end of file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) +// nothing +#else +static void stbi__skip(stbi__context *s, int n) +{ + if (n == 0) return; // already there! 
+ if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && defined(STBI_NO_PNM) +// nothing +#else +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} +#endif + +#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + return z + (stbi__get16le(s) << 16); +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). 
png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, 
so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return (stbi__uint16*) stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + } + if (n < comp) { + for (i=0; i < x*y; ++i) { + output[i*comp + n] = data[i*comp + n]/255.0f; + } + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 
'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' +} stbi__huffman; + +typedef struct +{ + stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag + int rgb; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0; + unsigned int code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) + for (j=0; j < count[i]; ++j) + h->size[k++] = (stbi_uc) (i+1); + h->size[k] = 0; + + // compute actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. 
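The table that the next function builds packs three fields into each 16-bit entry, so the hot decode loop can recover the coefficient value, the zero run, and the number of bits to consume with a single lookup. A sketch of that packing convention as stbi__jpeg_decode_block later unpacks it (pack_fast_ac/unpack_fast_ac are hypothetical helpers, not stb_image functions):

```c
/* Hypothetical helpers mirroring the fast_ac layout used below:
     bits 0..3  = total bits to drop from the bit buffer (huffman length + magnitude bits)
     bits 4..7  = run of zero coefficients preceding the value
     bits 8..15 = the signed coefficient value itself (stored only if it fits in -128..127) */
static short pack_fast_ac(int value, int run, int total_bits)
{
   return (short) ((value * 256) + (run * 16) + total_bits);
}

static void unpack_fast_ac(int r, int *value, int *run, int *total_bits)
{
   *total_bits = r & 15;          /* matches "s = r & 15" in stbi__jpeg_decode_block */
   *run        = (r >> 4) & 15;   /* matches "k += (r >> 4) & 15" */
   *value      = r >> 8;          /* matches "(r >> 8) * dequant[zig]"; arithmetic shift keeps the sign */
}
```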
+static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + unsigned int b = j->nomore ? 0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! 
code not found
+      j->code_bits -= 16;
+      return -1;
+   }
+
+   if (k > j->code_bits)
+      return -1;
+
+   // convert the huffman code to the symbol id
+   c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k];
+   STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]);
+
+   // convert the id to a symbol
+   j->code_bits -= k;
+   j->code_buffer <<= k;
+   return h->values[c];
+}
+
+// bias[n] = (-1<<n) + 1
+static const int stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767};
+
+// combined JPEG 'receive' and JPEG 'extend', since baseline
+// always extends everything it receives.
+stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n)
+{
+   unsigned int k;
+   int sgn;
+   if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
+
+   sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB
+   k = stbi_lrot(j->code_buffer, n);
+   if (n < 0 || n >= (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask))) return 0;
+   j->code_buffer = k & ~stbi__bmask[n];
+   k &= stbi__bmask[n];
+   j->code_bits -= n;
+   return k + (stbi__jbias[n] & ~sgn);
+}
+
+// get some unsigned bits
+stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n)
+{
+   unsigned int k;
+   if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
+   k = stbi_lrot(j->code_buffer, n);
+   j->code_buffer = k & ~stbi__bmask[n];
+   k &= stbi__bmask[n];
+   j->code_bits -= n;
+   return k;
+}
+
+stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j)
+{
+   unsigned int k;
+   if (j->code_bits < 1) stbi__grow_buffer_unsafe(j);
+   k = j->code_buffer;
+   j->code_buffer <<= 1;
+   --j->code_bits;
+   return k & 0x80000000;
+}
+
+// given a value that's at position X in the zigzag stream,
+// where does it appear in the 8x8 matrix coded as row-major?
+static const stbi_uc stbi__jpeg_dezigzag[64+15] =
+{
+    0,  1,  8, 16,  9,  2,  3, 10,
+   17, 24, 32, 25, 18, 11,  4,  5,
+   12, 19, 26, 33, 40, 48, 41, 34,
+   27, 20, 13,  6,  7, 14, 21, 28,
+   35, 42, 49, 56, 57, 50, 43, 36,
+   29, 22, 15, 23, 30, 37, 44, 51,
+   58, 59, 52, 45, 38, 31, 39, 46,
+   53, 60, 61, 54, 47, 55, 62, 63,
+   // let corrupt input sample past end
+   63, 63, 63, 63, 63, 63, 63, 63,
+   63, 63, 63, 63, 63, 63, 63
+};
+
+// decode one 64-entry block--
+static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant)
+{
+   int diff,dc,k;
+   int t;
+
+   if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
+   t = stbi__jpeg_huff_decode(j, hdc);
+   if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG");
+
+   // 0 all the ac values now so we can do it 32-bits at a time
+   memset(data,0,64*sizeof(data[0]));
+
+   diff = t ?
stbi__extend_receive(j, t) : 0; + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + if (t == -1) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + diff = t ? stbi__extend_receive(j, t) : 0; + + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc << j->succ_low); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) << shift); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) << shift); + } + } + } while (k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + 
int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2+p3) * stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds + int dcterm = d[0]*4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + 
STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. + __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + #define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + /* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + 
dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. + __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... 
+ + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + 
dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. + dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! 
+ + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 
129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = {'A','d','o','b','e','\0'}; + int ok = 1; + int i; + for (i=0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; + if (ok) { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } + + stbi__skip(z->s, L); + return 1; + } + + return stbi__err("unknown marker","Corrupt JPEG"); +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if (z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) { + 
STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + c = stbi__get8(s); + if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + z->rgb = 0; + for (i=0; i < s->img_n; ++i) { + static const unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. 
a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + // handle 0s at the end of image data from IP Kamera 9060 + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + if (x == 255) { + j->marker = stbi__get8(j->s); + break; + } + } + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + } else { + if (!stbi__process_marker(j, 
m)) return 0; + } + m = stbi__get_marker(j); + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. + for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. 
+ __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. + __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
+ int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
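The scalar YCbCr-to-RGB row above works in 20-bit fixed point: stbi__float2fixed scales each coefficient by 4096 and shifts it up another 8 bits, luma is shifted up 20 bits with half a unit added for rounding, and the sums are shifted back down and clamped. A standalone single-pixel sketch of the same scheme (the library version additionally masks one green-channel product with 0xffff0000 to bit-match its SIMD path, which this sketch leaves out):

    #include <stdio.h>

    #define FIX(x) (((int)((x) * 4096.0f + 0.5f)) << 8)   /* 20-bit fixed-point coefficient */

    static int clamp255(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

    int main(void)
    {
        int y = 128, cb = 200 - 128, cr = 90 - 128;        /* chroma re-centered around 0 */
        int y_fixed = (y << 20) + (1 << 19);               /* +0.5 so the >>20 rounds */
        int r = (y_fixed + cr * FIX(1.40200f)) >> 20;
        int g = (y_fixed - cr * FIX(0.71414f) - cb * FIX(0.34414f)) >> 20;
        int b = (y_fixed + cb * FIX(1.77200f)) >> 20;
        printf("R=%d G=%d B=%d\n", clamp255(r), clamp255(g), clamp255(b));
        return 0;
    }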
+ __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + _mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
+ uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +// fast 0..255 * 0..255 => 0..255 rounded multiplication +static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) +{ + unsigned int t = x*y + 128; + return (stbi_uc) ((t + (t >>8)) >> 8); +} + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n, is_rgb; + z->s->img_n = 0; // make 
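stbi__blinn_8x8 above is the usual shift-and-add substitute for multiplying two 0..255 values and dividing by 255 with rounding. A standalone sketch that simply verifies it against the plain rounded division over every input pair:

    #include <stdio.h>

    static unsigned char blinn_8x8(unsigned char x, unsigned char y)
    {
        unsigned int t = x * y + 128;
        return (unsigned char)((t + (t >> 8)) >> 8);   /* approximates t / 255, rounded */
    }

    int main(void)
    {
        int x, y, mismatches = 0;
        for (x = 0; x < 256; ++x)
            for (y = 0; y < 256; ++y)
                if (blinn_8x8((unsigned char)x, (unsigned char)y) != (x * y + 127) / 255)
                    ++mismatches;
        printf("mismatches vs rounded x*y/255: %d\n", mismatches);   /* expected: 0 */
        return 0;
    }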
stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; + + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + + if (z->s->img_n == 3 && n < 3 && !is_rgb) + decode_n = 1; + else + decode_n = z->s->img_n; + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4] = { NULL, NULL, NULL, NULL }; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (is_rgb) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else if (z->s->img_n == 4) { + if (z->app14_color_transform == 0) { // CMYK + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } else if (z->app14_color_transform == 2) { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + out += n; + } + } else { // YCbCr + alpha? 
Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + if (is_rgb) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) { *out++ = y[i]; *out++ = 255; } + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + STBI_NOTUSED(ri); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x,y,comp,req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); + stbi__rewind(s); + STBI_FREE(j); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind( j->s ); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n >= 3 ? 
3 : 1; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + j->s = s; + result = stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[288]; + stbi__uint16 value[288]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static int stbi__zeof(stbi__zbuf *z) +{ + return (z->zbuffer >= z->zbuffer_end); +} + +stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) +{ + return stbi__zeof(z) ? 
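zlib stores Huffman codes starting from the least significant bit, which is why stbi__zbuild_huffman above fills its fast table at bit-reversed code positions. A standalone sketch of the reversal helper: reverse all 16 bits by swapping progressively larger groups, then drop the unused tail:

    #include <stdio.h>

    static int bitreverse16(int n)
    {
        n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1);
        n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2);
        n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4);
        n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8);
        return n;
    }

    /* reverse only the low `bits` bits of v */
    static int bit_reverse(int v, int bits)
    {
        return bitreverse16(v) >> (16 - bits);
    }

    int main(void)
    {
        printf("%x\n", bit_reverse(0x06, 5));   /* 0b00110 -> 0b01100, prints c */
        return 0;
    }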
0 : *z->zbuffer++; +} + +static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + if (z->code_buffer >= (1U << z->num_bits)) { + z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */ + return; + } + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s >= 16) return -1; // invalid code! + // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + if ((unsigned int)b >= sizeof (z->size)) return -1; // some data was corrupt somewhere! + if (z->size[b] != s) return -1; // was originally an assert, but report failure instead. + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) { + if (stbi__zeof(a)) { + return -1; /* report error for unexpected end of data. */ + } + stbi__fill_bits(a); + } + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + unsigned int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (unsigned int) (z->zout - z->zout_start); + limit = old_limit = (unsigned) (z->zout_end - z->zout_start); + if (UINT_MAX - cur < (unsigned) n) return stbi__err("outofmem", "Out of memory"); + while (cur + n > limit) { + if(limit > UINT_MAX / 2) return stbi__err("outofmem", "Out of memory"); + limit *= 2; + } + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static const int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static const int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static const int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int len,dist; + if (z == 256) { + a->zout = zout; + return 1; + } + z -= 257; + len = stbi__zlength_base[z]; + if 
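stbi__fill_bits and stbi__zreceive above form an LSB-first bit reader: whole bytes are or-ed in above whatever is already buffered, and fields are peeled off the low end. A minimal standalone sketch with illustrative names:

    #include <stdio.h>

    typedef struct {
        const unsigned char *p, *end;
        unsigned int buf;   /* bits waiting to be consumed, low bits first */
        int nbits;
    } bitreader;

    static unsigned int get_bits(bitreader *b, int n)
    {
        unsigned int v;
        while (b->nbits < n) {                            /* top up one byte at a time */
            unsigned int byte = (b->p < b->end) ? *b->p++ : 0;
            b->buf |= byte << b->nbits;
            b->nbits += 8;
        }
        v = b->buf & ((1u << n) - 1);                     /* DEFLATE packs fields LSB-first */
        b->buf >>= n;
        b->nbits -= n;
        return v;
    }

    int main(void)
    {
        unsigned char data[2] = { 0xB5, 0x01 };           /* 10110101 00000001 */
        bitreader b = { data, data + 2, 0, 0 };
        unsigned int lo3 = get_bits(&b, 3);               /* low three bits of 0xB5 -> 5 */
        unsigned int hi5 = get_bits(&b, 5);               /* remaining five bits   -> 22 */
        printf("%u %u\n", lo3, hi5);
        return 0;
    }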
(stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (zout + len > a->zout_end) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. + stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) { + c = stbi__zreceive(a,3)+3; + } else if (c == 18) { + c = stbi__zreceive(a,7)+11; + } else { + return stbi__err("bad codelengths", "Corrupt PNG"); + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + if (a->num_bits < 0) return stbi__err("zlib corrupt","Corrupt PNG"); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if (stbi__zeof(a)) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad 
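The match copy in stbi__parse_huffman_block above deliberately copies one byte at a time, so a back-reference whose distance is smaller than its length re-reads bytes it has just written (and dist == 1 degenerates into a run, handled separately). A standalone sketch of that overlap-friendly copy:

    #include <stdio.h>

    /* copy len bytes from dist bytes back; src may overlap dst */
    static void copy_match(unsigned char *out, int pos, int dist, int len)
    {
        unsigned char *dst = out + pos;
        unsigned char *src = dst - dist;
        while (len--)
            *dst++ = *src++;
    }

    int main(void)
    {
        unsigned char buf[16] = "abc";
        copy_match(buf, 3, 3, 6);        /* length 6 with distance 3: repeat "abc" twice */
        buf[9] = 0;
        printf("%s\n", (char *) buf);    /* abcabcabc */
        return 0;
    }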
zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... but who cares, we fully buffer output + return 1; +} + +static const stbi_uc stbi__zdefault_length[288] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} +*/ + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + +static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return 
a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, int c) +{ + int p = a + b - c; + int pa = abs(p-a); + int pb = abs(p-b); + int pc = abs(p-c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) return b; + return c; +} + +static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16? 
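stbi__paeth above is the PNG Paeth predictor: estimate the pixel as left + up - upper-left, then return whichever of the three neighbours lies closest to that estimate, breaking ties in the order left, up, upper-left. A standalone sketch that also reconstructs a single filtered byte:

    #include <stdio.h>
    #include <stdlib.h>

    static int paeth(int a, int b, int c)   /* a = left, b = up, c = upper-left */
    {
        int p = a + b - c;
        int pa = abs(p - a), pb = abs(p - b), pc = abs(p - c);
        if (pa <= pb && pa <= pc) return a;
        if (pb <= pc) return b;
        return c;
    }

    int main(void)
    {
        /* Recon(x) = (Filt(x) + predictor) mod 256 */
        int left = 120, up = 130, upleft = 125, filt = 7;
        printf("%d\n", (filt + paeth(left, up, upleft)) & 255);   /* 132 */
        return 0;
    }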
2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. + if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior; + int filter = *raw++; + + if (filter > 4) + return stbi__err("invalid filter","Corrupt PNG"); + + if (depth < 8) { + if (img_width_bytes > x) return stbi__err("invalid width","Corrupt PNG"); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k=0; k < filter_bytes; ++k) { + switch (filter) { + case STBI__F_none : cur[k] = raw[k]; break; + case STBI__F_sub : cur[k] = raw[k]; break; + case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; + case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; + case STBI__F_avg_first : cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur += out_n; + prior += out_n; + } else if (depth == 16) { + if (img_n != out_n) { + cur[filter_bytes] = 255; // first pixel top byte + cur[filter_bytes+1] = 255; // first pixel bottom byte + } + raw += filter_bytes; + cur += output_bytes; + prior += output_bytes; + } else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*filter_bytes; + #define STBI__CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. 
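As a standalone illustration of the per-row unfiltering performed by the filter switch that follows (and by the first-byte special cases above): the Sub filter stores each byte as a delta against the byte one pixel to the left, so reconstruction is a running sum modulo 256.

    #include <stdio.h>

    /* undo the PNG Sub filter in place; bpp is the number of bytes per pixel */
    static void unfilter_sub(unsigned char *row, int rowbytes, int bpp)
    {
        int k;
        for (k = bpp; k < rowbytes; ++k)
            row[k] = (unsigned char)(row[k] + row[k - bpp]);
    }

    int main(void)
    {
        unsigned char row[9] = { 10, 20, 30, 5, 5, 5, 5, 5, 5 };   /* three RGB pixels */
        int i;
        unfilter_sub(row, 9, 3);
        for (i = 0; i < 9; ++i) printf("%d ", row[i]);             /* 10 20 30 15 25 35 20 30 40 */
        printf("\n");
        return 0;
    }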
+ case STBI__F_none: memcpy(cur, raw, nk); break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break; + } + #undef STBI__CASE + raw += nk; + } else { + STBI_ASSERT(img_n+1 == out_n); + #define STBI__CASE(f) \ + case f: \ + for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ + for (k=0; k < filter_bytes; ++k) + switch (filter) { + STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break; + } + #undef STBI__CASE + + // the loop above sets the high byte of the pixels' alpha, but for + // 16 bit png files we also need the low byte set. we'll do that here. + if (depth == 16) { + cur = a->out + stride*j; // start at the beginning of the row again + for (i=0; i < x; ++i,cur+=output_bytes) { + cur[filter_bytes+1] = 255; + } + } + } + } + + // we make a separate pass to expand bits to pixels; for performance, + // this could run two scanlines behind the above code, so it won't + // intefere with filtering but will still be in the cache. + if (depth < 8) { + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; + // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit + // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + + // note that the final byte might overshoot and write more data than desired. + // we can allocate enough data that this never writes out of memory, but it + // could also overwrite the next scanline. can it overwrite non-empty data + // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k=x*img_n; k >= 2; k-=2, ++in) { + *cur++ = scale * ((*in >> 4) ); + *cur++ = scale * ((*in ) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4) ); + } else if (depth == 2) { + for (k=x*img_n; k >= 4; k-=4, ++in) { + *cur++ = scale * ((*in >> 6) ); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in ) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6) ); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } else if (depth == 1) { + for (k=x*img_n; k >= 8; k-=8, ++in) { + *cur++ = scale * ((*in >> 7) ); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in ) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7) ); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q=x-1; q >= 0; --q) { + cur[q*2+1] = 255; + cur[q*2+0] = cur[q]; + } + } else { + STBI_ASSERT(img_n == 3); + for (q=x-1; q >= 0; --q) { + cur[q*4+3] = 255; + cur[q*4+2] = cur[q*3+2]; + cur[q*4+1] = cur[q*3+1]; + cur[q*4+0] = cur[q*3+0]; + } + } + } + } + } else if (depth == 16) { + // force the image data from big-endian to platform-native. + // this is done in a separate pass due to the decoding relying + // on the data being untouched, but could probably be done + // per-line during decode if care is taken. + stbi_uc *cur = a->out; + stbi__uint16 *cur16 = (stbi__uint16*)cur; + + for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { + *cur16 = (cur[0] << 8) | cur[1]; + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 
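The 1/2/4-bit expansion above multiplies each unpacked value by an entry of stbi__depth_scale_table (0xff, 0x55, 0x11) so that the maximum code lands on 255. A standalone sketch of the 4-bit case, where multiplying by 0x11 just replicates the nibble:

    #include <stdio.h>

    static void expand_4bit(const unsigned char *in, unsigned char *out, int npixels)
    {
        int i;
        for (i = 0; i < npixels; ++i) {
            unsigned char byte = in[i >> 1];
            unsigned char v = (i & 1) ? (byte & 0x0f) : (byte >> 4);   /* high nibble first */
            out[i] = (unsigned char)(v * 0x11);                        /* 0x0..0xF -> 0x00..0xFF */
        }
    }

    int main(void)
    {
        unsigned char packed[2] = { 0x0F, 0xA3 };
        unsigned char px[4];
        expand_4bit(packed, px, 4);
        printf("%d %d %d %d\n", px[0], px[1], px[2], px[3]);   /* 0 255 170 51 */
        return 0;
    }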
2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*) z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 
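The de-interlacing loop above derives each Adam7 pass's dimensions from the origin/spacing tables with a round-up division and skips empty passes. A standalone sketch that prints the pass sizes for a small image and checks that they sum to the full pixel count:

    #include <stdio.h>

    int main(void)
    {
        static const int xorig[7] = { 0,4,0,2,0,1,0 };
        static const int yorig[7] = { 0,0,4,0,2,0,1 };
        static const int xspc[7]  = { 8,8,4,4,2,2,1 };
        static const int yspc[7]  = { 8,8,8,4,4,2,2 };
        int img_x = 13, img_y = 5, p, total = 0;

        for (p = 0; p < 7; ++p) {
            int x = (img_x - xorig[p] + xspc[p] - 1) / xspc[p];
            int y = (img_y - yorig[p] + yspc[p] - 1) / yspc[p];
            printf("pass %d: %d x %d\n", p + 1, x, y);
            total += x * y;
        }
        printf("total %d, expected %d\n", total, img_x * img_y);   /* both 65 */
        return 0;
    }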
0 : 65535); + p += 2; + } + } else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p += 3; + } + } else { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p[3] = palette[n+3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load = 0; +static int stbi__de_iphone_flag = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag = flag_true_if_should_convert; +} + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i=0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + stbi_uc half = a / 2; + p[0] = (p[2] * 255 + half) / a; + p[1] = (p[1] * 255 + half) / a; + p[2] = ( t * 255 + half) / a; + } else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } else { + // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n=0; + stbi_uc has_trans=0, tc[3]={0}; + stbi__uint16 tc16[3]; + stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; + int first=1,k,interlace=0, color=0, is_iphone=0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C','g','B','I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I','H','D','R'): { + int comp,filter; + if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); + s->img_x = stbi__get32be(s); + s->img_y = stbi__get32be(s); + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 
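stbi__de_iphone above optionally undoes premultiplied alpha while swapping BGR back to RGB; the recovery is color * 255 / alpha with half of alpha added for rounding, and alpha == 0 pixels are left as they are since nothing can be recovered. A standalone sketch of just the unpremultiply step:

    #include <stdio.h>

    static unsigned char unpremultiply(unsigned char c, unsigned char a)
    {
        if (a == 0) return c;                        /* avoid dividing by zero */
        return (unsigned char)((c * 255 + a / 2) / a);
    }

    int main(void)
    {
        /* mid gray at 50% alpha was stored premultiplied as 64; recover 128 */
        printf("%d\n", unpremultiply(64, 128));
        return 0;
    }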
16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); + filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + if (scan == STBI__SCAN_header) return 1; + } else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. + s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); + // if SCAN_header, have to scan to see if we have a tRNS + } + break; + } + + case STBI__PNG_TYPE('P','L','T','E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); + for (i=0; i < pal_len; ++i) { + palette[i*4+0] = stbi__get8(s); + palette[i*4+1] = stbi__get8(s); + palette[i*4+2] = stbi__get8(s); + palette[i*4+3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t','R','N','S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); + pal_img_n = 4; + for (i=0; i < c.length; ++i) + palette[i*4+3] = stbi__get8(s); + } else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); + if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); + has_trans = 1; + if (z->depth == 16) { + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } else { + for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I','D','A','T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); + if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > idata_limit) { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? 
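In the IHDR handling above the channel count falls out of the color-type bits: bit 1 selects RGB over grayscale and bit 2 adds an alpha channel, while paletted images (type 3) decode as a single channel of indices and are expanded from the PLTE later. A standalone sketch of that mapping:

    #include <stdio.h>

    static int png_channels(int color_type)
    {
        if (color_type == 3) return 1;    /* palette indices, expanded after decoding */
        return (color_type & 2 ? 3 : 1) + (color_type & 4 ? 1 : 0);
    }

    int main(void)
    {
        int types[5] = { 0, 2, 3, 4, 6 };   /* gray, rgb, palette, gray+alpha, rgba */
        int i;
        for (i = 0; i < 5; ++i)
            printf("color type %d -> %d channel(s)\n", types[i], png_channels(types[i]));
        return 0;
    }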
c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I','E','N','D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n+1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } else if (has_trans) { + // non-paletted image with tRNS -> source image has (constant) alpha + ++s->img_n; + } + STBI_FREE(z->expanded); z->expanded = NULL; + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { + #ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + #endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result=NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth <= 8) + ri->bits_per_channel = 8; + else if (p->depth == 16) + ri->bits_per_channel = 16; + else + return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth"); + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 
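The default-chunk branch above decides whether an unknown chunk may be skipped by testing bit 29 of the packed type value: that is bit 5 of the first type byte, its lowercase bit, which PNG defines as the ancillary flag. A standalone sketch:

    #include <stdio.h>

    #define PNG_TYPE(a,b,c,d) (((unsigned)(a) << 24) + ((unsigned)(b) << 16) + ((unsigned)(c) << 8) + (unsigned)(d))

    /* set lowercase bit -> ancillary (skippable); clear -> critical */
    static int is_ancillary(unsigned type) { return (type & (1u << 29)) != 0; }

    int main(void)
    {
        printf("IHDR: %d\n", is_ancillary(PNG_TYPE('I','H','D','R')));   /* 0, critical */
        printf("tEXt: %d\n", is_ancillary(PNG_TYPE('t','E','X','t')));   /* 1, safe to skip */
        return 0;
    }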
*) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; + if (n) *n = p->s->img_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, x,y,comp,req_comp, ri); +} + +static int stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind( p->s ); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} + +static int stbi__png_is16(stbi__context *s) +{ + stbi__png p; + p.s = s; + if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) + return 0; + if (p.depth != 16) { + stbi__rewind(p.s); + return 0; + } + return 1; +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n=0; + if (z == 0) return -1; + if (z >= 0x10000) { n += 16; z >>= 16; } + if (z >= 0x00100) { n += 8; z >>= 8; } + if (z >= 0x00010) { n += 4; z >>= 4; } + if (z >= 0x00004) { n += 2; z >>= 2; } + if (z >= 0x00002) { n += 1;/* >>= 1;*/ } + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +// extract an arbitrarily-aligned N-bit value (N=bits) +// from v, and then make it 8-bits long and fractionally +// extend it to full full range. 
+static int stbi__shiftsigned(unsigned int v, int shift, int bits) +{ + static unsigned int mul_table[9] = { + 0, + 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/, + 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/, + }; + static unsigned int shift_table[9] = { + 0, 0,0,1,0,2,4,6,0, + }; + if (shift < 0) + v <<= -shift; + else + v >>= shift; + STBI_ASSERT(v < 256); + v >>= (8-bits); + STBI_ASSERT(bits >= 0 && bits <= 8); + return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits]; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr,mg,mb,ma, all_a; + int extra_read; +} stbi__bmp_data; + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + info->extra_read = 14; + + if (info->offset < 0) return stbi__errpuc("bad BMP", "bad BMP"); + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } + } else if (compress == 3) { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->extra_read += 12; + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? 
+ return stbi__errpuc("bad BMP", "bad BMP"); + } + } else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else { + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + stbi__get32le(s); // discard color space + for (i=0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *) 1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr=0,mg=0,mb=0,ma=0, all_a; + stbi_uc pal[256][4]; + int psize=0,i,j,width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int) s->img_y) > 0; + s->img_y = abs((int) s->img_y); + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - info.extra_read - 24) / 3; + } else { + if (info.bpp < 16) + psize = (info.offset - info.extra_read - info.hsz) >> 2; + } + if (psize == 0) { + STBI_ASSERT(info.offset == s->callback_already_read + (int) (s->img_buffer - s->img_buffer_original)); + if (info.offset != s->callback_already_read + (s->img_buffer - s->buffer_start)) { + return stbi__errpuc("bad offset", "Corrupt BMP"); + } + } + + if (info.bpp == 24 && ma == 0xff000000) + s->img_n = 3; + else + s->img_n = ma ? 4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z=0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i=0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 
3 : 4)); + if (info.bpp == 1) width = (s->img_x + 7) >> 3; + else if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width)&3; + if (info.bpp == 1) { + for (j=0; j < (int) s->img_y; ++j) { + int bit_offset = 7, v = stbi__get8(s); + for (i=0; i < (int) s->img_x; ++i) { + int color = (v>>bit_offset)&0x1; + out[z++] = pal[color][0]; + out[z++] = pal[color][1]; + out[z++] = pal[color][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + if((--bit_offset) < 0) { + bit_offset = 7; + v = stbi__get8(s); + } + } + stbi__skip(s, pad); + } + } else { + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } + } else { + int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; + int z = 0; + int easy=0; + stbi__skip(s, info.offset - info.extra_read - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2*s->img_x; + else /* bpp = 32 and pad = 0 */ width=0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); + if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + } + for (j=0; j < (int) s->img_y; ++j) { + if (easy) { + for (i=0; i < (int) s->img_x; ++i) { + unsigned char a; + out[z+2] = stbi__get8(s); + out[z+1] = stbi__get8(s); + out[z+0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } else { + int bpp = info.bpp; + for (i=0; i < (int) s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); + unsigned int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? 
stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j=0; j < (int) s->img_y>>1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; + for (i=0; i < (int) s->img_x*target; ++i) { + t = p1[i]; p1[i] = p2[i]; p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 16bit) or grey allowed + if (is_rgb16) *is_rgb16 = 0; + switch(bits_per_pixel) { + case 8: return STBI_grey; + case 16: if(is_grey) return STBI_grey_alpha; + // fallthrough + case 15: if(is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fallthrough + case 32: return bits_per_pixel/8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if( tga_colormap_type > 1 ) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if ( tga_colormap_type == 1 ) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip image x and y origin + tga_colormap_bpp = sz; + } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s,9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if( tga_w < 1 ) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if( tga_h < 1 ) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a colormap, tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if(!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; 
// seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if ( tga_color_type == 1 ) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + stbi__skip(s,4); // skip image x and y origin + } else { // "normal" image w/o colormap + if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s,9); // skip colormap specification and image x/y origin + } + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height + sz = stbi__get8(s); // bits per pixel + if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB +static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255)/31); + out[1] = (stbi_uc)((g * 255)/31); + out[2] = (stbi_uc)((b * 255)/31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16=0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) 
+ // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4] = {0}; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + STBI_NOTUSED(ri); + STBI_NOTUSED(tga_x_origin); // @TODO + STBI_NOTUSED(tga_y_origin); // @TODO + + if (tga_height > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (tga_width > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + // do a tiny bit of precessing + if ( tga_image_type >= 8 ) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset ); + + if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) { + for (i=0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height -i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } else { + // do I need to load a palette? + if ( tga_indexed) + { + if (tga_palette_len == 0) { /* you have to have at least one entry! */ + STBI_FREE(tga_data); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + + // any data to skip? (offset usually = 0) + stbi__skip(s, tga_palette_start ); + // load the palette + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i=0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i=0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk? + if ( tga_is_RLE ) + { + if ( RLE_count == 0 ) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } else if ( !RLE_repeating ) + { + read_next_pixel = 1; + } + } else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if ( read_next_pixel ) + { + // load however much data we did have + if ( tga_indexed ) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? 
stbi__get8(s) : stbi__get16le(s); + if ( pal_idx >= tga_palette_len ) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx+j]; + } + } else if(tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp+j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if ( tga_inverted ) + { + for (j = 0; j*2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if ( tga_palette != NULL ) + { + STBI_FREE( tga_palette ); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i=0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... [8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + STBI_NOTUSED(tga_palette_start); + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w,h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. 
+ if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6 ); + + // Read the number of channels (R, G, B, A, etc). + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + if (h > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (w > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. + + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. + //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. + if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. 
+ for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ? 255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int 
act_comp=0,num_packets=0,y,chained;
+   stbi__pic_packet packets[10];
+
+   // this will (should...) cater for even some bizarre stuff like having data
+    // for the same channel in multiple packets.
+   do {
+      stbi__pic_packet *packet;
+
+      if (num_packets==sizeof(packets)/sizeof(packets[0]))
+         return stbi__errpuc("bad format","too many packets");
+
+      packet = &packets[num_packets++];
+
+      chained = stbi__get8(s);
+      packet->size    = stbi__get8(s);
+      packet->type    = stbi__get8(s);
+      packet->channel = stbi__get8(s);
+
+      act_comp |= packet->channel;
+
+      if (stbi__at_eof(s))          return stbi__errpuc("bad file","file too short (reading packets)");
+      if (packet->size != 8)  return stbi__errpuc("bad format","packet isn't 8bpp");
+   } while (chained);
+
+   *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel?
+
+   for(y=0; y<height; ++y) {
+      int packet_idx;
+
+      for(packet_idx=0; packet_idx < num_packets; ++packet_idx) {
+         stbi__pic_packet *packet = &packets[packet_idx];
+         stbi_uc *dest = result+y*width*4;
+
+         switch (packet->type) {
+            default:
+               return stbi__errpuc("bad format","packet has bad compression type");
+
+            case 0: {//uncompressed
+               int x;
+
+               for(x=0;x<width;++x, dest+=4)
+                  if (!stbi__readval(s,packet->channel,dest))
+                     return 0;
+               break;
+            }
+
+            case 1://Pure RLE
+               {
+                  int left=width, i;
+
+                  while (left>0) {
+                     stbi_uc count,value[4];
+
+                     count=stbi__get8(s);
+                     if (stbi__at_eof(s))   return stbi__errpuc("bad file","file too short (pure read count)");
+
+                     if (count > left)
+                        count = (stbi_uc) left;
+
+                     if (!stbi__readval(s,packet->channel,value))  return 0;
+
+                     for(i=0; i<count; ++i,dest+=4)
+                        stbi__copyval(packet->channel,dest,value);
+                     left -= count;
+                  }
+               }
+               break;
+
+            case 2: {//Mixed RLE
+               int left=width;
+               while (left>0) {
+                  int count = stbi__get8(s), i;
+                  if (stbi__at_eof(s))  return stbi__errpuc("bad file","file too short (mixed read count)");
+
+                  if (count >= 128) { // Repeated
+                     stbi_uc value[4];
+
+                     if (count==128)
+                        count = stbi__get16be(s);
+                     else
+                        count -= 127;
+                     if (count > left)
+                        return stbi__errpuc("bad file","scanline overrun");
+
+                     if (!stbi__readval(s,packet->channel,value))
+                        return 0;
+
+                     for(i=0;i<count;++i, dest += 4)
+                        stbi__copyval(packet->channel,dest,value);
+                  } else { // Raw
+                     ++count;
+                     if (count>left) return stbi__errpuc("bad file","scanline overrun");
+
+                     for(i=0;i<count;++i, dest+=4)
+                        if (!stbi__readval(s,packet->channel,dest))
+                           return 0;
+                  }
+                  left-=count;
+               }
+               break;
+            }
+         }
+      }
+   }
+
+   return result;
+}
+
+static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri)
+{
+   stbi_uc *result;
+   int i, x,y, internal_comp;
+   STBI_NOTUSED(ri);
+
+   if (!comp) comp = &internal_comp;
+
+   for (i=0; i<92; ++i)
+      stbi__get8(s);
+
+   x = stbi__get16be(s);
+   y = stbi__get16be(s);
+
+   if (y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
+   if (x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
+
+   if (stbi__at_eof(s))  return stbi__errpuc("bad file","file too short (pic header)");
+   if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode");
+
+   stbi__get32be(s); //skip `ratio'
+   stbi__get16be(s); //skip `fields'
+   stbi__get16be(s); //skip `pad'
+
+   // intermediate buffer is RGBA
+   result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0);
+   if (!result) return stbi__errpuc("outofmem", "Out of memory"); // guard the memset below against a failed allocation
+   memset(result, 0xff, x*y*4);
+
+   if (!stbi__pic_load_core(s,x,y,comp, result)) {
+      STBI_FREE(result);
+      result=0;
+   }
+   *px = x;
+   *py = y;
+   if (req_comp == 0) req_comp = *comp;
+   result=stbi__convert_format(result,4,req_comp,x,y);
+
+   return result;
+}
+
+static int stbi__pic_test(stbi__context *s)
+{
+   int r = stbi__pic_test_core(s);
+   stbi__rewind(s);
+   return r;
+}
+#endif
+
+// *************************************************************************************************
+// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb
+
+#ifndef STBI_NO_GIF
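+// Rough map of the decoder below: stbi__gif_header reads the logical screen
+// descriptor and (optional) global palette, stbi__process_gif_raster is the
+// LZW decompressor (code words grow from lzw_cs+1 bits up to 12 bits), and
+// stbi__gif_load_next composites a single frame into g->out, applying the
+// disposal method carried in the previous frame's graphic-control eflags.
+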
+typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[8192]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; + int delay; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (g->w > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (g->h > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind( s ); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + int idx; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; + + c = &g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc 
*stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? + if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) +{ + int dispose; + int first_frame; + int pi; + int pcount; + STBI_NOTUSED(req_comp); + + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + if (!stbi__mad3sizes_valid(4, g->w, g->h, 0)) + return stbi__errpuc("too large", "GIF image is too large"); + pcount = g->w * g->h; + g->out = (stbi_uc *) stbi__malloc(4 * pcount); + g->background = (stbi_uc *) stbi__malloc(4 * pcount); + g->history = (stbi_uc *) stbi__malloc(pcount); + if (!g->out || !g->background || !g->history) + return stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "transparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to the color that was there the previous frame. 
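+      // (g->out is the composited RGBA frame, g->background is what a
+      // "restore to background" disposal reverts to, and g->history marks
+      // which pixels the most recent frame actually wrote.)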
+ memset(g->out, 0x00, 4 * pcount); + memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent) + memset(g->history, 0x00, pcount); // pixels that were affected previous frame + first_frame = 1; + } else { + // second frame - how do we dispose of the previous one? + dispose = (g->eflags & 0x1C) >> 2; + pcount = g->w * g->h; + + if ((dispose == 3) && (two_back == 0)) { + dispose = 2; // if I don't have an image to revert back to, default to the old background + } + + if (dispose == 3) { // use previous graphic + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); + } + } + } else if (dispose == 2) { + // restore what was changed last frame to background before that frame; + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); + } + } + } else { + // This is a non-disposal case eithe way, so just + // leave the pixels as is, and they will become the new background + // 1: do not dispose + // 0: not specified. + } + + // background is what out is after the undoing of the previou frame; + memcpy( g->background, g->out, 4 * g->w * g->h ); + } + + // clear my history; + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + + for (;;) { + int tag = stbi__get8(s); + switch (tag) { + case 0x2C: /* Image Descriptor */ + { + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + // if the width of the specified rectangle is 0, that means + // we may not see *any* pixels or the image is malformed; + // to make sure this is caught, move the current y down to + // max_y (which is what out_gif_code checks). + if (w == 0) + g->cur_y = g->max_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (!o) return NULL; + + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi] == 0) { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; + memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); + } + } + } + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + int ext = stbi__get8(s); + if (ext == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. 
+ + // unset old transparent + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 0; + } + } else { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) { + stbi__skip(s, len); + } + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } +} + +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + if (stbi__gif_test(s)) { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + memset(&g, 0, sizeof(g)); + if (delays) { + *delays = 0; + } + + do { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + + if (u) { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; + + if (out) { + void *tmp = (stbi_uc*) STBI_REALLOC( out, layers * stride ); + if (NULL == tmp) { + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + return stbi__errpuc("outofmem", "Out of memory"); + } + else { + out = (stbi_uc*) tmp; + } + + if (delays) { + *delays = (int*) STBI_REALLOC( *delays, sizeof(int) * layers ); + } + } else { + out = (stbi_uc*)stbi__malloc( layers * stride ); + if (delays) { + *delays = (int*) stbi__malloc( layers * sizeof(int) ); + } + } + memcpy( out + ((layers - 1) * stride), u, stride ); + if (layers >= 2) { + two_back = out - 2 * stride; + } + + if (delays) { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); + + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + + *z = layers; + return out; + } else { + return stbi__errpuc("not GIF", "Image was not as a gif type."); + } +} + +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + STBI_NOTUSED(ri); + + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g.w; + *y = g.h; + + // moved conversion to after successful load so that the same + // can be done for multiple frames. + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } else if (g.out) { + // if there was an error and we allocated an image buffer, free it! 
+ STBI_FREE(g.out); + } + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; + // Exponent + f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! 
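+   // the resolution string normally reads "-Y height +X width" (height first,
+   // flipped-Y orientation); any other orientation flags are rejected below.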
+ token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + if (height > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + if (width > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + 
+ if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} +#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + stbi__rewind( s ); + if (p == NULL) + return 0; + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) { + if (info.bpp == 24 && info.ma == 0xff000000) + *comp = 3; + else + *comp = info.ma ? 4 : 3; + } + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + (void) stbi__get32be(s); + (void) stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained,dummy; + stbi__pic_packet packets[10]; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 
4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) +// Does not support 16-bit-per-channel + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n)) + return 0; + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + + if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y); + + if (req_comp && req_comp != s->img_n) { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv, dummy; + char c, p, t; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + stbi__rewind(s); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 
3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + + if (maxv > 255) + return stbi__err("max value > 255", "PPM image not 8-bit"); + else + return 1; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! + #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + return 0; +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__is_16_main(&s); +} + +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__is_16_main(&s); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs 
+ 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on 
integer casts truncating values + fix accidental rename of 'skip' field of I/O + 1.37 (2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... 
not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. 
We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/code/PostProcessing/ArmaturePopulate.cpp b/code/PostProcessing/ArmaturePopulate.cpp index 48dcecb15..fa5113a41 100644 --- a/code/PostProcessing/ArmaturePopulate.cpp +++ b/code/PostProcessing/ArmaturePopulate.cpp @@ -124,13 +124,13 @@ void ArmaturePopulate::BuildBoneList(aiNode *current_node, for (unsigned int boneId = 0; boneId < mesh->mNumBones; ++boneId) { aiBone *bone = mesh->mBones[boneId]; - ai_assert(bone); + ai_assert(nullptr != bone); // duplicate mehes exist with the same bones sometimes :) // so this must be detected if (std::find(bones.begin(), bones.end(), bone) == bones.end()) { // add the element once - bones.push_back(bone); + bones.emplace_back(bone); } } @@ -145,14 +145,14 @@ void ArmaturePopulate::BuildBoneList(aiNode *current_node, // Prepare flat node list which can be used for non recursive lookups later void ArmaturePopulate::BuildNodeList(const aiNode *current_node, std::vector &nodes) { - ai_assert(current_node); + ai_assert(nullptr != current_node); for (unsigned int nodeId = 0; nodeId < current_node->mNumChildren; ++nodeId) { aiNode *child = current_node->mChildren[nodeId]; ai_assert(child); if (child->mNumMeshes == 0) { - nodes.push_back(child); + nodes.emplace_back(child); } BuildNodeList(child, nodes); @@ -168,8 +168,10 @@ void ArmaturePopulate::BuildBoneStack(aiNode *, const std::vector &bones, std::map &bone_stack, std::vector &node_stack) { - ai_assert(root_node); - ai_assert(!node_stack.empty()); + if (node_stack.empty()) { + return; + } + ai_assert(nullptr != root_node); for (aiBone *bone : bones) { ai_assert(bone); @@ -181,7 +183,7 @@ void ArmaturePopulate::BuildBoneStack(aiNode *, node = GetNodeFromStack(bone->mName, node_stack); - if (!node) { + if (nullptr == node) { ASSIMP_LOG_ERROR("serious import issue node for bone was not detected"); continue; } @@ -199,7 +201,7 @@ void ArmaturePopulate::BuildBoneStack(aiNode *, // points. 
(yet) aiNode *ArmaturePopulate::GetArmatureRoot(aiNode *bone_node, std::vector &bone_list) { - while (bone_node) { + while (nullptr != bone_node) { if (!IsBoneNode(bone_node->mName, bone_list)) { ASSIMP_LOG_VERBOSE_DEBUG_F("GetArmatureRoot() Found valid armature: ", bone_node->mName.C_Str()); return bone_node; @@ -236,7 +238,7 @@ aiNode *ArmaturePopulate::GetNodeFromStack(const aiString &node_name, aiNode *found = nullptr; for (iter = nodes.begin(); iter < nodes.end(); ++iter) { aiNode *element = *iter; - ai_assert(element); + ai_assert(nullptr != element); // node valid and node name matches if (element->mName == node_name) { found = element; diff --git a/code/PostProcessing/ValidateDataStructure.cpp b/code/PostProcessing/ValidateDataStructure.cpp index e7392d9e5..3e0224eac 100644 --- a/code/PostProcessing/ValidateDataStructure.cpp +++ b/code/PostProcessing/ValidateDataStructure.cpp @@ -844,7 +844,8 @@ void ValidateDSProcess::Validate(const aiAnimation *pAnimation, Validate(&pMeshMorphAnim->mName); if (!pMeshMorphAnim->mNumKeys) { - ReportError("Empty mesh morph animation channel"); + ReportWarning("Empty mesh morph animation channel"); + return; } // otherwise check whether one of the keys exceeds the total duration of the animation diff --git a/contrib/draco/.clang-format b/contrib/draco/.clang-format new file mode 100644 index 000000000..533d35e6d --- /dev/null +++ b/contrib/draco/.clang-format @@ -0,0 +1,5 @@ +--- +Language: Cpp +BasedOnStyle: Google +PointerAlignment: Right +... diff --git a/contrib/draco/.cmake-format.py b/contrib/draco/.cmake-format.py new file mode 100644 index 000000000..64f2495b4 --- /dev/null +++ b/contrib/draco/.cmake-format.py @@ -0,0 +1,102 @@ +# Generated with cmake-format 0.5.1 +# How wide to allow formatted cmake files +line_width = 80 + +# How many spaces to tab for indent +tab_size = 2 + +# If arglists are longer than this, break them always +max_subargs_per_line = 10 + +# If true, separate flow control names from their parentheses with a space +separate_ctrl_name_with_space = False + +# If true, separate function names from parentheses with a space +separate_fn_name_with_space = False + +# If a statement is wrapped to more than one line, than dangle the closing +# parenthesis on its own line +dangle_parens = False + +# What character to use for bulleted lists +bullet_char = '*' + +# What character to use as punctuation after numerals in an enumerated list +enum_char = '.' + +# What style line endings to use in the output. +line_ending = u'unix' + +# Format command names consistently as 'lower' or 'upper' case +command_case = u'lower' + +# Format keywords consistently as 'lower' or 'upper' case +keyword_case = u'unchanged' + +# Specify structure for custom cmake functions +additional_commands = { + "foo": { + "flags": [ + "BAR", + "BAZ" + ], + "kwargs": { + "HEADERS": "*", + "DEPENDS": "*", + "SOURCES": "*" + } + } +} + +# A list of command names which should always be wrapped +always_wrap = [] + +# Specify the order of wrapping algorithms during successive reflow attempts +algorithm_order = [0, 1, 2, 3, 4] + +# If true, the argument lists which are known to be sortable will be sorted +# lexicographicall +autosort = False + +# enable comment markup parsing and reflow +enable_markup = True + +# If comment markup is enabled, don't reflow the first comment block in +# eachlistfile. Use this to preserve formatting of your +# copyright/licensestatements. 
+first_comment_is_literal = False + +# If comment markup is enabled, don't reflow any comment block which matchesthis +# (regex) pattern. Default is `None` (disabled). +literal_comment_pattern = None + +# Regular expression to match preformat fences in comments +# default=r'^\s*([`~]{3}[`~]*)(.*)$' +fence_pattern = u'^\\s*([`~]{3}[`~]*)(.*)$' + +# Regular expression to match rulers in comments +# default=r'^\s*[^\w\s]{3}.*[^\w\s]{3}$' +ruler_pattern = u'^\\s*[^\\w\\s]{3}.*[^\\w\\s]{3}$' + +# If true, emit the unicode byte-order mark (BOM) at the start of the file +emit_byteorder_mark = False + +# If a comment line starts with at least this many consecutive hash characters, +# then don't lstrip() them off. This allows for lazy hash rulers where the first +# hash char is not separated by space +hashruler_min_length = 10 + +# If true, then insert a space between the first hash char and remaining hash +# chars in a hash ruler, and normalize its length to fill the column +canonicalize_hashrulers = True + +# Specify the encoding of the input file. Defaults to utf-8. +input_encoding = u'utf-8' + +# Specify the encoding of the output file. Defaults to utf-8. Note that cmake +# only claims to support utf-8 so be careful when using anything else +output_encoding = u'utf-8' + +# A dictionary containing any per-command configuration overrides. Currently +# only `command_case` is supported. +per_command = {} diff --git a/contrib/draco/.gitignore b/contrib/draco/.gitignore new file mode 100644 index 000000000..522866ee2 --- /dev/null +++ b/contrib/draco/.gitignore @@ -0,0 +1 @@ +docs/_site diff --git a/contrib/draco/.ruby-version b/contrib/draco/.ruby-version new file mode 100644 index 000000000..276cbf9e2 --- /dev/null +++ b/contrib/draco/.ruby-version @@ -0,0 +1 @@ +2.3.0 diff --git a/contrib/draco/.travis.yml b/contrib/draco/.travis.yml new file mode 100644 index 000000000..e9ef7123f --- /dev/null +++ b/contrib/draco/.travis.yml @@ -0,0 +1,31 @@ +cache: ccache +language: cpp +matrix: + include: + - os: linux + dist: xenial + compiler: clang + - os: linux + dist: xenial + compiler: gcc + - os: osx + compiler: clang + +addons: + apt: + packages: + - cmake + +script: + # Output version info for compilers, cmake, and make + - ${CC} -v + - ${CXX} -v + - cmake --version + - make --version + # Clone googletest + - pushd .. && git clone https://github.com/google/googletest.git && popd + # Configure and build + - mkdir _travis_build && cd _travis_build + - cmake -G "Unix Makefiles" -DENABLE_TESTS=ON .. + - make -j10 + - ./draco_tests diff --git a/contrib/draco/AUTHORS b/contrib/draco/AUTHORS new file mode 100644 index 000000000..67f63a671 --- /dev/null +++ b/contrib/draco/AUTHORS @@ -0,0 +1,7 @@ +# This is the list of Draco authors for copyright purposes. +# +# This does not necessarily list everyone who has contributed code, since in +# some cases, their employer may be the copyright holder. To see the full list +# of contributors, see the revision history in source control. +Google Inc. 
+and other contributors diff --git a/contrib/draco/BUILDING.md b/contrib/draco/BUILDING.md new file mode 100644 index 000000000..d33917b88 --- /dev/null +++ b/contrib/draco/BUILDING.md @@ -0,0 +1,301 @@ +_**Contents**_ + + * [CMake Basics](#cmake-basics) + * [Mac OS X](#mac-os-x) + * [Windows](#windows) + * [CMake Build Configuration](#cmake-build-configuration) + * [Debugging and Optimization](#debugging-and-optimization) + * [Googletest Integration](#googletest-integration) + * [Javascript Encoder/Decoder](#javascript-encoderdecoder) + * [WebAssembly Decoder](#webassembly-decoder) + * [WebAssembly Mesh Only Decoder](#webassembly-mesh-only-decoder) + * [WebAssembly Point Cloud Only Decoder](#webassembly-point-cloud-only-decoder) + * [iOS Builds](#ios-builds) + * [Android Studio Project Integration](#android-studio-project-integration) + * [Native Android Builds](#native-android-builds) + * [vcpkg](#vcpkg) + +Building +======== +For all platforms, you must first generate the project/make files and then +compile the examples. + +CMake Basics +------------ + +To generate project/make files for the default toolchain on your system, run +`cmake` from a directory where you would like to generate build files, and pass +it the path to your Draco repository. + +E.g. Starting from Draco root. + +~~~~~ bash +$ mkdir build_dir && cd build_dir +$ cmake ../ +~~~~~ + +On Windows, the above command will produce Visual Studio project files for the +newest Visual Studio detected on the system. On Mac OS X and Linux systems, +the above command will produce a `makefile`. + +To control what types of projects are generated, add the `-G` parameter to the +`cmake` command. This argument must be followed by the name of a generator. +Running `cmake` with the `--help` argument will list the available +generators for your system. + +Mac OS X +--------- + +On Mac OS X, run the following command to generate Xcode projects: + +~~~~~ bash +$ cmake ../ -G Xcode +~~~~~ + +Windows +------- + +On a Windows box you would run the following command to generate Visual Studio +2019 projects: + +~~~~~ bash +C:\Users\nobody> cmake ../ -G "Visual Studio 16 2019" -A Win32 +~~~~~ + +To generate 64-bit Windows Visual Studio 2019 projects: + +~~~~~ bash +C:\Users\nobody> cmake ../ -G "Visual Studio 16 2019" -A x64 +~~~~~ + + +CMake Build Configuration +------------------------- + +Debugging and Optimization +-------------------------- + +Unlike Visual Studio and Xcode projects, the build configuration for make +builds is controlled when you run `cmake`. The following examples demonstrate +various build configurations. + +Omitting the build type produces makefiles that use release build flags +by default: + +~~~~~ bash +$ cmake ../ +~~~~~ + +A makefile using release (optimized) flags is produced like this: + +~~~~~ bash +$ cmake ../ -DCMAKE_BUILD_TYPE=Release +~~~~~ + +A release build with debug info can be produced as well: + +~~~~~ bash +$ cmake ../ -DCMAKE_BUILD_TYPE=RelWithDebInfo +~~~~~ + +And your standard debug build will be produced using: + +~~~~~ bash +$ cmake ../ -DCMAKE_BUILD_TYPE=Debug +~~~~~ + +To enable the use of sanitizers when the compiler in use supports them, set the +sanitizer type when running CMake: + +~~~~~ bash +$ cmake ../ -DDRACO_SANITIZE=address +~~~~~ + +Googletest Integration +---------------------- + +Draco includes testing support built using Googletest. 
To enable Googletest unit +test support the DRACO_TESTS cmake variable must be turned on at cmake +generation time: + +~~~~~ bash +$ cmake ../ -DDRACO_TESTS=ON +~~~~~ + +When cmake is used as shown in the above example the googletest directory must +be a sibling of the Draco repository root directory. To run the tests execute +`draco_tests` from your build output directory. + +WebAssembly Decoder +------------------- + +The WebAssembly decoder can be built using the existing cmake build file by +passing the path the Emscripten's cmake toolchain file at cmake generation time +in the CMAKE_TOOLCHAIN_FILE variable and enabling the WASM build option. +In addition, the EMSCRIPTEN environment variable must be set to the local path +of the parent directory of the Emscripten tools directory. + +~~~~~ bash +# Make the path to emscripten available to cmake. +$ export EMSCRIPTEN=/path/to/emscripten/tools/parent + +# Emscripten.cmake can be found within your Emscripten installation directory, +# it should be the subdir: cmake/Modules/Platform/Emscripten.cmake +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DDRACO_WASM=ON + +# Build the WebAssembly decoder. +$ make + +# Run the Javascript wrapper through Closure. +$ java -jar closure.jar --compilation_level SIMPLE --js draco_decoder.js --js_output_file draco_wasm_wrapper.js + +~~~~~ + +WebAssembly Mesh Only Decoder +----------------------------- + +~~~~~ bash + +# cmake command line for mesh only WebAssembly decoder. +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DDRACO_WASM=ON -DDRACO_POINT_CLOUD_COMPRESSION=OFF + +~~~~~ + +WebAssembly Point Cloud Only Decoder +----------------------------- + +~~~~~ bash + +# cmake command line for point cloud only WebAssembly decoder. +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DDRACO_WASM=ON -DDRACO_MESH_COMPRESSION=OFF + +~~~~~ + +Javascript Encoder/Decoder +------------------ + +The javascript encoder and decoder can be built using the existing cmake build +file by passing the path the Emscripten's cmake toolchain file at cmake +generation time in the CMAKE_TOOLCHAIN_FILE variable. +In addition, the EMSCRIPTEN environment variable must be set to the local path +of the parent directory of the Emscripten tools directory. + +*Note* The WebAssembly decoder should be favored over the JavaScript decoder. + +~~~~~ bash +# Make the path to emscripten available to cmake. +$ export EMSCRIPTEN=/path/to/emscripten/tools/parent + +# Emscripten.cmake can be found within your Emscripten installation directory, +# it should be the subdir: cmake/Modules/Platform/Emscripten.cmake +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake + +# Build the Javascript encoder and decoder. +$ make +~~~~~ + +iOS Builds +--------------------- +These are the basic commands needed to build Draco for iOS targets. +~~~~~ bash + +#arm64 +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/arm64-ios.cmake +$ make + +#x86_64 +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/x86_64-ios.cmake +$ make + +#armv7 +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/armv7-ios.cmake +$ make + +#i386 +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/i386-ios.cmake +$ make +~~~~~~ + +After building for each target the libraries can be merged into a single +universal/fat library using lipo, and then used in iOS applications. + + +Native Android Builds +--------------------- + +It's sometimes useful to build Draco command line tools and run them directly on +Android devices via adb. 
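+
+For reference, once the tools have been built with the commands shown below,
+pushing a binary to a device and running it looks roughly like this (the
+paths and encoder options here are illustrative, not prescriptive):
+
+~~~~~ bash
+# Copy the encoder tool to a writable location on the device.
+$ adb push ./draco_encoder /data/local/tmp/draco_encoder
+
+# Run it from an adb shell against a mesh previously pushed to the device.
+$ adb shell /data/local/tmp/draco_encoder \
+    -i /data/local/tmp/mesh.obj -o /data/local/tmp/mesh.drc
+~~~~~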
+ +~~~~~ bash +# This example is for armeabi-v7a. +$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/android.cmake \ + -DDRACO_ANDROID_NDK_PATH=path/to/ndk -DANDROID_ABI=armeabi-v7a +$ make + +# See the android.cmake toolchain file for additional ANDROID_ABI options and +# other configurable Android variables. +~~~~~ + +After building the tools they can be moved to an android device via the use of +`adb push`, and then run within an `adb shell` instance. + + +Android Studio Project Integration +---------------------------------- + +Tested on Android Studio 3.5.3. + + +Draco - Static Library +---------------------- + +To include Draco in an existing or new Android Studio project, reference it +from the `cmake` file of an existing native project that has a minimum SDK +version of 18 or higher. The project must support C++11. +To add Draco to your project: + + 1. Create a new "Native C++" project. + + 2. Add the following somewhere within the `CMakeLists.txt` for your project + before the `add_library()` for your project's native-lib: + + ~~~~~ cmake + # Note "/path/to/draco" must be changed to the path where you have cloned + # the Draco sources. + + add_subdirectory(/path/to/draco + ${CMAKE_BINARY_DIR}/draco_build) + include_directories("${CMAKE_BINARY_DIR}" /path/to/draco) + ~~~~~ + + 3. Add the library target "draco" to the `target_link_libraries()` call for + your project's native-lib. The `target_link_libraries()` call for an + empty activity native project looks like this after the addition of + Draco: + + ~~~~~ cmake + target_link_libraries( # Specifies the target library. + native-lib + + # Tells cmake this build depends on libdraco. + draco + + # Links the target library to the log library + # included in the NDK. + ${log-lib} ) + +vcpkg +--------------------- +You can download and install Draco using the +[vcpkg](https://github.com/Microsoft/vcpkg/) dependency manager: + + git clone https://github.com/Microsoft/vcpkg.git + cd vcpkg + ./bootstrap-vcpkg.sh + ./vcpkg integrate install + vcpkg install draco + +The Draco port in vcpkg is kept up to date by Microsoft team members and +community contributors. If the version is out of date, please +[create an issue or pull request](https://github.com/Microsoft/vcpkg) on the +vcpkg repository. diff --git a/contrib/draco/CMAKE.md b/contrib/draco/CMAKE.md new file mode 100644 index 000000000..392c6ce40 --- /dev/null +++ b/contrib/draco/CMAKE.md @@ -0,0 +1,106 @@ +# CMake Build System Overview + +[TOC] + +This document provides a general layout of the Draco CMake build system. + +## Core Build System Files + +These files are listed in order of interest to maintainers of the build system. + +- `CMakeLists.txt` is the main driver of the build system. It's responsible + for defining targets and source lists, surfacing build system options, and + tying the components of the build system together. + +- `cmake/draco_build_definitions.cmake` defines the macro + `draco_set_build_definitions()`, which is called from `CMakeLists.txt` to + configure include paths, compiler and linker flags, library settings, + platform speficic configuration, and other build system settings that + depend on optional build configurations. + +- `cmake/draco_targets.cmake` defines the macros `draco_add_library()` and + `draco_add_executable()` which are used to create all targets in the CMake + build. These macros attempt to behave in a manner that loosely mirrors the + blaze `cc_library()` and `cc_binary()` commands. 
Note that + `draco_add_executable()` is also used for tests. + +- `cmake/draco_emscripten.cmake` handles Emscripten SDK integration. It + defines several Emscripten specific macros that are required to build the + Emscripten specific targets defined in `CMakeLists.txt`. + +- `cmake/draco_flags.cmake` defines macros related to compiler and linker + flags. Testing macros, macros for isolating flags to specific source files, + and the main flag configuration function for the library are defined here. + +- `cmake/draco_options.cmake` defines macros that control optional features + of draco, and help track draco library and build system options. + +- `cmake/draco_install.cmake` defines the draco install target. + +- `cmake/draco_cpu_detection.cmake` determines the optimization types to + enable based on target system processor as reported by CMake. + +- `cmake/draco_intrinsics.cmake` manages flags for source files that use + intrinsics. It handles detection of whether flags are necessary, and the + application of the flags to the sources that need them when they are + required. + +## Helper and Utility Files + +- `.cmake-format.py` Defines coding style for cmake-format. + +- `cmake/draco_helpers.cmake` defines utility macros. + +- `cmake/draco_sanitizer.cmake` defines the `draco_configure_sanitizer()` + macro, which implements support for `DRACO_SANITIZE`. It handles the + compiler and linker flags necessary for using sanitizers like asan and msan. + +- `cmake/draco_variables.cmake` defines macros for tracking and control of + draco build system variables. + +## Toolchain Files + +These files help facilitate cross compiling of draco for various targets. + +- `cmake/toolchains/aarch64-linux-gnu.cmake` provides cross compilation + support for arm64 targets. + +- `cmake/toolchains/android.cmake` provides cross compilation support for + Android targets. + +- `cmake/toolchains/arm-linux-gnueabihf.cmake` provides cross compilation + support for armv7 targets. + +- `cmake/toolchains/arm64-ios.cmake`, `cmake/toolchains/armv7-ios.cmake`, + and `cmake/toolchains/armv7s-ios.cmake` provide support for iOS. + +- `cmake/toolchains/arm64-linux-gcc.cmake` and + `cmake/toolchains/armv7-linux-gcc.cmake` are deprecated, but remain for + compatibility. `cmake/toolchains/android.cmake` should be used instead. + +- `cmake/toolchains/arm64-android-ndk-libcpp.cmake`, + `cmake/toolchains/armv7-android-ndk-libcpp.cmake`, + `cmake/toolchains/x86-android-ndk-libcpp.cmake`, and + `cmake/toolchains/x86_64-android-ndk-libcpp.cmake` are deprecated, but + remain for compatibility. `cmake/toolchains/android.cmake` should be used + instead. + +- `cmake/toolchains/i386-ios.cmake` and `cmake/toolchains/x86_64-ios.cmake` + provide support for the iOS simulator. + +- `cmake/toolchains/android-ndk-common.cmake` and + `cmake/toolchains/arm-ios-common.cmake` are support files used by other + toolchain files. + +## Template Files + +These files are inputs to the CMake build and are used to generate inputs to the +build system output by CMake. + +- `cmake/draco-config.cmake.template` is used to produce + draco-config.cmake. draco-config.cmake can be used by CMake to find draco + when another CMake project depends on draco. + +- `cmake/draco.pc.template` is used to produce draco's pkg-config file. + Some build systems use pkg-config to configure include and library paths + when they depend upon third party libraries like draco. 
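+
+As an illustration of how the output of `cmake/draco-config.cmake.template` is
+meant to be consumed, a downstream project can locate an installed copy of
+draco with `find_package()`. The sketch below is only an example; the exact
+package, variable, and target names it assumes depend on the installed draco
+version and configuration:
+
+~~~~~ cmake
+# Hypothetical consumer CMakeLists.txt -- adjust names to your install.
+cmake_minimum_required(VERSION 3.12)
+project(draco_consumer CXX)
+
+# Resolves draco-config.cmake from the draco install prefix
+# (pass -DCMAKE_PREFIX_PATH=/path/to/draco/install if needed).
+find_package(draco REQUIRED)
+
+add_executable(consumer main.cc)
+
+# The exported name may be a plain "draco" library or a namespaced
+# "draco::draco" target depending on the draco release in use.
+target_link_libraries(consumer PRIVATE draco)
+~~~~~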
diff --git a/contrib/draco/CMakeLists.txt b/contrib/draco/CMakeLists.txt new file mode 100644 index 000000000..3da2c664a --- /dev/null +++ b/contrib/draco/CMakeLists.txt @@ -0,0 +1,958 @@ +cmake_minimum_required(VERSION 3.12 FATAL_ERROR) + +# Draco requires C++11. +set(CMAKE_CXX_STANDARD 11) +project(draco C CXX) + +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Release) +endif() + +set(draco_root "${CMAKE_CURRENT_SOURCE_DIR}") +set(draco_src_root "${draco_root}/src/draco") +set(draco_build "${CMAKE_BINARY_DIR}") + +if("${draco_root}" STREQUAL "${draco_build}") + message( + FATAL_ERROR "Building from within the Draco source tree is not supported.\n" + "Hint: Run these commands\n" + "$ rm -rf CMakeCache.txt CMakeFiles\n" + "$ mkdir -p ../draco_build\n" "$ cd ../draco_build\n" + "And re-run CMake from the draco_build directory.") +endif() + +include(CMakePackageConfigHelpers) +include(FindPythonInterp) +include("${draco_root}/cmake/draco_build_definitions.cmake") +include("${draco_root}/cmake/draco_cpu_detection.cmake") +include("${draco_root}/cmake/draco_emscripten.cmake") +include("${draco_root}/cmake/draco_flags.cmake") +include("${draco_root}/cmake/draco_helpers.cmake") +include("${draco_root}/cmake/draco_install.cmake") +include("${draco_root}/cmake/draco_intrinsics.cmake") +include("${draco_root}/cmake/draco_options.cmake") +include("${draco_root}/cmake/draco_sanitizer.cmake") +include("${draco_root}/cmake/draco_targets.cmake") +include("${draco_root}/cmake/draco_tests.cmake") +include("${draco_root}/cmake/draco_variables.cmake") + +# C++ and linker flags. +draco_track_configuration_variable(DRACO_CXX_FLAGS) +draco_track_configuration_variable(DRACO_EXE_LINKER_FLAGS) + +# Sanitizer integration. +draco_track_configuration_variable(DRACO_SANITIZE) + +# Generated source file directory. +draco_track_configuration_variable(DRACO_GENERATED_SOURCES_DIRECTORY) + +# Controls use of std::mutex and absl::Mutex in ThreadPool. +draco_track_configuration_variable(DRACO_THREADPOOL_USE_STD_MUTEX) + +if(DRACO_VERBOSE) + draco_dump_cmake_flag_variables() + draco_dump_tracked_configuration_variables() + draco_dump_options() +endif() + +# Compiler/linker flags must be lists, but come in from the environment as +# strings. Break them up: +if(NOT "${DRACO_CXX_FLAGS}" STREQUAL "") + separate_arguments(DRACO_CXX_FLAGS) +endif() +if(NOT "${DRACO_EXE_LINKER_FLAGS}" STREQUAL "") + separate_arguments(DRACO_EXE_LINKER_FLAGS) +endif() + +draco_reset_target_lists() +draco_setup_options() +draco_set_build_definitions() +draco_set_cxx_flags() +draco_generate_features_h() + +# Draco source file listing variables. 
+list(APPEND draco_attributes_sources + "${draco_src_root}/attributes/attribute_octahedron_transform.cc" + "${draco_src_root}/attributes/attribute_octahedron_transform.h" + "${draco_src_root}/attributes/attribute_quantization_transform.cc" + "${draco_src_root}/attributes/attribute_quantization_transform.h" + "${draco_src_root}/attributes/attribute_transform.cc" + "${draco_src_root}/attributes/attribute_transform.h" + "${draco_src_root}/attributes/attribute_transform_data.h" + "${draco_src_root}/attributes/attribute_transform_type.h" + "${draco_src_root}/attributes/geometry_attribute.cc" + "${draco_src_root}/attributes/geometry_attribute.h" + "${draco_src_root}/attributes/geometry_indices.h" + "${draco_src_root}/attributes/point_attribute.cc" + "${draco_src_root}/attributes/point_attribute.h") + +list( + APPEND + draco_compression_attributes_dec_sources + "${draco_src_root}/compression/attributes/attributes_decoder.cc" + "${draco_src_root}/compression/attributes/attributes_decoder.h" + "${draco_src_root}/compression/attributes/kd_tree_attributes_decoder.cc" + "${draco_src_root}/compression/attributes/kd_tree_attributes_decoder.h" + "${draco_src_root}/compression/attributes/kd_tree_attributes_shared.h" + "${draco_src_root}/compression/attributes/mesh_attribute_indices_encoding_data.h" + "${draco_src_root}/compression/attributes/normal_compression_utils.h" + "${draco_src_root}/compression/attributes/point_d_vector.h" + "${draco_src_root}/compression/attributes/sequential_attribute_decoder.cc" + "${draco_src_root}/compression/attributes/sequential_attribute_decoder.h" + "${draco_src_root}/compression/attributes/sequential_attribute_decoders_controller.cc" + "${draco_src_root}/compression/attributes/sequential_attribute_decoders_controller.h" + "${draco_src_root}/compression/attributes/sequential_integer_attribute_decoder.cc" + "${draco_src_root}/compression/attributes/sequential_integer_attribute_decoder.h" + "${draco_src_root}/compression/attributes/sequential_normal_attribute_decoder.cc" + "${draco_src_root}/compression/attributes/sequential_normal_attribute_decoder.h" + "${draco_src_root}/compression/attributes/sequential_quantization_attribute_decoder.cc" + "${draco_src_root}/compression/attributes/sequential_quantization_attribute_decoder.h" + ) + +list( + APPEND + draco_compression_attributes_enc_sources + "${draco_src_root}/compression/attributes/attributes_encoder.cc" + "${draco_src_root}/compression/attributes/attributes_encoder.h" + "${draco_src_root}/compression/attributes/kd_tree_attributes_encoder.cc" + "${draco_src_root}/compression/attributes/kd_tree_attributes_encoder.h" + "${draco_src_root}/compression/attributes/linear_sequencer.h" + "${draco_src_root}/compression/attributes/points_sequencer.h" + "${draco_src_root}/compression/attributes/sequential_attribute_encoder.cc" + "${draco_src_root}/compression/attributes/sequential_attribute_encoder.h" + "${draco_src_root}/compression/attributes/sequential_attribute_encoders_controller.cc" + "${draco_src_root}/compression/attributes/sequential_attribute_encoders_controller.h" + "${draco_src_root}/compression/attributes/sequential_integer_attribute_encoder.cc" + "${draco_src_root}/compression/attributes/sequential_integer_attribute_encoder.h" + "${draco_src_root}/compression/attributes/sequential_normal_attribute_encoder.cc" + "${draco_src_root}/compression/attributes/sequential_normal_attribute_encoder.h" + "${draco_src_root}/compression/attributes/sequential_quantization_attribute_encoder.cc" + 
"${draco_src_root}/compression/attributes/sequential_quantization_attribute_encoder.h" + ) + + +list( + APPEND + draco_compression_attributes_pred_schemes_dec_sources + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_factory.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_interface.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h" + ) + +list( + APPEND + draco_compression_attributes_pred_schemes_enc_sources + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h" + 
"${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_factory.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_interface.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h" + ) + +list( + APPEND + draco_compression_bit_coders_sources + "${draco_src_root}/compression/bit_coders/adaptive_rans_bit_coding_shared.h" + "${draco_src_root}/compression/bit_coders/adaptive_rans_bit_decoder.cc" + "${draco_src_root}/compression/bit_coders/adaptive_rans_bit_decoder.h" + "${draco_src_root}/compression/bit_coders/adaptive_rans_bit_encoder.cc" + "${draco_src_root}/compression/bit_coders/adaptive_rans_bit_encoder.h" + "${draco_src_root}/compression/bit_coders/direct_bit_decoder.cc" + "${draco_src_root}/compression/bit_coders/direct_bit_decoder.h" + "${draco_src_root}/compression/bit_coders/direct_bit_encoder.cc" + "${draco_src_root}/compression/bit_coders/direct_bit_encoder.h" + "${draco_src_root}/compression/bit_coders/folded_integer_bit_decoder.h" + "${draco_src_root}/compression/bit_coders/folded_integer_bit_encoder.h" + "${draco_src_root}/compression/bit_coders/rans_bit_decoder.cc" + 
"${draco_src_root}/compression/bit_coders/rans_bit_decoder.h" + "${draco_src_root}/compression/bit_coders/rans_bit_encoder.cc" + "${draco_src_root}/compression/bit_coders/rans_bit_encoder.h" + "${draco_src_root}/compression/bit_coders/symbol_bit_decoder.cc" + "${draco_src_root}/compression/bit_coders/symbol_bit_decoder.h" + "${draco_src_root}/compression/bit_coders/symbol_bit_encoder.cc" + "${draco_src_root}/compression/bit_coders/symbol_bit_encoder.h") + +list(APPEND draco_enc_config_sources + "${draco_src_root}/compression/config/compression_shared.h" + "${draco_src_root}/compression/config/draco_options.h" + "${draco_src_root}/compression/config/encoder_options.h" + "${draco_src_root}/compression/config/encoding_features.h") + +list(APPEND draco_dec_config_sources + "${draco_src_root}/compression/config/compression_shared.h" + "${draco_src_root}/compression/config/decoder_options.h" + "${draco_src_root}/compression/config/draco_options.h") + +list(APPEND draco_compression_decode_sources + "${draco_src_root}/compression/decode.cc" + "${draco_src_root}/compression/decode.h") + +list(APPEND draco_compression_encode_sources + "${draco_src_root}/compression/encode.cc" + "${draco_src_root}/compression/encode.h" + "${draco_src_root}/compression/encode_base.h" + "${draco_src_root}/compression/expert_encode.cc" + "${draco_src_root}/compression/expert_encode.h") + +list( + APPEND + draco_compression_mesh_traverser_sources + "${draco_src_root}/compression/mesh/traverser/depth_first_traverser.h" + "${draco_src_root}/compression/mesh/traverser/max_prediction_degree_traverser.h" + "${draco_src_root}/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h" + "${draco_src_root}/compression/mesh/traverser/mesh_traversal_sequencer.h" + "${draco_src_root}/compression/mesh/traverser/traverser_base.h") + +list( + APPEND + draco_compression_mesh_dec_sources + "${draco_src_root}/compression/mesh/mesh_decoder.cc" + "${draco_src_root}/compression/mesh/mesh_decoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder.cc" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder_impl.cc" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder_impl.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_decoder_impl_interface.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_shared.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_decoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h" + "${draco_src_root}/compression/mesh/mesh_sequential_decoder.cc" + "${draco_src_root}/compression/mesh/mesh_sequential_decoder.h") + +list( + APPEND + draco_compression_mesh_enc_sources + "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder.cc" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder_impl.cc" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder_impl.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_shared.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_encoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h" + 
"${draco_src_root}/compression/mesh/mesh_encoder.cc" + "${draco_src_root}/compression/mesh/mesh_encoder.h" + "${draco_src_root}/compression/mesh/mesh_sequential_encoder.cc" + "${draco_src_root}/compression/mesh/mesh_sequential_encoder.h") + +list( + APPEND + draco_compression_point_cloud_dec_sources + "${draco_src_root}/compression/point_cloud/point_cloud_decoder.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_decoder.h" + "${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_decoder.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_decoder.h" + "${draco_src_root}/compression/point_cloud/point_cloud_sequential_decoder.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_sequential_decoder.h" + ) + +list( + APPEND + draco_compression_point_cloud_enc_sources + "${draco_src_root}/compression/point_cloud/point_cloud_encoder.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_encoder.h" + "${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_encoder.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_encoder.h" + "${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoder.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoder.h" + ) + +list(APPEND draco_compression_entropy_sources + "${draco_src_root}/compression/entropy/ans.h" + "${draco_src_root}/compression/entropy/rans_symbol_coding.h" + "${draco_src_root}/compression/entropy/rans_symbol_decoder.h" + "${draco_src_root}/compression/entropy/rans_symbol_encoder.h" + "${draco_src_root}/compression/entropy/shannon_entropy.cc" + "${draco_src_root}/compression/entropy/shannon_entropy.h" + "${draco_src_root}/compression/entropy/symbol_decoding.cc" + "${draco_src_root}/compression/entropy/symbol_decoding.h" + "${draco_src_root}/compression/entropy/symbol_encoding.cc" + "${draco_src_root}/compression/entropy/symbol_encoding.h") + +list(APPEND draco_core_sources + "${draco_src_root}/core/bit_utils.cc" + "${draco_src_root}/core/bit_utils.h" + "${draco_src_root}/core/bounding_box.cc" + "${draco_src_root}/core/bounding_box.h" + "${draco_src_root}/core/cycle_timer.cc" + "${draco_src_root}/core/cycle_timer.h" + "${draco_src_root}/core/data_buffer.cc" + "${draco_src_root}/core/data_buffer.h" + "${draco_src_root}/core/decoder_buffer.cc" + "${draco_src_root}/core/decoder_buffer.h" + "${draco_src_root}/core/divide.cc" + "${draco_src_root}/core/divide.h" + "${draco_src_root}/core/draco_index_type.h" + "${draco_src_root}/core/draco_index_type_vector.h" + "${draco_src_root}/core/draco_types.cc" + "${draco_src_root}/core/draco_types.h" + "${draco_src_root}/core/encoder_buffer.cc" + "${draco_src_root}/core/encoder_buffer.h" + "${draco_src_root}/core/hash_utils.cc" + "${draco_src_root}/core/hash_utils.h" + "${draco_src_root}/core/macros.h" + "${draco_src_root}/core/math_utils.h" + "${draco_src_root}/core/options.cc" + "${draco_src_root}/core/options.h" + "${draco_src_root}/core/quantization_utils.cc" + "${draco_src_root}/core/quantization_utils.h" + "${draco_src_root}/core/status.h" + "${draco_src_root}/core/status_or.h" + "${draco_src_root}/core/varint_decoding.h" + "${draco_src_root}/core/varint_encoding.h" + "${draco_src_root}/core/vector_d.h") + +list(APPEND draco_io_sources + "${draco_src_root}/io/file_reader_factory.cc" + "${draco_src_root}/io/file_reader_factory.h" + "${draco_src_root}/io/file_reader_interface.h" + "${draco_src_root}/io/file_utils.cc" + "${draco_src_root}/io/file_utils.h" + "${draco_src_root}/io/file_writer_factory.cc" + 
"${draco_src_root}/io/file_writer_factory.h" + "${draco_src_root}/io/file_writer_interface.h" + "${draco_src_root}/io/file_writer_utils.h" + "${draco_src_root}/io/file_writer_utils.cc" + "${draco_src_root}/io/mesh_io.cc" + "${draco_src_root}/io/mesh_io.h" + "${draco_src_root}/io/obj_decoder.cc" + "${draco_src_root}/io/obj_decoder.h" + "${draco_src_root}/io/obj_encoder.cc" + "${draco_src_root}/io/obj_encoder.h" + "${draco_src_root}/io/parser_utils.cc" + "${draco_src_root}/io/parser_utils.h" + "${draco_src_root}/io/ply_decoder.cc" + "${draco_src_root}/io/ply_decoder.h" + "${draco_src_root}/io/ply_encoder.cc" + "${draco_src_root}/io/ply_encoder.h" + "${draco_src_root}/io/ply_property_reader.h" + "${draco_src_root}/io/ply_property_writer.h" + "${draco_src_root}/io/ply_reader.cc" + "${draco_src_root}/io/ply_reader.h" + "${draco_src_root}/io/point_cloud_io.cc" + "${draco_src_root}/io/point_cloud_io.h" + "${draco_src_root}/io/stdio_file_reader.cc" + "${draco_src_root}/io/stdio_file_reader.h" + "${draco_src_root}/io/stdio_file_writer.cc" + "${draco_src_root}/io/stdio_file_writer.h") + +list(APPEND draco_mesh_sources + "${draco_src_root}/mesh/corner_table.cc" + "${draco_src_root}/mesh/corner_table.h" + "${draco_src_root}/mesh/corner_table_iterators.h" + "${draco_src_root}/mesh/mesh.cc" + "${draco_src_root}/mesh/mesh.h" + "${draco_src_root}/mesh/mesh_are_equivalent.cc" + "${draco_src_root}/mesh/mesh_are_equivalent.h" + "${draco_src_root}/mesh/mesh_attribute_corner_table.cc" + "${draco_src_root}/mesh/mesh_attribute_corner_table.h" + "${draco_src_root}/mesh/mesh_cleanup.cc" + "${draco_src_root}/mesh/mesh_cleanup.h" + "${draco_src_root}/mesh/mesh_misc_functions.cc" + "${draco_src_root}/mesh/mesh_misc_functions.h" + "${draco_src_root}/mesh/mesh_stripifier.cc" + "${draco_src_root}/mesh/mesh_stripifier.h" + "${draco_src_root}/mesh/triangle_soup_mesh_builder.cc" + "${draco_src_root}/mesh/triangle_soup_mesh_builder.h" + "${draco_src_root}/mesh/valence_cache.h") + +list(APPEND draco_point_cloud_sources + "${draco_src_root}/point_cloud/point_cloud.cc" + "${draco_src_root}/point_cloud/point_cloud.h" + "${draco_src_root}/point_cloud/point_cloud_builder.cc" + "${draco_src_root}/point_cloud/point_cloud_builder.h") + +list( + APPEND + draco_points_common_sources + "${draco_src_root}/compression/point_cloud/algorithms/point_cloud_compression_method.h" + "${draco_src_root}/compression/point_cloud/algorithms/point_cloud_types.h" + "${draco_src_root}/compression/point_cloud/algorithms/quantize_points_3.h" + "${draco_src_root}/compression/point_cloud/algorithms/queuing_policy.h") + +list( + APPEND + draco_points_dec_sources + "${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.cc" + "${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h" + "${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_decoder.cc" + "${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_decoder.h" + ) + +list( + APPEND + draco_points_enc_sources + "${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.cc" + "${draco_src_root}/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h" + "${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_encoder.cc" + "${draco_src_root}/compression/point_cloud/algorithms/float_points_tree_encoder.h" + ) + +list(APPEND draco_metadata_sources + "${draco_src_root}/metadata/geometry_metadata.cc" + 
"${draco_src_root}/metadata/geometry_metadata.h" + "${draco_src_root}/metadata/metadata.cc" + "${draco_src_root}/metadata/metadata.h") + +list(APPEND draco_metadata_enc_sources + "${draco_src_root}/metadata/metadata_encoder.cc" + "${draco_src_root}/metadata/metadata_encoder.h") + +list(APPEND draco_metadata_dec_sources + "${draco_src_root}/metadata/metadata_decoder.cc" + "${draco_src_root}/metadata/metadata_decoder.h") + +list(APPEND draco_animation_sources + "${draco_src_root}/animation/keyframe_animation.cc" + "${draco_src_root}/animation/keyframe_animation.h") + +list(APPEND draco_animation_enc_sources + "${draco_src_root}/animation/keyframe_animation_encoder.cc" + "${draco_src_root}/animation/keyframe_animation_encoder.h") + +list(APPEND draco_animation_dec_sources + "${draco_src_root}/animation/keyframe_animation_decoder.cc" + "${draco_src_root}/animation/keyframe_animation_decoder.h") + +list( + APPEND draco_js_dec_sources + "${draco_src_root}/javascript/emscripten/decoder_webidl_wrapper.cc" + "${draco_src_root}/javascript/emscripten/draco_decoder_glue_wrapper.cc" + ) + +list( + APPEND draco_js_enc_sources + "${draco_src_root}/javascript/emscripten/draco_encoder_glue_wrapper.cc" + "${draco_src_root}/javascript/emscripten/encoder_webidl_wrapper.cc") + +list( + APPEND + draco_animation_js_dec_sources + "${draco_src_root}/javascript/emscripten/animation_decoder_webidl_wrapper.cc" + "${draco_src_root}/javascript/emscripten/draco_animation_decoder_glue_wrapper.cc" + ) + +list( + APPEND + draco_animation_js_enc_sources + "${draco_src_root}/javascript/emscripten/animation_encoder_webidl_wrapper.cc" + "${draco_src_root}/javascript/emscripten/draco_animation_encoder_glue_wrapper.cc" + ) + +list(APPEND draco_unity_plug_sources + "${draco_src_root}/unity/draco_unity_plugin.cc" + "${draco_src_root}/unity/draco_unity_plugin.h") + +list(APPEND draco_maya_plug_sources + "${draco_src_root}/maya/draco_maya_plugin.cc" + "${draco_src_root}/maya/draco_maya_plugin.h") + +# +# Draco targets. +# +if(EMSCRIPTEN AND DRACO_JS_GLUE) + # Draco decoder and encoder "executable" targets in various flavors for + # Emsscripten. 
+ list(APPEND draco_decoder_src + ${draco_attributes_sources} + ${draco_compression_attributes_dec_sources} + ${draco_compression_attributes_pred_schemes_dec_sources} + ${draco_compression_bit_coders_sources} + ${draco_compression_decode_sources} + ${draco_compression_entropy_sources} + ${draco_compression_mesh_traverser_sources} + ${draco_compression_mesh_dec_sources} + ${draco_compression_point_cloud_dec_sources} + ${draco_core_sources} + ${draco_dec_config_sources} + ${draco_js_dec_sources} + ${draco_mesh_sources} + ${draco_metadata_dec_sources} + ${draco_metadata_sources} + ${draco_point_cloud_sources} + ${draco_points_dec_sources}) + + list(APPEND draco_encoder_src + ${draco_attributes_sources} + ${draco_compression_attributes_enc_sources} + ${draco_compression_attributes_pred_schemes_enc_sources} + ${draco_compression_bit_coders_sources} + ${draco_compression_encode_sources} + ${draco_compression_entropy_sources} + ${draco_compression_mesh_traverser_sources} + ${draco_compression_mesh_enc_sources} + ${draco_compression_point_cloud_enc_sources} + ${draco_core_sources} + ${draco_enc_config_sources} + ${draco_js_enc_sources} + ${draco_mesh_sources} + ${draco_metadata_enc_sources} + ${draco_metadata_sources} + ${draco_point_cloud_sources} + ${draco_points_enc_sources}) + + list(APPEND draco_js_dec_idl + "${draco_src_root}/javascript/emscripten/draco_web_decoder.idl") + list(APPEND draco_js_enc_idl + "${draco_src_root}/javascript/emscripten/draco_web_encoder.idl") + list( + APPEND + draco_animation_js_dec_idl + "${draco_src_root}/javascript/emscripten/draco_animation_web_decoder.idl") + list( + APPEND + draco_animation_js_enc_idl + "${draco_src_root}/javascript/emscripten/draco_animation_web_encoder.idl") + list(APPEND draco_pre_link_js_sources + "${draco_src_root}/javascript/emscripten/prepareCallbacks.js" + "${draco_src_root}/javascript/emscripten/version.js") + list(APPEND draco_post_link_js_sources + "${draco_src_root}/javascript/emscripten/finalize.js") + list(APPEND draco_post_link_js_decoder_sources ${draco_post_link_js_sources} + "${draco_src_root}/javascript/emscripten/decoder_functions.js") + + set(draco_decoder_glue_path "${draco_build}/glue_decoder") + set(draco_encoder_glue_path "${draco_build}/glue_encoder") + + draco_generate_emscripten_glue(INPUT_IDL ${draco_js_dec_idl} OUTPUT_PATH + ${draco_decoder_glue_path}) + draco_generate_emscripten_glue(INPUT_IDL ${draco_js_enc_idl} OUTPUT_PATH + ${draco_encoder_glue_path}) + + if(DRACO_DECODER_ATTRIBUTE_DEDUPLICATION) + list(APPEND draco_decoder_features + "DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED" + "DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED") + endif() + + draco_add_emscripten_executable(NAME + draco_decoder + SOURCES + ${draco_decoder_src} + DEFINES + ${draco_defines} + FEATURES + ${draco_decoder_features} + INCLUDES + ${draco_include_paths} + LINK_FLAGS + "-sEXPORT_NAME=\"DracoDecoderModule\"" + GLUE_PATH + ${draco_decoder_glue_path} + PRE_LINK_JS_SOURCES + ${draco_pre_link_js_sources} + POST_LINK_JS_SOURCES + ${draco_post_link_js_decoder_sources}) + + draco_add_emscripten_executable( + NAME + draco_encoder + SOURCES + ${draco_encoder_src} + DEFINES + ${draco_defines} + FEATURES + DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED + DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED + INCLUDES + ${draco_include_paths} + LINK_FLAGS + "-sEXPORT_NAME=\"DracoEncoderModule\"" + GLUE_PATH + ${draco_encoder_glue_path} + PRE_LINK_JS_SOURCES + ${draco_pre_link_js_sources} + POST_LINK_JS_SOURCES + ${draco_post_link_js_sources}) + + 
if(DRACO_ANIMATION_ENCODING) + set(draco_anim_decoder_glue_path "${draco_build}/glue_animation_decoder") + set(draco_anim_encoder_glue_path "${draco_build}/glue_animation_encoder") + + draco_generate_emscripten_glue(INPUT_IDL ${draco_animation_js_dec_idl} + OUTPUT_PATH ${draco_anim_decoder_glue_path}) + draco_generate_emscripten_glue(INPUT_IDL ${draco_animation_js_enc_idl} + OUTPUT_PATH ${draco_anim_encoder_glue_path}) + + draco_add_emscripten_executable( + NAME + draco_animation_decoder + SOURCES + ${draco_animation_dec_sources} + ${draco_animation_js_dec_sources} + ${draco_animation_sources} + ${draco_decoder_src} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + LINK_FLAGS + "-sEXPORT_NAME=\"DracoAnimationDecoderModule\"" + GLUE_PATH + ${draco_anim_decoder_glue_path} + PRE_LINK_JS_SOURCES + ${draco_pre_link_js_sources} + POST_LINK_JS_SOURCES + ${draco_post_link_js_decoder_sources}) + + draco_add_emscripten_executable( + NAME + draco_animation_encoder + SOURCES + ${draco_animation_enc_sources} + ${draco_animation_js_enc_sources} + ${draco_animation_sources} + ${draco_encoder_src} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + LINK_FLAGS + "-sEXPORT_NAME=\"DracoAnimationEncoderModule\"" + GLUE_PATH + ${draco_anim_encoder_glue_path} + PRE_LINK_JS_SOURCES + ${draco_pre_link_js_sources} + POST_LINK_JS_SOURCES + ${draco_post_link_js_sources}) + endif() +else() + # Standard Draco libs, encoder and decoder. Object collections that mirror the + # Draco directory structure. + draco_add_library(NAME draco_attributes TYPE OBJECT SOURCES + ${draco_attributes_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME + draco_compression_attributes_dec + OBJECT + ${draco_compression_attributes_dec_sources} + TYPE + OBJECT + SOURCES + ${draco_compression_attributes_dec_sources} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths}) + draco_add_library(NAME draco_compression_attributes_enc TYPE OBJECT SOURCES + ${draco_compression_attributes_enc_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_attributes_pred_schemes_dec TYPE + OBJECT SOURCES + ${draco_compression_attributes_pred_schemes_dec_sources}) + draco_add_library(NAME draco_compression_attributes_pred_schemes_enc TYPE + OBJECT SOURCES + ${draco_compression_attributes_pred_schemes_enc_sources} + DEFINES ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_bit_coders TYPE OBJECT SOURCES + ${draco_compression_bit_coders_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_enc_config TYPE OBJECT SOURCES + ${draco_enc_config_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_dec_config TYPE OBJECT SOURCES + ${draco_dec_config_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_decode TYPE OBJECT SOURCES + ${draco_compression_decode_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_encode TYPE OBJECT SOURCES + ${draco_compression_encode_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_entropy TYPE OBJECT SOURCES + ${draco_compression_entropy_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_mesh_traverser TYPE OBJECT SOURCES + 
${draco_compression_mesh_traverser_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_mesh_dec TYPE OBJECT SOURCES + ${draco_compression_mesh_dec_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_mesh_enc TYPE OBJECT SOURCES + ${draco_compression_mesh_enc_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_point_cloud_dec TYPE OBJECT SOURCES + ${draco_compression_point_cloud_dec_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_compression_point_cloud_enc TYPE OBJECT SOURCES + ${draco_compression_point_cloud_enc_sources} DEFINES + ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_core TYPE OBJECT SOURCES ${draco_core_sources} + DEFINES ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_io TYPE OBJECT SOURCES ${draco_io_sources} + DEFINES ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_mesh TYPE OBJECT SOURCES ${draco_mesh_sources} + DEFINES ${draco_defines} INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_metadata_dec TYPE OBJECT SOURCES + ${draco_metadata_dec_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_metadata_enc TYPE OBJECT SOURCES + ${draco_metadata_enc_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_metadata TYPE OBJECT SOURCES + ${draco_metadata_sources} DEFINES ${draco_defines} INCLUDES + ${draco_include_paths}) + draco_add_library(NAME draco_animation_dec TYPE OBJECT SOURCES + ${draco_animation_dec_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_animation_enc TYPE OBJECT SOURCES + ${draco_animation_enc_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME draco_animation TYPE OBJECT SOURCES + ${draco_animation_sources} DEFINES ${draco_defines} INCLUDES + ${draco_include_paths}) + draco_add_library(NAME draco_point_cloud TYPE OBJECT SOURCES + ${draco_point_cloud_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + draco_add_library(NAME + draco_points_dec + TYPE + OBJECT + SOURCES + ${draco_points_common_sources} + ${draco_points_dec_sources} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths}) + draco_add_library(NAME + draco_points_enc + TYPE + OBJECT + SOURCES + ${draco_points_common_sources} + ${draco_points_enc_sources} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths}) + + set(draco_object_library_deps + draco_attributes + draco_compression_attributes_dec + draco_compression_attributes_enc + draco_compression_attributes_pred_schemes_dec + draco_compression_attributes_pred_schemes_enc + draco_compression_bit_coders + draco_compression_decode + draco_compression_encode + draco_compression_entropy + draco_compression_mesh_dec + draco_compression_mesh_enc + draco_compression_point_cloud_dec + draco_compression_point_cloud_enc + draco_core + draco_dec_config + draco_enc_config + draco_io + draco_mesh + draco_metadata + draco_metadata_dec + draco_metadata_enc + draco_animation + draco_animation_dec + draco_animation_enc + draco_point_cloud + draco_points_dec + draco_points_enc) + + # Library targets that consume the object collections. 
+ if(MSVC OR WIN32) + # In order to produce a DLL and import library the Windows tools require + # that the exported symbols are part of the DLL target. The unfortunate side + # effect of this is that a single configuration cannot output both the + # static library and the DLL: This results in an either/or situation. + # Windows users of the draco build can have a DLL and an import library, + # or they can have a static library; they cannot have both from a single + # configuration of the build. + if(BUILD_SHARED_LIBS) + set(draco_lib_type SHARED) + else() + set(draco_lib_type STATIC) + endif() + draco_add_library(NAME + draco + OUTPUT_NAME + draco + TYPE + ${draco_lib_type} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + OBJLIB_DEPS + ${draco_object_library_deps}) + + else() + draco_add_library(NAME + draco_static + OUTPUT_NAME + draco + TYPE + STATIC + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + OBJLIB_DEPS + ${draco_object_library_deps}) + + if(BUILD_SHARED_LIBS) + draco_add_library(NAME + draco_shared + SOURCES + "${draco_src_root}/core/draco_version.h" + OUTPUT_NAME + draco + TYPE + SHARED + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + LIB_DEPS + draco_static) + endif() + endif() + + if(DRACO_UNITY_PLUGIN) + if(IOS) + set(unity_decoder_lib_type STATIC) + else() + set(unity_decoder_lib_type MODULE) + endif() + + draco_add_library(NAME draco_unity_plugin TYPE OBJECT SOURCES + ${draco_unity_plug_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + + draco_add_library(NAME + dracodec_unity + TYPE + ${unity_decoder_lib_type} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + OBJLIB_DEPS + draco_unity_plugin + LIB_DEPS + ${draco_plugin_dependency}) + + # For Mac, we need to build a .bundle for the unity plugin. + if(APPLE) + set_target_properties(dracodec_unity PROPERTIES BUNDLE true) + elseif(NOT unity_decoder_lib_type STREQUAL STATIC) + set_target_properties(dracodec_unity + PROPERTIES SOVERSION ${DRACO_SOVERSION}) + endif() + endif() + + if(DRACO_MAYA_PLUGIN) + draco_add_library(NAME draco_maya_plugin TYPE OBJECT SOURCES + ${draco_maya_plug_sources} DEFINES ${draco_defines} + INCLUDES ${draco_include_paths}) + + draco_add_library(NAME + draco_maya_wrapper + TYPE + MODULE + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + OBJLIB_DEPS + draco_maya_plugin + LIB_DEPS + ${draco_plugin_dependency}) + + # For Mac, we need to build a .bundle for the plugin. + if(APPLE) + set_target_properties(draco_maya_wrapper PROPERTIES BUNDLE true) + else() + set_target_properties(draco_maya_wrapper + PROPERTIES SOVERSION ${DRACO_SOVERSION}) + endif() + endif() + + # Draco app targets. 
+ draco_add_executable(NAME + draco_decoder + SOURCES + "${draco_src_root}/tools/draco_decoder.cc" + ${draco_io_sources} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + LIB_DEPS + ${draco_dependency}) + + draco_add_executable(NAME + draco_encoder + SOURCES + "${draco_src_root}/tools/draco_encoder.cc" + ${draco_io_sources} + DEFINES + ${draco_defines} + INCLUDES + ${draco_include_paths} + LIB_DEPS + ${draco_dependency}) + + draco_setup_install_target() + draco_setup_test_targets() +endif() + +if(DRACO_VERBOSE) + draco_dump_cmake_flag_variables() + draco_dump_tracked_configuration_variables() + draco_dump_options() +endif() diff --git a/contrib/draco/CONTRIBUTING.md b/contrib/draco/CONTRIBUTING.md new file mode 100644 index 000000000..b7bab3447 --- /dev/null +++ b/contrib/draco/CONTRIBUTING.md @@ -0,0 +1,27 @@ +Want to contribute? Great! First, read this page (including the small print at the end). + +### Before you contribute +Before we can use your code, you must sign the +[Google Individual Contributor License Agreement](https://cla.developers.google.com/about/google-individual) +(CLA), which you can do online. The CLA is necessary mainly because you own the +copyright to your changes, even after your contribution becomes part of our +codebase, so we need your permission to use and distribute your code. We also +need to be sure of various other things—for instance that you'll tell us if you +know that your code infringes on other people's patents. You don't have to sign +the CLA until after you've submitted your code for review and a member has +approved it, but you must do it before we can put your code into our codebase. +Before you start working on a larger contribution, you should get in touch with +us first through the issue tracker with your idea so that we can help out and +possibly guide you. Coordinating up front makes it much easier to avoid +frustration later on. + +### Code reviews +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. +Please make sure that your code conforms with our +[coding style guidelines](https://google.github.io/styleguide/cppguide.html). + +### The small print +Contributions made by corporations are covered by a different agreement than +the one above, the +[Software Grant and Corporate Contributor License Agreement](https://cla.developers.google.com/about/google-corporate). diff --git a/contrib/draco/LICENSE b/contrib/draco/LICENSE new file mode 100644 index 000000000..301095454 --- /dev/null +++ b/contrib/draco/LICENSE @@ -0,0 +1,252 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +-------------------------------------------------------------------------------- +Files: docs/assets/js/ASCIIMathML.js + +Copyright (c) 2014 Peter Jipsen and other ASCIIMathML.js contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +-------------------------------------------------------------------------------- +Files: docs/assets/css/pygments/* + +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/contrib/draco/README.md b/contrib/draco/README.md new file mode 100644 index 000000000..add66edcb --- /dev/null +++ b/contrib/draco/README.md @@ -0,0 +1,478 @@ +
+ +![Build Status: master](https://travis-ci.org/google/draco.svg?branch=master) + +News +======= +### Version 1.4.1 release +* Using the versioned gstatic.com WASM and Javascript decoders is now + recommended. To use v1.4.1, use this URL: + * https://www.gstatic.com/draco/versioned/decoders/1.4.1/* + * Replace the * with the files to load. E.g. + * https://gstatic.com/draco/versioned/decoders/1.4.1/draco_decoder.js + * This works with the v1.3.6 and v1.4.0 releases, and will work with future + Draco releases. +* Bug fixes + +### Version 1.4.0 release +* WASM and JavaScript decoders are hosted from a static URL. + * It is recommended to always pull your Draco WASM and JavaScript decoders from this URL: + * https://www.gstatic.com/draco/v1/decoders/* + * Replace * with the files to load. E.g. + * https://www.gstatic.com/draco/v1/decoders/draco_decoder_gltf.wasm + * Users will benefit from having the Draco decoder in cache as more sites start using the static URL +* Changed npm modules to use WASM, which increased performance by ~200%. +* Updated Emscripten to 2.0. + * This causes the Draco codec modules to return a promise instead of the module directly. + * Please see the example code on how to handle the promise. +* Changed NORMAL quantization default to 8. +* Added new array API to decoder and deprecated DecoderBuffer. + * See PR https://github.com/google/draco/issues/513 for more information. +* Changed WASM/JavaScript behavior of catching exceptions. + * See issue https://github.com/google/draco/issues/629 for more information. +* Code cleanup. +* Emscripten builds now disable NODEJS_CATCH_EXIT and NODEJS_CATCH_REJECTION. + * Authors of a CLI tool might want to add their own error handlers. +* Added Maya plugin builds. +* Unity plugin builds updated. + * Builds are now stored as archives. + * Added iOS build. + * Unity users may want to look into https://github.com/atteneder/DracoUnity. +* Bug fixes. + +### Version 1.3.6 release +* WASM and JavaScript decoders are now hosted from a static URL + * It is recommended to always pull your Draco WASM and JavaScript decoders from this URL: + * https://www.gstatic.com/draco/v1/decoders/* + * Replace * with the files to load. E.g. 
+ * https://www.gstatic.com/draco/v1/decoders/draco_decoder_gltf.wasm + * Users will benefit from having the Draco decoder in cache as more sites start using the static URL +* Changed web examples to pull Draco decoders from static URL +* Added new API to Draco WASM decoder, which increased performance by ~15% +* Decreased Draco WASM decoder size by ~20% +* Added support for generic and multiple attributes to Draco Unity plug-ins +* Added new API to Draco Unity, which increased decoder performance by ~15% +* Changed quantization defaults: + * POSITION: 11 + * NORMAL: 7 + * TEX_COORD: 10 + * COLOR: 8 + * GENERIC: 8 +* Code cleanup +* Bug fixes + +### Version 1.3.5 release +* Added option to build Draco for Universal Scene Description +* Code cleanup +* Bug fixes + +### Version 1.3.4 release +* Released Draco Animation code +* Fixes for Unity +* Various file location and name changes + +### Version 1.3.3 release +* Added ExpertEncoder to the Javascript API + * Allows developers to set quantization options per attribute id +* Bug fixes + +### Version 1.3.2 release +* Bug fixes + +### Version 1.3.1 release +* Fix issue with multiple attributes when skipping an attribute transform + +### Version 1.3.0 release +* Improved kD-tree based point cloud encoding + * Now applicable to point clouds with any number of attributes + * Support for all integer attribute types and quantized floating point types +* Improved mesh compression up to 10% (on average ~2%) + * For meshes, the 1.3.0 bitstream is fully compatible with 1.2.x decoders +* Improved Javascript API + * Added support for all signed and unsigned integer types + * Added support for point clouds to our Javascript encoder API +* Added support for integer properties to the PLY decoder +* Bug fixes + +### Previous releases +https://github.com/google/draco/releases + +Description +=========== + +Draco is a library for compressing and decompressing 3D geometric [meshes] and +[point clouds]. It is intended to improve the storage and transmission of 3D +graphics. + +Draco was designed and built for compression efficiency and speed. The code +supports compressing points, connectivity information, texture coordinates, +color information, normals, and any other generic attributes associated with +geometry. With Draco, applications using 3D graphics can be significantly +smaller without compromising visual fidelity. For users, this means apps can +now be downloaded faster, 3D graphics in the browser can load quicker, and VR +and AR scenes can now be transmitted with a fraction of the bandwidth and +rendered quickly. + +Draco is released as C++ source code that can be used to compress 3D graphics +as well as C++ and Javascript decoders for the encoded data. 
+ + +_**Contents**_ + + * [Building](#building) + * [Usage](#usage) + * [Unity](#unity) + * [WASM and JavaScript Decoders](#WASM-and-JavaScript-Decoders) + * [Command Line Applications](#command-line-applications) + * [Encoding Tool](#encoding-tool) + * [Encoding Point Clouds](#encoding-point-clouds) + * [Decoding Tool](#decoding-tool) + * [C++ Decoder API](#c-decoder-api) + * [Javascript Encoder API](#javascript-encoder-api) + * [Javascript Decoder API](#javascript-decoder-api) + * [Javascript Decoder Performance](#javascript-decoder-performance) + * [Metadata API](#metadata-api) + * [NPM Package](#npm-package) + * [three.js Renderer Example](#threejs-renderer-example) + * [Support](#support) + * [License](#license) + * [References](#references) + + +Building +======== +See [BUILDING](BUILDING.md) for building instructions. + + +Usage +====== + +Unity +----- +For the best information about using Unity with Draco please visit https://github.com/atteneder/DracoUnity + +For a simple example of using Unity with Draco see [README](unity/README.md) in the unity folder. + +WASM and JavaScript Decoders +---------------------------- + +It is recommended to always pull your Draco WASM and JavaScript decoders from: + +~~~~~ bash +https://www.gstatic.com/draco/v1/decoders/ +~~~~~ + +Users will benefit from having the Draco decoder in cache as more sites start using the static URL. + +Command Line Applications +------------------------ + +The default target created from the build files will be the `draco_encoder` +and `draco_decoder` command line applications. For both applications, if you +run them without any arguments or `-h`, the applications will output usage and +options. + +Encoding Tool +------------- + +`draco_encoder` will read OBJ or PLY files as input, and output Draco-encoded +files. We have included Stanford's [Bunny] mesh for testing. The basic command +line looks like this: + +~~~~~ bash +./draco_encoder -i testdata/bun_zipper.ply -o out.drc +~~~~~ + +A value of `0` for the quantization parameter will not perform any quantization +on the specified attribute. Any value other than `0` will quantize the input +values for the specified attribute to that number of bits. For example: + +~~~~~ bash +./draco_encoder -i testdata/bun_zipper.ply -o out.drc -qp 14 +~~~~~ + +will quantize the positions to 14 bits (default is 11 for the position +coordinates). + +In general, the more you quantize your attributes the better compression rate +you will get. It is up to your project to decide how much deviation it will +tolerate. In general, most projects can set quantization values of about `11` +without any noticeable difference in quality. + +The compression level (`-cl`) parameter turns on/off different compression +features. + +~~~~~ bash +./draco_encoder -i testdata/bun_zipper.ply -o out.drc -cl 8 +~~~~~ + +In general, the highest setting, `10`, will have the most compression but +worst decompression speed. `0` will have the least compression, but best +decompression speed. The default setting is `7`. + +Encoding Point Clouds +--------------------- + +You can encode point cloud data with `draco_encoder` by specifying the +`-point_cloud` parameter. If you specify the `-point_cloud` parameter with a +mesh input file, `draco_encoder` will ignore the connectivity data and encode +the positions from the mesh file. 
+ +~~~~~ bash +./draco_encoder -point_cloud -i testdata/bun_zipper.ply -o out.drc +~~~~~ + +This command line will encode the mesh input as a point cloud, even though the +input might not produce compression that is representative of other point +clouds. Specifically, one can expect much better compression rates for larger +and denser point clouds. + +Decoding Tool +------------- + +`draco_decoder` will read Draco files as input, and output OBJ or PLY files. +The basic command line looks like this: + +~~~~~ bash +./draco_decoder -i in.drc -o out.obj +~~~~~ + +C++ Decoder API +------------- + +If you'd like to add decoding to your applications you will need to include +the `draco_dec` library. In order to use the Draco decoder you need to +initialize a `DecoderBuffer` with the compressed data. Then call +`DecodeMeshFromBuffer()` to return a decoded mesh object or call +`DecodePointCloudFromBuffer()` to return a decoded `PointCloud` object. For +example: + +~~~~~ cpp +draco::DecoderBuffer buffer; +buffer.Init(data.data(), data.size()); + +const draco::EncodedGeometryType geom_type = + draco::GetEncodedGeometryType(&buffer); +if (geom_type == draco::TRIANGULAR_MESH) { + unique_ptr mesh = draco::DecodeMeshFromBuffer(&buffer); +} else if (geom_type == draco::POINT_CLOUD) { + unique_ptr pc = draco::DecodePointCloudFromBuffer(&buffer); +} +~~~~~ + +Please see [src/draco/mesh/mesh.h](src/draco/mesh/mesh.h) for the full `Mesh` class interface and +[src/draco/point_cloud/point_cloud.h](src/draco/point_cloud/point_cloud.h) for the full `PointCloud` class interface. + + +Javascript Encoder API +---------------------- +The Javascript encoder is located in `javascript/draco_encoder.js`. The encoder +API can be used to compress mesh and point cloud. In order to use the encoder, +you need to first create an instance of `DracoEncoderModule`. Then use this +instance to create `MeshBuilder` and `Encoder` objects. `MeshBuilder` is used +to construct a mesh from geometry data that could be later compressed by +`Encoder`. First create a mesh object using `new encoderModule.Mesh()` . Then, +use `AddFacesToMesh()` to add indices to the mesh and use +`AddFloatAttributeToMesh()` to add attribute data to the mesh, e.g. position, +normal, color and texture coordinates. After a mesh is constructed, you could +then use `EncodeMeshToDracoBuffer()` to compress the mesh. 
For example: + +~~~~~ js +const mesh = { + indices : new Uint32Array(indices), + vertices : new Float32Array(vertices), + normals : new Float32Array(normals) +}; + +const encoderModule = DracoEncoderModule(); +const encoder = new encoderModule.Encoder(); +const meshBuilder = new encoderModule.MeshBuilder(); +const dracoMesh = new encoderModule.Mesh(); + +const numFaces = mesh.indices.length / 3; +const numPoints = mesh.vertices.length; +meshBuilder.AddFacesToMesh(dracoMesh, numFaces, mesh.indices); + +meshBuilder.AddFloatAttributeToMesh(dracoMesh, encoderModule.POSITION, + numPoints, 3, mesh.vertices); +if (mesh.hasOwnProperty('normals')) { + meshBuilder.AddFloatAttributeToMesh( + dracoMesh, encoderModule.NORMAL, numPoints, 3, mesh.normals); +} +if (mesh.hasOwnProperty('colors')) { + meshBuilder.AddFloatAttributeToMesh( + dracoMesh, encoderModule.COLOR, numPoints, 3, mesh.colors); +} +if (mesh.hasOwnProperty('texcoords')) { + meshBuilder.AddFloatAttributeToMesh( + dracoMesh, encoderModule.TEX_COORD, numPoints, 3, mesh.texcoords); +} + +if (method === "edgebreaker") { + encoder.SetEncodingMethod(encoderModule.MESH_EDGEBREAKER_ENCODING); +} else if (method === "sequential") { + encoder.SetEncodingMethod(encoderModule.MESH_SEQUENTIAL_ENCODING); +} + +const encodedData = new encoderModule.DracoInt8Array(); +// Use default encoding setting. +const encodedLen = encoder.EncodeMeshToDracoBuffer(dracoMesh, + encodedData); +encoderModule.destroy(dracoMesh); +encoderModule.destroy(encoder); +encoderModule.destroy(meshBuilder); + +~~~~~ +Please see [src/draco/javascript/emscripten/draco_web_encoder.idl](src/draco/javascript/emscripten/draco_web_encoder.idl) for the full API. + +Javascript Decoder API +---------------------- + +The Javascript decoder is located in [javascript/draco_decoder.js](javascript/draco_decoder.js). The +Javascript decoder can decode mesh and point cloud. In order to use the +decoder, you must first create an instance of `DracoDecoderModule`. The +instance is then used to create `DecoderBuffer` and `Decoder` objects. Set +the encoded data in the `DecoderBuffer`. Then call `GetEncodedGeometryType()` +to identify the type of geometry, e.g. mesh or point cloud. Then call either +`DecodeBufferToMesh()` or `DecodeBufferToPointCloud()`, which will return +a Mesh object or a point cloud. For example: + +~~~~~ js +// Create the Draco decoder. +const decoderModule = DracoDecoderModule(); +const buffer = new decoderModule.DecoderBuffer(); +buffer.Init(byteArray, byteArray.length); + +// Create a buffer to hold the encoded data. +const decoder = new decoderModule.Decoder(); +const geometryType = decoder.GetEncodedGeometryType(buffer); + +// Decode the encoded geometry. +let outputGeometry; +let status; +if (geometryType == decoderModule.TRIANGULAR_MESH) { + outputGeometry = new decoderModule.Mesh(); + status = decoder.DecodeBufferToMesh(buffer, outputGeometry); +} else { + outputGeometry = new decoderModule.PointCloud(); + status = decoder.DecodeBufferToPointCloud(buffer, outputGeometry); +} + +// You must explicitly delete objects created from the DracoDecoderModule +// or Decoder. +decoderModule.destroy(outputGeometry); +decoderModule.destroy(decoder); +decoderModule.destroy(buffer); +~~~~~ + +Please see [src/draco/javascript/emscripten/draco_web_decoder.idl](src/draco/javascript/emscripten/draco_web_decoder.idl) for the full API. + +Javascript Decoder Performance +------------------------------ + +The Javascript decoder is built with dynamic memory. 
This will let the decoder +work with all of the compressed data. But this option is not the fastest. +Pre-allocating the memory sees about a 2x decoder speed improvement. If you +know all of your project's memory requirements, you can turn on static memory +by changing `CMakeLists.txt` accordingly. + +Metadata API +------------ +Starting from v1.0, Draco provides metadata functionality for encoding data +other than geometry. It could be used to encode any custom data along with the +geometry. For example, we can enable metadata functionality to encode the name +of attributes, name of sub-objects and customized information. +For one mesh and point cloud, it can have one top-level geometry metadata class. +The top-level metadata then can have hierarchical metadata. Other than that, +the top-level metadata can have metadata for each attribute which is called +attribute metadata. The attribute metadata should be initialized with the +correspondent attribute id within the mesh. The metadata API is provided both +in C++ and Javascript. +For example, to add metadata in C++: + +~~~~~ cpp +draco::PointCloud pc; +// Add metadata for the geometry. +std::unique_ptr metadata = + std::unique_ptr(new draco::GeometryMetadata()); +metadata->AddEntryString("description", "This is an example."); +pc.AddMetadata(std::move(metadata)); + +// Add metadata for attributes. +draco::GeometryAttribute pos_att; +pos_att.Init(draco::GeometryAttribute::POSITION, nullptr, 3, + draco::DT_FLOAT32, false, 12, 0); +const uint32_t pos_att_id = pc.AddAttribute(pos_att, false, 0); + +std::unique_ptr pos_metadata = + std::unique_ptr( + new draco::AttributeMetadata(pos_att_id)); +pos_metadata->AddEntryString("name", "position"); + +// Directly add attribute metadata to geometry. +// You can do this without explicitly add |GeometryMetadata| to mesh. +pc.AddAttributeMetadata(pos_att_id, std::move(pos_metadata)); +~~~~~ + +To read metadata from a geometry in C++: + +~~~~~ cpp +// Get metadata for the geometry. +const draco::GeometryMetadata *pc_metadata = pc.GetMetadata(); + +// Request metadata for a specific attribute. +const draco::AttributeMetadata *requested_pos_metadata = + pc.GetAttributeMetadataByStringEntry("name", "position"); +~~~~~ + +Please see [src/draco/metadata](src/draco/metadata) and [src/draco/point_cloud](src/draco/point_cloud) for the full API. + +NPM Package +----------- +Draco NPM NodeJS package is located in [javascript/npm/draco3d](javascript/npm/draco3d). Please see the +doc in the folder for detailed usage. + +three.js Renderer Example +------------------------- + +Here's an [example] of a geometric compressed with Draco loaded via a +Javascript decoder using the `three.js` renderer. + +Please see the [javascript/example/README.md](javascript/example/README.md) file for more information. + +Support +======= + +For questions/comments please email + +If you have found an error in this library, please file an issue at + + +Patches are encouraged, and may be submitted by forking this project and +submitting a pull request through GitHub. See [CONTRIBUTING] for more detail. + +License +======= +Licensed under the Apache License, Version 2.0 (the "License"); you may not +use this file except in compliance with the License. You may obtain a copy of +the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +License for the specific language governing permissions and limitations under +the License. + +References +========== +[example]:https://storage.googleapis.com/demos.webmproject.org/draco/draco_loader_throw.html +[meshes]: https://en.wikipedia.org/wiki/Polygon_mesh +[point clouds]: https://en.wikipedia.org/wiki/Point_cloud +[Bunny]: https://graphics.stanford.edu/data/3Dscanrep/ +[CONTRIBUTING]: https://raw.githubusercontent.com/google/draco/master/CONTRIBUTING.md + +Bunny model from Stanford's graphic department diff --git a/contrib/draco/cmake/DracoConfig.cmake b/contrib/draco/cmake/DracoConfig.cmake new file mode 100644 index 000000000..be5e1faef --- /dev/null +++ b/contrib/draco/cmake/DracoConfig.cmake @@ -0,0 +1,3 @@ +@PACKAGE_INIT@ +set_and_check(draco_INCLUDE_DIR "@PACKAGE_draco_include_install_dir@") +set_and_check(draco_LIBRARY_DIR "@PACKAGE_draco_lib_install_dir@") diff --git a/contrib/draco/cmake/FindDraco.cmake b/contrib/draco/cmake/FindDraco.cmake new file mode 100644 index 000000000..0a9193065 --- /dev/null +++ b/contrib/draco/cmake/FindDraco.cmake @@ -0,0 +1,56 @@ +# Finddraco +# +# Locates draco and sets the following variables: +# +# draco_FOUND draco_INCLUDE_DIRS draco_LIBARY_DIRS draco_LIBRARIES +# draco_VERSION_STRING +# +# draco_FOUND is set to YES only when all other variables are successfully +# configured. + +unset(draco_FOUND) +unset(draco_INCLUDE_DIRS) +unset(draco_LIBRARY_DIRS) +unset(draco_LIBRARIES) +unset(draco_VERSION_STRING) + +mark_as_advanced(draco_FOUND) +mark_as_advanced(draco_INCLUDE_DIRS) +mark_as_advanced(draco_LIBRARY_DIRS) +mark_as_advanced(draco_LIBRARIES) +mark_as_advanced(draco_VERSION_STRING) + +set(draco_version_file_no_prefix "draco/src/draco/core/draco_version.h") + +# Set draco_INCLUDE_DIRS +find_path(draco_INCLUDE_DIRS NAMES "${draco_version_file_no_prefix}") + +# Extract the version string from draco_version.h. +if(draco_INCLUDE_DIRS) + set(draco_version_file + "${draco_INCLUDE_DIRS}/draco/src/draco/core/draco_version.h") + file(STRINGS "${draco_version_file}" draco_version REGEX "kdracoVersion") + list(GET draco_version 0 draco_version) + string(REPLACE "static const char kdracoVersion[] = " "" draco_version + "${draco_version}") + string(REPLACE ";" "" draco_version "${draco_version}") + string(REPLACE "\"" "" draco_version "${draco_version}") + set(draco_VERSION_STRING ${draco_version}) +endif() + +# Find the library. +if(BUILD_SHARED_LIBS) + find_library(draco_LIBRARIES NAMES draco.dll libdraco.dylib libdraco.so) +else() + find_library(draco_LIBRARIES NAMES draco.lib libdraco.a) +endif() + +# Store path to library. +get_filename_component(draco_LIBRARY_DIRS ${draco_LIBRARIES} DIRECTORY) + +if(draco_INCLUDE_DIRS + AND draco_LIBRARY_DIRS + AND draco_LIBRARIES + AND draco_VERSION_STRING) + set(draco_FOUND YES) +endif() diff --git a/contrib/draco/cmake/compiler_flags.cmake b/contrib/draco/cmake/compiler_flags.cmake new file mode 100644 index 000000000..8750e6f7d --- /dev/null +++ b/contrib/draco/cmake/compiler_flags.cmake @@ -0,0 +1,220 @@ +if(DRACO_CMAKE_COMPILER_FLAGS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_COMPILER_FLAGS_CMAKE_ 1) + +include(CheckCCompilerFlag) +include(CheckCXXCompilerFlag) +include("${draco_root}/cmake/compiler_tests.cmake") + +# Strings used to cache failed C/CXX flags. +set(DRACO_FAILED_C_FLAGS) +set(DRACO_FAILED_CXX_FLAGS) + +# Checks C compiler for support of $c_flag. Adds $c_flag to $CMAKE_C_FLAGS when +# the compile test passes. Caches $c_flag in $DRACO_FAILED_C_FLAGS when the test +# fails. 
+macro(add_c_flag_if_supported c_flag) + unset(C_FLAG_FOUND CACHE) + string(FIND "${CMAKE_C_FLAGS}" "${c_flag}" C_FLAG_FOUND) + unset(C_FLAG_FAILED CACHE) + string(FIND "${DRACO_FAILED_C_FLAGS}" "${c_flag}" C_FLAG_FAILED) + + if(${C_FLAG_FOUND} EQUAL -1 AND ${C_FLAG_FAILED} EQUAL -1) + unset(C_FLAG_SUPPORTED CACHE) + message("Checking C compiler flag support for: " ${c_flag}) + check_c_compiler_flag("${c_flag}" C_FLAG_SUPPORTED) + if(${C_FLAG_SUPPORTED}) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${c_flag}" CACHE STRING "") + else() + set(DRACO_FAILED_C_FLAGS + "${DRACO_FAILED_C_FLAGS} ${c_flag}" + CACHE STRING "" FORCE) + endif() + endif() +endmacro() + +# Checks C++ compiler for support of $cxx_flag. Adds $cxx_flag to +# $CMAKE_CXX_FLAGS when the compile test passes. Caches $c_flag in +# $DRACO_FAILED_CXX_FLAGS when the test fails. +macro(add_cxx_flag_if_supported cxx_flag) + unset(CXX_FLAG_FOUND CACHE) + string(FIND "${CMAKE_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FOUND) + unset(CXX_FLAG_FAILED CACHE) + string(FIND "${DRACO_FAILED_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FAILED) + + if(${CXX_FLAG_FOUND} EQUAL -1 AND ${CXX_FLAG_FAILED} EQUAL -1) + unset(CXX_FLAG_SUPPORTED CACHE) + message("Checking CXX compiler flag support for: " ${cxx_flag}) + check_cxx_compiler_flag("${cxx_flag}" CXX_FLAG_SUPPORTED) + if(${CXX_FLAG_SUPPORTED}) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${cxx_flag}" CACHE STRING "") + else() + set(DRACO_FAILED_CXX_FLAGS + "${DRACO_FAILED_CXX_FLAGS} ${cxx_flag}" + CACHE STRING "" FORCE) + endif() + endif() +endmacro() + +# Convenience method for adding a flag to both the C and C++ compiler command +# lines. +macro(add_compiler_flag_if_supported flag) + add_c_flag_if_supported(${flag}) + add_cxx_flag_if_supported(${flag}) +endmacro() + +# Checks C compiler for support of $c_flag and terminates generation when +# support is not present. +macro(require_c_flag c_flag update_c_flags) + unset(C_FLAG_FOUND CACHE) + string(FIND "${CMAKE_C_FLAGS}" "${c_flag}" C_FLAG_FOUND) + + if(${C_FLAG_FOUND} EQUAL -1) + unset(HAVE_C_FLAG CACHE) + message("Checking C compiler flag support for: " ${c_flag}) + check_c_compiler_flag("${c_flag}" HAVE_C_FLAG) + if(NOT ${HAVE_C_FLAG}) + message( + FATAL_ERROR "${PROJECT_NAME} requires support for C flag: ${c_flag}.") + endif() + if(${update_c_flags}) + set(CMAKE_C_FLAGS "${c_flag} ${CMAKE_C_FLAGS}" CACHE STRING "" FORCE) + endif() + endif() +endmacro() + +# Checks CXX compiler for support of $cxx_flag and terminates generation when +# support is not present. +macro(require_cxx_flag cxx_flag update_cxx_flags) + unset(CXX_FLAG_FOUND CACHE) + string(FIND "${CMAKE_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FOUND) + + if(${CXX_FLAG_FOUND} EQUAL -1) + unset(HAVE_CXX_FLAG CACHE) + message("Checking CXX compiler flag support for: " ${cxx_flag}) + check_cxx_compiler_flag("${cxx_flag}" HAVE_CXX_FLAG) + if(NOT ${HAVE_CXX_FLAG}) + message( + FATAL_ERROR + "${PROJECT_NAME} requires support for CXX flag: ${cxx_flag}.") + endif() + if(${update_cxx_flags}) + set(CMAKE_CXX_FLAGS + "${cxx_flag} ${CMAKE_CXX_FLAGS}" + CACHE STRING "" FORCE) + endif() + endif() +endmacro() + +# Checks for support of $flag by both the C and CXX compilers. Terminates +# generation when support is not present in both compilers. +macro(require_compiler_flag flag update_cmake_flags) + require_c_flag(${flag} ${update_cmake_flags}) + require_cxx_flag(${flag} ${update_cmake_flags}) +endmacro() + +# Checks only non-MSVC targets for support of $c_flag and terminates generation +# when support is not present. 
+macro(require_c_flag_nomsvc c_flag update_c_flags) + if(NOT MSVC) + require_c_flag(${c_flag} ${update_c_flags}) + endif() +endmacro() + +# Checks only non-MSVC targets for support of $cxx_flag and terminates +# generation when support is not present. +macro(require_cxx_flag_nomsvc cxx_flag update_cxx_flags) + if(NOT MSVC) + require_cxx_flag(${cxx_flag} ${update_cxx_flags}) + endif() +endmacro() + +# Checks only non-MSVC targets for support of $flag by both the C and CXX +# compilers. Terminates generation when support is not present in both +# compilers. +macro(require_compiler_flag_nomsvc flag update_cmake_flags) + require_c_flag_nomsvc(${flag} ${update_cmake_flags}) + require_cxx_flag_nomsvc(${flag} ${update_cmake_flags}) +endmacro() + +# Adds $flag to assembler command line. +macro(append_as_flag flag) + unset(AS_FLAG_FOUND CACHE) + string(FIND "${DRACO_AS_FLAGS}" "${flag}" AS_FLAG_FOUND) + + if(${AS_FLAG_FOUND} EQUAL -1) + set(DRACO_AS_FLAGS "${DRACO_AS_FLAGS} ${flag}") + endif() +endmacro() + +# Adds $flag to the C compiler command line. +macro(append_c_flag flag) + unset(C_FLAG_FOUND CACHE) + string(FIND "${CMAKE_C_FLAGS}" "${flag}" C_FLAG_FOUND) + + if(${C_FLAG_FOUND} EQUAL -1) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}") + endif() +endmacro() + +# Adds $flag to the CXX compiler command line. +macro(append_cxx_flag flag) + unset(CXX_FLAG_FOUND CACHE) + string(FIND "${CMAKE_CXX_FLAGS}" "${flag}" CXX_FLAG_FOUND) + + if(${CXX_FLAG_FOUND} EQUAL -1) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}") + endif() +endmacro() + +# Adds $flag to the C and CXX compiler command lines. +macro(append_compiler_flag flag) + append_c_flag(${flag}) + append_cxx_flag(${flag}) +endmacro() + +# Adds $flag to the executable linker command line. +macro(append_exe_linker_flag flag) + unset(LINKER_FLAG_FOUND CACHE) + string(FIND "${CMAKE_EXE_LINKER_FLAGS}" "${flag}" LINKER_FLAG_FOUND) + + if(${LINKER_FLAG_FOUND} EQUAL -1) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${flag}") + endif() +endmacro() + +# Adds $flag to the link flags for $target. +function(append_link_flag_to_target target flags) + unset(target_link_flags) + get_target_property(target_link_flags ${target} LINK_FLAGS) + + if(target_link_flags) + unset(link_flag_found) + string(FIND "${target_link_flags}" "${flags}" link_flag_found) + + if(NOT ${link_flag_found} EQUAL -1) + return() + endif() + + set(target_link_flags "${target_link_flags} ${flags}") + else() + set(target_link_flags "${flags}") + endif() + + set_target_properties(${target} PROPERTIES LINK_FLAGS ${target_link_flags}) +endfunction() + +# Adds $flag to executable linker flags, and makes sure C/CXX builds still work. +macro(require_linker_flag flag) + append_exe_linker_flag(${flag}) + + unset(c_passed) + draco_check_c_compiles("LINKER_FLAG_C_TEST(${flag})" "" c_passed) + unset(cxx_passed) + draco_check_cxx_compiles("LINKER_FLAG_CXX_TEST(${flag})" "" cxx_passed) + + if(NOT c_passed OR NOT cxx_passed) + message(FATAL_ERROR "Linker flag test for ${flag} failed.") + endif() +endmacro() diff --git a/contrib/draco/cmake/compiler_tests.cmake b/contrib/draco/cmake/compiler_tests.cmake new file mode 100644 index 000000000..e781a6537 --- /dev/null +++ b/contrib/draco/cmake/compiler_tests.cmake @@ -0,0 +1,103 @@ +if(DRACO_CMAKE_COMPILER_TESTS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_COMPILER_TESTS_CMAKE_ 1) + +include(CheckCSourceCompiles) +include(CheckCXXSourceCompiles) + +# The basic main() macro used in all compile tests. 
+set(DRACO_C_MAIN "\nint main(void) { return 0; }") +set(DRACO_CXX_MAIN "\nint main() { return 0; }") + +# Strings containing the names of passed and failed tests. +set(DRACO_C_PASSED_TESTS) +set(DRACO_C_FAILED_TESTS) +set(DRACO_CXX_PASSED_TESTS) +set(DRACO_CXX_FAILED_TESTS) + +macro(draco_push_var var new_value) + set(SAVED_${var} ${var}) + set(${var} ${new_value}) +endmacro() + +macro(draco_pop_var var) + set(var ${SAVED_${var}}) + unset(SAVED_${var}) +endmacro() + +# Confirms $test_source compiles and stores $test_name in one of +# $DRACO_C_PASSED_TESTS or $DRACO_C_FAILED_TESTS depending on out come. When the +# test passes $result_var is set to 1. When it fails $result_var is unset. The +# test is not run if the test name is found in either of the passed or failed +# test variables. +macro(draco_check_c_compiles test_name test_source result_var) + unset(C_TEST_PASSED CACHE) + unset(C_TEST_FAILED CACHE) + string(FIND "${DRACO_C_PASSED_TESTS}" "${test_name}" C_TEST_PASSED) + string(FIND "${DRACO_C_FAILED_TESTS}" "${test_name}" C_TEST_FAILED) + if(${C_TEST_PASSED} EQUAL -1 AND ${C_TEST_FAILED} EQUAL -1) + unset(C_TEST_COMPILED CACHE) + message("Running C compiler test: ${test_name}") + check_c_source_compiles("${test_source} ${DRACO_C_MAIN}" C_TEST_COMPILED) + set(${result_var} ${C_TEST_COMPILED}) + + if(${C_TEST_COMPILED}) + set(DRACO_C_PASSED_TESTS "${DRACO_C_PASSED_TESTS} ${test_name}") + else() + set(DRACO_C_FAILED_TESTS "${DRACO_C_FAILED_TESTS} ${test_name}") + message("C Compiler test ${test_name} failed.") + endif() + elseif(NOT ${C_TEST_PASSED} EQUAL -1) + set(${result_var} 1) + else() # ${C_TEST_FAILED} NOT EQUAL -1 + unset(${result_var}) + endif() +endmacro() + +# Confirms $test_source compiles and stores $test_name in one of +# $DRACO_CXX_PASSED_TESTS or $DRACO_CXX_FAILED_TESTS depending on out come. When +# the test passes $result_var is set to 1. When it fails $result_var is unset. +# The test is not run if the test name is found in either of the passed or +# failed test variables. +macro(draco_check_cxx_compiles test_name test_source result_var) + unset(CXX_TEST_PASSED CACHE) + unset(CXX_TEST_FAILED CACHE) + string(FIND "${DRACO_CXX_PASSED_TESTS}" "${test_name}" CXX_TEST_PASSED) + string(FIND "${DRACO_CXX_FAILED_TESTS}" "${test_name}" CXX_TEST_FAILED) + if(${CXX_TEST_PASSED} EQUAL -1 AND ${CXX_TEST_FAILED} EQUAL -1) + unset(CXX_TEST_COMPILED CACHE) + message("Running CXX compiler test: ${test_name}") + check_cxx_source_compiles("${test_source} ${DRACO_CXX_MAIN}" + CXX_TEST_COMPILED) + set(${result_var} ${CXX_TEST_COMPILED}) + + if(${CXX_TEST_COMPILED}) + set(DRACO_CXX_PASSED_TESTS "${DRACO_CXX_PASSED_TESTS} ${test_name}") + else() + set(DRACO_CXX_FAILED_TESTS "${DRACO_CXX_FAILED_TESTS} ${test_name}") + message("CXX Compiler test ${test_name} failed.") + endif() + elseif(NOT ${CXX_TEST_PASSED} EQUAL -1) + set(${result_var} 1) + else() # ${CXX_TEST_FAILED} NOT EQUAL -1 + unset(${result_var}) + endif() +endmacro() + +# Convenience macro that confirms $test_source compiles as C and C++. +# $result_var is set to 1 when both tests are successful, and 0 when one or both +# tests fail. Note: This macro is intended to be used to write to result +# variables that are expanded via configure_file(). $result_var is set to 1 or 0 +# to allow direct usage of the value in generated source files. 
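+# Illustrative usage (hypothetical test name, header and result variable):
+#   draco_check_source_compiles("pthread_h" "#include <pthread.h>" HAVE_PTHREAD_H)
+# leaves HAVE_PTHREAD_H set to 1 or 0, ready for expansion via configure_file().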
+macro(draco_check_source_compiles test_name test_source result_var) + unset(C_PASSED) + unset(CXX_PASSED) + draco_check_c_compiles(${test_name} ${test_source} C_PASSED) + draco_check_cxx_compiles(${test_name} ${test_source} CXX_PASSED) + if(${C_PASSED} AND ${CXX_PASSED}) + set(${result_var} 1) + else() + set(${result_var} 0) + endif() +endmacro() diff --git a/contrib/draco/cmake/draco-config.cmake.template b/contrib/draco/cmake/draco-config.cmake.template new file mode 100644 index 000000000..ca4a456bf --- /dev/null +++ b/contrib/draco/cmake/draco-config.cmake.template @@ -0,0 +1,2 @@ +set(DRACO_INCLUDE_DIRS "@DRACO_INCLUDE_DIRS@") +set(DRACO_LIBRARIES "draco") diff --git a/contrib/draco/cmake/draco.pc.template b/contrib/draco/cmake/draco.pc.template new file mode 100644 index 000000000..b8ae48212 --- /dev/null +++ b/contrib/draco/cmake/draco.pc.template @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: @PROJECT_NAME@ +Description: Draco geometry de(com)pression library. +Version: @DRACO_VERSION@ +Cflags: -I${includedir} +Libs: -L${libdir} -ldraco +Libs.private: @CMAKE_THREAD_LIBS_INIT@ diff --git a/contrib/draco/cmake/draco_build_definitions.cmake b/contrib/draco/cmake/draco_build_definitions.cmake new file mode 100644 index 000000000..c1ada6206 --- /dev/null +++ b/contrib/draco/cmake/draco_build_definitions.cmake @@ -0,0 +1,117 @@ +if(DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_ +set(DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_ 1) + +# Utility for controlling the main draco library dependency. This changes in +# shared builds, and when an optional target requires a shared library build. +macro(set_draco_target) + if(MSVC OR WIN32) + set(draco_dependency draco) + set(draco_plugin_dependency ${draco_dependency}) + else() + if(BUILD_SHARED_LIBS) + set(draco_dependency draco_shared) + else() + set(draco_dependency draco_static) + endif() + set(draco_plugin_dependency draco_static) + endif() + + if(BUILD_SHARED_LIBS) + set(CMAKE_POSITION_INDEPENDENT_CODE ON) + endif() +endmacro() + +# Configures flags and sets build system globals. 
+macro(draco_set_build_definitions) + string(TOLOWER "${CMAKE_BUILD_TYPE}" build_type_lowercase) + + if(build_type_lowercase MATCHES "rel" AND DRACO_FAST) + if(MSVC) + list(APPEND draco_msvc_cxx_flags "/Ox") + else() + list(APPEND draco_base_cxx_flags "-O3") + endif() + endif() + + draco_load_version_info() + set(DRACO_SOVERSION 1) + + list(APPEND draco_include_paths "${draco_root}" "${draco_root}/src" + "${draco_build}") + + if(DRACO_ABSL) + list(APPEND draco_include_path "${draco_root}/third_party/abseil-cpp") + endif() + + + list(APPEND draco_gtest_include_paths + "${draco_root}/../googletest/googlemock/include" + "${draco_root}/../googletest/googlemock" + "${draco_root}/../googletest/googletest/include" + "${draco_root}/../googletest/googletest") + list(APPEND draco_test_include_paths ${draco_include_paths} + ${draco_gtest_include_paths}) + list(APPEND draco_defines "DRACO_CMAKE=1" + "DRACO_FLAGS_SRCDIR=\"${draco_root}\"" + "DRACO_FLAGS_TMPDIR=\"/tmp\"") + + if(MSVC OR WIN32) + list(APPEND draco_defines "_CRT_SECURE_NO_DEPRECATE=1" "NOMINMAX=1") + + if(BUILD_SHARED_LIBS) + set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE) + endif() + endif() + + if(ANDROID) + if(CMAKE_ANDROID_ARCH_ABI STREQUAL "armeabi-v7a") + set(CMAKE_ANDROID_ARM_MODE ON) + endif() + endif() + + set_draco_target() + + if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "6") + # Quiet warnings in copy-list-initialization where {} elision has always + # been allowed. + list(APPEND draco_clang_cxx_flags "-Wno-missing-braces") + endif() + endif() + + if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "7") + if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7") + # Quiet gcc 6 vs 7 abi warnings: + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=77728 + list(APPEND draco_base_cxx_flags "-Wno-psabi") + list(APPEND ABSL_GCC_FLAGS "-Wno-psabi") + endif() + endif() + endif() + + # Source file names ending in these suffixes will have the appropriate + # compiler flags added to their compile commands to enable intrinsics. + set(draco_neon_source_file_suffix "neon.cc") + set(draco_sse4_source_file_suffix "sse4.cc") + + if((${CMAKE_CXX_COMPILER_ID} + STREQUAL + "GNU" + AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 5) + OR (${CMAKE_CXX_COMPILER_ID} + STREQUAL + "Clang" + AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4)) + message( + WARNING "GNU/GCC < v5 or Clang/LLVM < v4, ENABLING COMPATIBILITY MODE.") + draco_enable_feature(FEATURE "DRACO_OLD_GCC") + endif() + + if(EMSCRIPTEN) + draco_check_emscripten_environment() + draco_get_required_emscripten_flags(FLAG_LIST_VAR draco_base_cxx_flags) + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_cpu_detection.cmake b/contrib/draco/cmake/draco_cpu_detection.cmake new file mode 100644 index 000000000..96e4a289b --- /dev/null +++ b/contrib/draco/cmake/draco_cpu_detection.cmake @@ -0,0 +1,28 @@ +if(DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_ +set(DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_ 1) + +# Detect optimizations available for the current target CPU. 
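+# For example, on an x86_64 host with DRACO_ENABLE_OPTIMIZATIONS and
+# DRACO_ENABLE_SSE4_1 turned on this appends DRACO_ENABLE_SSE4_1=1 to
+# draco_defines; on arm/aarch64 hosts the DRACO_ENABLE_NEON define is set
+# instead.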
+macro(draco_optimization_detect) + if(DRACO_ENABLE_OPTIMIZATIONS) + string(TOLOWER "${CMAKE_SYSTEM_PROCESSOR}" cpu_lowercase) + if(cpu_lowercase MATCHES "^arm|^aarch64") + set(draco_have_neon ON) + elseif(cpu_lowercase MATCHES "^x86|amd64") + set(draco_have_sse4 ON) + endif() + endif() + + if(draco_have_neon AND DRACO_ENABLE_NEON) + list(APPEND draco_defines "DRACO_ENABLE_NEON=1") + else() + list(APPEND draco_defines "DRACO_ENABLE_NEON=0") + endif() + + if(draco_have_sse4 AND DRACO_ENABLE_SSE4_1) + list(APPEND draco_defines "DRACO_ENABLE_SSE4_1=1") + else() + list(APPEND draco_defines "DRACO_ENABLE_SSE4_1=0") + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_emscripten.cmake b/contrib/draco/cmake/draco_emscripten.cmake new file mode 100644 index 000000000..10c935043 --- /dev/null +++ b/contrib/draco/cmake/draco_emscripten.cmake @@ -0,0 +1,185 @@ +if(DRACO_CMAKE_DRACO_EMSCRIPTEN_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_EMSCRIPTEN_CMAKE_ + +# Checks environment for Emscripten prerequisites. +macro(draco_check_emscripten_environment) + if(NOT PYTHONINTERP_FOUND) + message( + FATAL_ERROR + "Python required for Emscripten builds, but cmake cannot find it.") + endif() + if(NOT EXISTS "$ENV{EMSCRIPTEN}") + message( + FATAL_ERROR + "The EMSCRIPTEN environment variable must be set. See README.md.") + endif() +endmacro() + +# Obtains the required Emscripten flags for Draco targets. +macro(draco_get_required_emscripten_flags) + set(em_FLAG_LIST_VAR) + set(em_flags) + set(em_single_arg_opts FLAG_LIST_VAR) + set(em_multi_arg_opts) + cmake_parse_arguments(em "${em_flags}" "${em_single_arg_opts}" + "${em_multi_arg_opts}" ${ARGN}) + if(NOT em_FLAG_LIST_VAR) + message(FATAL "draco_get_required_emscripten_flags: FLAG_LIST_VAR required") + endif() + + if(DRACO_JS_GLUE) + unset(required_flags) + list(APPEND ${em_FLAG_LIST_VAR} "-sALLOW_MEMORY_GROWTH=1") + list(APPEND ${em_FLAG_LIST_VAR} "-Wno-almost-asm") + list(APPEND ${em_FLAG_LIST_VAR} "--memory-init-file" "0") + list(APPEND ${em_FLAG_LIST_VAR} "-fno-omit-frame-pointer") + list(APPEND ${em_FLAG_LIST_VAR} "-sMODULARIZE=1") + list(APPEND ${em_FLAG_LIST_VAR} "-sNO_FILESYSTEM=1") + list(APPEND ${em_FLAG_LIST_VAR} "-sEXPORTED_RUNTIME_METHODS=[]") + list(APPEND ${em_FLAG_LIST_VAR} "-sPRECISE_F32=1") + list(APPEND ${em_FLAG_LIST_VAR} "-sNODEJS_CATCH_EXIT=0") + list(APPEND ${em_FLAG_LIST_VAR} "-sNODEJS_CATCH_REJECTION=0") + + if(DRACO_FAST) + list(APPEND ${em_FLAG_LIST_VAR} "--llvm-lto" "1") + endif() + if(DRACO_WASM) + list(APPEND ${em_FLAG_LIST_VAR} "-sWASM=1") + else() + list(APPEND ${em_FLAG_LIST_VAR} "-sWASM=0") + endif() + if(DRACO_IE_COMPATIBLE) + list(APPEND ${em_FLAG_LIST_VAR} "-sLEGACY_VM_SUPPORT=1") + endif() + endif() +endmacro() + +# Macro for generating C++ glue code from IDL for Emscripten targets. Executes +# python to generate the C++ binding, and establishes dendency: $OUTPUT_PATH.cpp +# on $INPUT_IDL. 
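+# Illustrative usage (hypothetical IDL and output paths):
+#   draco_generate_emscripten_glue(
+#     INPUT_IDL "${draco_root}/src/draco/javascript/emscripten/draco_web_decoder.idl"
+#     OUTPUT_PATH "${draco_build}/glue_decoder")
+# produces ${draco_build}/glue_decoder.cpp via Emscripten's webidl_binder.py.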
+macro(draco_generate_emscripten_glue)
+  set(glue_flags)
+  set(glue_single_arg_opts INPUT_IDL OUTPUT_PATH)
+  set(glue_multi_arg_opts)
+  cmake_parse_arguments(glue "${glue_flags}" "${glue_single_arg_opts}"
+                        "${glue_multi_arg_opts}" ${ARGN})
+
+  if(DRACO_VERBOSE GREATER 1)
+    message("--------- draco_generate_emscripten_glue -----------\n"
+            "glue_INPUT_IDL=${glue_INPUT_IDL}\n"
+            "glue_OUTPUT_PATH=${glue_OUTPUT_PATH}\n"
+            "----------------------------------------------------\n")
+  endif()
+
+  if(NOT glue_INPUT_IDL OR NOT glue_OUTPUT_PATH)
+    message(
+      FATAL_ERROR
+        "draco_generate_emscripten_glue: INPUT_IDL and OUTPUT_PATH required.")
+  endif()
+
+  # Generate the glue source.
+  execute_process(COMMAND ${PYTHON_EXECUTABLE}
+                          $ENV{EMSCRIPTEN}/tools/webidl_binder.py
+                          ${glue_INPUT_IDL} ${glue_OUTPUT_PATH})
+  if(NOT EXISTS "${glue_OUTPUT_PATH}.cpp")
+    message(FATAL_ERROR "JS glue generation failed for ${glue_INPUT_IDL}.")
+  endif()
+
+  # Create a dependency so that it is regenerated on edits.
+  add_custom_command(OUTPUT "${glue_OUTPUT_PATH}.cpp"
+                     COMMAND ${PYTHON_EXECUTABLE}
+                             $ENV{EMSCRIPTEN}/tools/webidl_binder.py
+                             ${glue_INPUT_IDL} ${glue_OUTPUT_PATH}
+                     DEPENDS ${draco_js_dec_idl}
+                     COMMENT "Generating ${glue_OUTPUT_PATH}.cpp."
+                     WORKING_DIRECTORY ${draco_build}
+                     VERBATIM)
+endmacro()
+
+# Wrapper for draco_add_executable() that handles the extra work necessary for
+# emscripten targets when generating JS glue:
+#
+# ~~~
+# - Set source level dependency on the C++ binding.
+# - Pre/Post link emscripten magic.
+#
+# Required args:
+# - GLUE_PATH: Base path for glue file. Used to generate .cpp and .js files.
+# - PRE_LINK_JS_SOURCES: em_link_pre_js() source files.
+# - POST_LINK_JS_SOURCES: em_link_post_js() source files.
+# Optional args:
+# - FEATURES:
+# ~~~
+macro(draco_add_emscripten_executable)
+  unset(emexe_NAME)
+  unset(emexe_FEATURES)
+  unset(emexe_SOURCES)
+  unset(emexe_DEFINES)
+  unset(emexe_INCLUDES)
+  unset(emexe_LINK_FLAGS)
+  set(optional_args)
+  set(single_value_args NAME GLUE_PATH)
+  set(multi_value_args SOURCES DEFINES FEATURES INCLUDES LINK_FLAGS
+                       PRE_LINK_JS_SOURCES POST_LINK_JS_SOURCES)
+
+  cmake_parse_arguments(emexe "${optional_args}" "${single_value_args}"
+                        "${multi_value_args}" ${ARGN})
+
+  if(NOT
+     (emexe_GLUE_PATH
+      AND emexe_POST_LINK_JS_SOURCES
+      AND emexe_PRE_LINK_JS_SOURCES))
+    message(FATAL
+            "draco_add_emscripten_executable: GLUE_PATH PRE_LINK_JS_SOURCES "
+            "POST_LINK_JS_SOURCES args required.")
+  endif()
+
+  if(DRACO_VERBOSE GREATER 1)
+    message("--------- draco_add_emscripten_executable ---------\n"
+            "emexe_NAME=${emexe_NAME}\n"
+            "emexe_SOURCES=${emexe_SOURCES}\n"
+            "emexe_DEFINES=${emexe_DEFINES}\n"
+            "emexe_INCLUDES=${emexe_INCLUDES}\n"
+            "emexe_LINK_FLAGS=${emexe_LINK_FLAGS}\n"
+            "emexe_GLUE_PATH=${emexe_GLUE_PATH}\n"
+            "emexe_FEATURES=${emexe_FEATURES}\n"
+            "emexe_PRE_LINK_JS_SOURCES=${emexe_PRE_LINK_JS_SOURCES}\n"
+            "emexe_POST_LINK_JS_SOURCES=${emexe_POST_LINK_JS_SOURCES}\n"
+            "----------------------------------------------------\n")
+  endif()
+
+  # The Emscripten linker needs the C++ flags in addition to whatever has been
+  # passed in with the target.
+ list(APPEND emexe_LINK_FLAGS ${DRACO_CXX_FLAGS}) + + if(DRACO_GLTF) + draco_add_executable(NAME + ${emexe_NAME} + OUTPUT_NAME + ${emexe_NAME}_gltf + SOURCES + ${emexe_SOURCES} + DEFINES + ${emexe_DEFINES} + INCLUDES + ${emexe_INCLUDES} + LINK_FLAGS + ${emexe_LINK_FLAGS}) + else() + draco_add_executable(NAME ${emexe_NAME} SOURCES ${emexe_SOURCES} DEFINES + ${emexe_DEFINES} INCLUDES ${emexe_INCLUDES} LINK_FLAGS + ${emexe_LINK_FLAGS}) + endif() + + foreach(feature ${emexe_FEATURES}) + draco_enable_feature(FEATURE ${feature} TARGETS ${emexe_NAME}) + endforeach() + + set_property(SOURCE ${emexe_SOURCES} + APPEND + PROPERTY OBJECT_DEPENDS "${emexe_GLUE_PATH}.cpp") + em_link_pre_js(${emexe_NAME} ${emexe_PRE_LINK_JS_SOURCES}) + em_link_post_js(${emexe_NAME} "${emexe_GLUE_PATH}.js" + ${emexe_POST_LINK_JS_SOURCES}) +endmacro() diff --git a/contrib/draco/cmake/draco_features.cmake b/contrib/draco/cmake/draco_features.cmake new file mode 100644 index 000000000..be444bf24 --- /dev/null +++ b/contrib/draco/cmake/draco_features.cmake @@ -0,0 +1,63 @@ +if(DRACO_CMAKE_DRACO_FEATURES_CMAKE_) + return() +endif() +set(DRACO_CMAKE_DRACO_FEATURES_CMAKE_ 1) + +set(draco_features_file_name "${draco_build_dir}/draco/draco_features.h") +set(draco_features_list) + +# Macro that handles tracking of Draco preprocessor symbols for the purpose of +# producing draco_features.h. +# +# draco_enable_feature(FEATURE [TARGETS ]) FEATURE +# is required. It should be a Draco preprocessor symbol. TARGETS is optional. It +# can be one or more draco targets. +# +# When the TARGETS argument is not present the preproc symbol is added to +# draco_features.h. When it is draco_features.h is unchanged, and +# target_compile_options() is called for each target specified. +macro(draco_enable_feature) + set(def_flags) + set(def_single_arg_opts FEATURE) + set(def_multi_arg_opts TARGETS) + cmake_parse_arguments(DEF "${def_flags}" "${def_single_arg_opts}" + "${def_multi_arg_opts}" ${ARGN}) + if("${DEF_FEATURE}" STREQUAL "") + message(FATAL_ERROR "Empty FEATURE passed to draco_enable_feature().") + endif() + + # Do nothing/return early if $DEF_FEATURE is already in the list. + list(FIND draco_features_list ${DEF_FEATURE} df_index) + if(NOT df_index EQUAL -1) + return() + endif() + + list(LENGTH DEF_TARGETS df_targets_list_length) + if(${df_targets_list_length} EQUAL 0) + list(APPEND draco_features_list ${DEF_FEATURE}) + else() + foreach(target ${DEF_TARGETS}) + target_compile_definitions(${target} PRIVATE ${DEF_FEATURE}) + endforeach() + endif() +endmacro() + +# Function for generating draco_features.h. +function(draco_generate_features_h) + file(WRITE "${draco_features_file_name}.new" + "// GENERATED FILE -- DO NOT EDIT\n\n" "#ifndef DRACO_FEATURES_H_\n" + "#define DRACO_FEATURES_H_\n\n") + + foreach(feature ${draco_features_list}) + file(APPEND "${draco_features_file_name}.new" "#define ${feature}\n") + endforeach() + + file(APPEND "${draco_features_file_name}.new" + "\n#endif // DRACO_FEATURES_H_") + + # Will replace ${draco_features_file_name} only if the file content has + # changed. This prevents forced Draco rebuilds after CMake runs. 
+ configure_file("${draco_features_file_name}.new" + "${draco_features_file_name}") + file(REMOVE "${draco_features_file_name}.new") +endfunction() diff --git a/contrib/draco/cmake/draco_flags.cmake b/contrib/draco/cmake/draco_flags.cmake new file mode 100644 index 000000000..cb9d489e6 --- /dev/null +++ b/contrib/draco/cmake/draco_flags.cmake @@ -0,0 +1,238 @@ +if(DRACO_CMAKE_DRACO_FLAGS_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_FLAGS_CMAKE_ +set(DRACO_CMAKE_DRACO_FLAGS_CMAKE_ 1) + +include(CheckCXXCompilerFlag) +include(CheckCXXSourceCompiles) + +# Adds compiler flags specified by FLAGS to the sources specified by SOURCES: +# +# draco_set_compiler_flags_for_sources(SOURCES FLAGS ) +macro(draco_set_compiler_flags_for_sources) + unset(compiler_SOURCES) + unset(compiler_FLAGS) + unset(optional_args) + unset(single_value_args) + set(multi_value_args SOURCES FLAGS) + cmake_parse_arguments(compiler "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT (compiler_SOURCES AND compiler_FLAGS)) + draco_die("draco_set_compiler_flags_for_sources: SOURCES and " + "FLAGS required.") + endif() + + set_source_files_properties(${compiler_SOURCES} PROPERTIES COMPILE_FLAGS + ${compiler_FLAGS}) + + if(DRACO_VERBOSE GREATER 1) + foreach(source ${compiler_SOURCES}) + foreach(flag ${compiler_FLAGS}) + message("draco_set_compiler_flags_for_sources: source:${source} " + "flag:${flag}") + endforeach() + endforeach() + endif() +endmacro() + +# Tests compiler flags stored in list(s) specified by FLAG_LIST_VAR_NAMES, adds +# flags to $DRACO_CXX_FLAGS when tests pass. Terminates configuration if +# FLAG_REQUIRED is specified and any flag check fails. +# +# ~~~ +# draco_test_cxx_flag(> +# [FLAG_REQUIRED]) +# ~~~ +macro(draco_test_cxx_flag) + unset(cxx_test_FLAG_LIST_VAR_NAMES) + unset(cxx_test_FLAG_REQUIRED) + unset(single_value_args) + set(optional_args FLAG_REQUIRED) + set(multi_value_args FLAG_LIST_VAR_NAMES) + cmake_parse_arguments(cxx_test "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT cxx_test_FLAG_LIST_VAR_NAMES) + draco_die("draco_test_cxx_flag: FLAG_LIST_VAR_NAMES required") + endif() + + unset(cxx_flags) + foreach(list_var ${cxx_test_FLAG_LIST_VAR_NAMES}) + if(DRACO_VERBOSE) + message("draco_test_cxx_flag: adding ${list_var} to cxx_flags") + endif() + list(APPEND cxx_flags ${${list_var}}) + endforeach() + + if(DRACO_VERBOSE) + message("CXX test: all flags: ${cxx_flags}") + endif() + + unset(all_cxx_flags) + list(APPEND all_cxx_flags ${DRACO_CXX_FLAGS} ${cxx_flags}) + + # Turn off output from check_cxx_source_compiles. Print status directly + # instead since the logging messages from check_cxx_source_compiles can be + # quite confusing. + set(CMAKE_REQUIRED_QUIET TRUE) + + # Run the actual compile test. + unset(draco_all_cxx_flags_pass CACHE) + message("--- Running combined CXX flags test, flags: ${all_cxx_flags}") + check_cxx_compiler_flag("${all_cxx_flags}" draco_all_cxx_flags_pass) + + if(cxx_test_FLAG_REQUIRED AND NOT draco_all_cxx_flags_pass) + draco_die("Flag test failed for required flag(s): " + "${all_cxx_flags} and FLAG_REQUIRED specified.") + endif() + + if(draco_all_cxx_flags_pass) + # Test passed: update the global flag list used by the draco target creation + # wrappers. 
+ set(DRACO_CXX_FLAGS ${cxx_flags}) + list(REMOVE_DUPLICATES DRACO_CXX_FLAGS) + + if(DRACO_VERBOSE) + message("DRACO_CXX_FLAGS=${DRACO_CXX_FLAGS}") + endif() + + message("--- Passed combined CXX flags test") + else() + message("--- Failed combined CXX flags test, testing flags individually.") + + if(cxx_flags) + message("--- Testing flags from $cxx_flags: " "${cxx_flags}") + foreach(cxx_flag ${cxx_flags}) + # Since 3.17.0 check_cxx_compiler_flag() sets a normal variable at + # parent scope while check_cxx_source_compiles() continues to set an + # internal cache variable, so we unset both to avoid the failure / + # success state persisting between checks. This has been fixed in newer + # CMake releases, but 3.17 is pretty common: we will need this to avoid + # weird build breakages while the fix propagates. + unset(cxx_flag_test_passed) + unset(cxx_flag_test_passed CACHE) + message("--- Testing flag: ${cxx_flag}") + check_cxx_compiler_flag("${cxx_flag}" cxx_flag_test_passed) + + if(cxx_flag_test_passed) + message("--- Passed test for ${cxx_flag}") + else() + list(REMOVE_ITEM cxx_flags ${cxx_flag}) + message("--- Failed test for ${cxx_flag}, flag removed.") + endif() + endforeach() + + set(DRACO_CXX_FLAGS ${cxx_flags}) + endif() + endif() + + if(DRACO_CXX_FLAGS) + list(REMOVE_DUPLICATES DRACO_CXX_FLAGS) + endif() +endmacro() + +# Tests executable linker flags stored in list specified by FLAG_LIST_VAR_NAME, +# adds flags to $DRACO_EXE_LINKER_FLAGS when test passes. Terminates +# configuration when flag check fails. draco_set_cxx_flags() must be called +# before calling this macro because it assumes $DRACO_CXX_FLAGS contains only +# valid CXX flags. +# +# draco_test_exe_linker_flag() +macro(draco_test_exe_linker_flag) + unset(link_FLAG_LIST_VAR_NAME) + unset(optional_args) + unset(multi_value_args) + set(single_value_args FLAG_LIST_VAR_NAME) + cmake_parse_arguments(link "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT link_FLAG_LIST_VAR_NAME) + draco_die("draco_test_link_flag: FLAG_LIST_VAR_NAME required") + endif() + + draco_set_and_stringify(DEST linker_flags SOURCE_VARS + ${link_FLAG_LIST_VAR_NAME}) + + if(DRACO_VERBOSE) + message("EXE LINKER test: all flags: ${linker_flags}") + endif() + + # Tests of $DRACO_CXX_FLAGS have already passed. Include them with the linker + # test. + draco_set_and_stringify(DEST CMAKE_REQUIRED_FLAGS SOURCE_VARS DRACO_CXX_FLAGS) + + # Cache the global exe linker flags. + if(CMAKE_EXE_LINKER_FLAGS) + set(cached_CMAKE_EXE_LINKER_FLAGS ${CMAKE_EXE_LINKER_FLAGS}) + draco_set_and_stringify(DEST CMAKE_EXE_LINKER_FLAGS SOURCE ${linker_flags}) + endif() + + draco_set_and_stringify(DEST CMAKE_EXE_LINKER_FLAGS SOURCE ${linker_flags} + ${CMAKE_EXE_LINKER_FLAGS}) + + # Turn off output from check_cxx_source_compiles. Print status directly + # instead since the logging messages from check_cxx_source_compiles can be + # quite confusing. + set(CMAKE_REQUIRED_QUIET TRUE) + + message("--- Running EXE LINKER test for flags: ${linker_flags}") + + unset(linker_flag_test_passed CACHE) + set(draco_cxx_main "\nint main() { return 0; }") + check_cxx_source_compiles("${draco_cxx_main}" linker_flag_test_passed) + + if(NOT linker_flag_test_passed) + draco_die("EXE LINKER test failed.") + endif() + + message("--- Passed EXE LINKER flag test.") + + # Restore cached global exe linker flags. 
+ if(cached_CMAKE_EXE_LINKER_FLAGS) + set(CMAKE_EXE_LINKER_FLAGS ${cached_CMAKE_EXE_LINKER_FLAGS}) + else() + unset(CMAKE_EXE_LINKER_FLAGS) + endif() +endmacro() + +# Runs the draco compiler tests. This macro builds up the list of list var(s) +# that is passed to draco_test_cxx_flag(). +# +# Note: draco_set_build_definitions() must be called before this macro. +macro(draco_set_cxx_flags) + unset(cxx_flag_lists) + + if(CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU") + list(APPEND cxx_flag_lists draco_base_cxx_flags) + endif() + + # Append clang flags after the base set to allow -Wno* overrides to take + # effect. Some of the base flags may enable a large set of warnings, e.g., + # -Wall. + if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + list(APPEND cxx_flag_lists draco_clang_cxx_flags) + endif() + + if(MSVC) + list(APPEND cxx_flag_lists draco_msvc_cxx_flags) + endif() + + draco_set_and_stringify(DEST cxx_flags SOURCE_VARS ${cxx_flag_lists}) + if(DRACO_VERBOSE) + message("draco_set_cxx_flags: internal CXX flags: ${cxx_flags}") + endif() + + if(DRACO_CXX_FLAGS) + list(APPEND cxx_flag_lists DRACO_CXX_FLAGS) + if(DRACO_VERBOSE) + message("draco_set_cxx_flags: user CXX flags: ${DRACO_CXX_FLAGS}") + endif() + endif() + + draco_set_and_stringify(DEST cxx_flags SOURCE_VARS ${cxx_flag_lists}) + + if(cxx_flags) + draco_test_cxx_flag(FLAG_LIST_VAR_NAMES ${cxx_flag_lists}) + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_helpers.cmake b/contrib/draco/cmake/draco_helpers.cmake new file mode 100644 index 000000000..0b3b804cf --- /dev/null +++ b/contrib/draco/cmake/draco_helpers.cmake @@ -0,0 +1,110 @@ +if(DRACO_CMAKE_DRACO_HELPERS_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_HELPERS_CMAKE_ +set(DRACO_CMAKE_DRACO_HELPERS_CMAKE_ 1) + +# Kills build generation using message(FATAL_ERROR) and outputs all data passed +# to the console via use of $ARGN. +macro(draco_die) + message(FATAL_ERROR ${ARGN}) +endmacro() + +# Converts semi-colon delimited list variable(s) to string. Output is written to +# variable supplied via the DEST parameter. Input is from an expanded variable +# referenced by SOURCE and/or variable(s) referenced by SOURCE_VARS. +macro(draco_set_and_stringify) + set(optional_args) + set(single_value_args DEST SOURCE_VAR) + set(multi_value_args SOURCE SOURCE_VARS) + cmake_parse_arguments(sas "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT sas_DEST OR NOT (sas_SOURCE OR sas_SOURCE_VARS)) + draco_die("draco_set_and_stringify: DEST and at least one of SOURCE " + "SOURCE_VARS required.") + endif() + + unset(${sas_DEST}) + + if(sas_SOURCE) + # $sas_SOURCE is one or more expanded variables, just copy the values to + # $sas_DEST. + set(${sas_DEST} "${sas_SOURCE}") + endif() + + if(sas_SOURCE_VARS) + # $sas_SOURCE_VARS is one or more variable names. Each iteration expands a + # variable and appends it to $sas_DEST. + foreach(source_var ${sas_SOURCE_VARS}) + set(${sas_DEST} "${${sas_DEST}} ${${source_var}}") + endforeach() + + # Because $sas_DEST can be empty when entering this scope leading whitespace + # can be introduced to $sas_DEST on the first iteration of the above loop. + # Remove it: + string(STRIP "${${sas_DEST}}" ${sas_DEST}) + endif() + + # Lists in CMake are simply semicolon delimited strings, so stringification is + # just a find and replace of the semicolon. 
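+  # For example, a list value of "a;b;c" becomes the single string "a b c".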
+ string(REPLACE ";" " " ${sas_DEST} "${${sas_DEST}}") + + if(DRACO_VERBOSE GREATER 1) + message("draco_set_and_stringify: ${sas_DEST}=${${sas_DEST}}") + endif() +endmacro() + +# Creates a dummy source file in $DRACO_GENERATED_SOURCES_DIRECTORY and adds it +# to the specified target. Optionally adds its path to a list variable. +# +# draco_create_dummy_source_file( BASENAME > +# [LISTVAR ]) +macro(draco_create_dummy_source_file) + set(optional_args) + set(single_value_args TARGET BASENAME LISTVAR) + set(multi_value_args) + cmake_parse_arguments(cdsf "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT cdsf_TARGET OR NOT cdsf_BASENAME) + draco_die("draco_create_dummy_source_file: TARGET and BASENAME required.") + endif() + + if(NOT DRACO_GENERATED_SOURCES_DIRECTORY) + set(DRACO_GENERATED_SOURCES_DIRECTORY "${draco_build}/gen_src") + endif() + + set(dummy_source_dir "${DRACO_GENERATED_SOURCES_DIRECTORY}") + set(dummy_source_file + "${dummy_source_dir}/draco_${cdsf_TARGET}_${cdsf_BASENAME}.cc") + set(dummy_source_code + "// Generated file. DO NOT EDIT!\n" + "// C++ source file created for target ${cdsf_TARGET}.\n" + "void draco_${cdsf_TARGET}_${cdsf_BASENAME}_dummy_function(void)\;\n" + "void draco_${cdsf_TARGET}_${cdsf_BASENAME}_dummy_function(void) {}\n") + file(WRITE "${dummy_source_file}" ${dummy_source_code}) + + target_sources(${cdsf_TARGET} PRIVATE ${dummy_source_file}) + + if(cdsf_LISTVAR) + list(APPEND ${cdsf_LISTVAR} "${dummy_source_file}") + endif() +endmacro() + +# Loads the version string from $draco_source/draco/version.h and sets +# $DRACO_VERSION. +macro(draco_load_version_info) + file(STRINGS "${draco_src_root}/core/draco_version.h" version_file_strings) + foreach(str ${version_file_strings}) + if(str MATCHES "char kDracoVersion") + string(FIND "${str}" "\"" open_quote_pos) + string(FIND "${str}" ";" semicolon_pos) + math(EXPR open_quote_pos "${open_quote_pos} + 1") + math(EXPR close_quote_pos "${semicolon_pos} - 1") + math(EXPR version_string_length "${close_quote_pos} - ${open_quote_pos}") + string(SUBSTRING "${str}" ${open_quote_pos} ${version_string_length} + DRACO_VERSION) + break() + endif() + endforeach() +endmacro() diff --git a/contrib/draco/cmake/draco_install.cmake b/contrib/draco/cmake/draco_install.cmake new file mode 100644 index 000000000..5c63ecb4a --- /dev/null +++ b/contrib/draco/cmake/draco_install.cmake @@ -0,0 +1,79 @@ +if(DRACO_CMAKE_DRACO_INSTALL_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_INSTALL_CMAKE_ +set(DRACO_CMAKE_DRACO_INSTALL_CMAKE_ 1) + +# Sets up the draco install targets. Must be called after the static library +# target is created. 
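+# Illustrative usage (hypothetical call site): a single call from the
+# top-level list file once the library and tool targets exist:
+#   draco_setup_install_target()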
+macro(draco_setup_install_target) + include(GNUInstallDirs) + + # pkg-config: draco.pc + set(prefix "${CMAKE_INSTALL_PREFIX}") + set(exec_prefix "\${prefix}") + set(libdir "\${prefix}/${CMAKE_INSTALL_LIBDIR}") + set(includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}") + set(draco_lib_name "draco") + + configure_file("${draco_root}/cmake/draco.pc.template" + "${draco_build}/draco.pc" @ONLY NEWLINE_STYLE UNIX) + install(FILES "${draco_build}/draco.pc" + DESTINATION "${prefix}/${CMAKE_INSTALL_LIBDIR}/pkgconfig") + + # CMake config: draco-config.cmake + set(DRACO_INCLUDE_DIRS "${prefix}/${CMAKE_INSTALL_INCLUDEDIR}") + configure_file("${draco_root}/cmake/draco-config.cmake.template" + "${draco_build}/draco-config.cmake" @ONLY NEWLINE_STYLE UNIX) + install( + FILES "${draco_build}/draco-config.cmake" + DESTINATION "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/cmake") + + foreach(file ${draco_sources}) + if(file MATCHES "h$") + list(APPEND draco_api_includes ${file}) + endif() + endforeach() + + # Strip $draco_src_root from the file paths: we need to install relative to + # $include_directory. + list(TRANSFORM draco_api_includes REPLACE "${draco_src_root}/" "") + set(include_directory "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}") + + foreach(draco_api_include ${draco_api_includes}) + get_filename_component(file_directory ${draco_api_include} DIRECTORY) + set(target_directory "${include_directory}/draco/${file_directory}") + install(FILES ${draco_src_root}/${draco_api_include} + DESTINATION "${target_directory}") + endforeach() + + install( + FILES "${draco_build}/draco/draco_features.h" + DESTINATION "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/draco/") + + install(TARGETS draco_decoder DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}") + install(TARGETS draco_encoder DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}") + + if(WIN32) + install(TARGETS draco DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") + else() + install(TARGETS draco_static DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") + if(BUILD_SHARED_LIBS) + install(TARGETS draco_shared DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") + endif() + endif() + + if(DRACO_UNITY_PLUGIN) + install(TARGETS dracodec_unity DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") + endif() + if(DRACO_MAYA_PLUGIN) + install(TARGETS draco_maya_wrapper DESTINATION + "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") + endif() + +endmacro() diff --git a/contrib/draco/cmake/draco_intrinsics.cmake b/contrib/draco/cmake/draco_intrinsics.cmake new file mode 100644 index 000000000..9011c0de5 --- /dev/null +++ b/contrib/draco/cmake/draco_intrinsics.cmake @@ -0,0 +1,96 @@ +if(DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_ +set(DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_ 1) + +# Returns the compiler flag for the SIMD intrinsics suffix specified by the +# SUFFIX argument via the variable specified by the VARIABLE argument: +# draco_get_intrinsics_flag_for_suffix(SUFFIX VARIABLE ) +macro(draco_get_intrinsics_flag_for_suffix) + unset(intrinsics_SUFFIX) + unset(intrinsics_VARIABLE) + unset(optional_args) + unset(multi_value_args) + set(single_value_args SUFFIX VARIABLE) + cmake_parse_arguments(intrinsics "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT (intrinsics_SUFFIX AND intrinsics_VARIABLE)) + message(FATAL_ERROR "draco_get_intrinsics_flag_for_suffix: SUFFIX and " + "VARIABLE 
required.") + endif() + + if(intrinsics_SUFFIX MATCHES "neon") + if(NOT MSVC) + set(${intrinsics_VARIABLE} "${DRACO_NEON_INTRINSICS_FLAG}") + endif() + elseif(intrinsics_SUFFIX MATCHES "sse4") + if(NOT MSVC) + set(${intrinsics_VARIABLE} "-msse4.1") + endif() + else() + message(FATAL_ERROR "draco_get_intrinsics_flag_for_suffix: Unknown " + "instrinics suffix: ${intrinsics_SUFFIX}") + endif() + + if(DRACO_VERBOSE GREATER 1) + message("draco_get_intrinsics_flag_for_suffix: " + "suffix:${intrinsics_SUFFIX} flag:${${intrinsics_VARIABLE}}") + endif() +endmacro() + +# Processes source files specified by SOURCES and adds intrinsics flags as +# necessary: draco_process_intrinsics_sources(SOURCES ) +# +# Detects requirement for intrinsics flags using source file name suffix. +# Currently supports only SSE4.1. +macro(draco_process_intrinsics_sources) + unset(arg_TARGET) + unset(arg_SOURCES) + unset(optional_args) + set(single_value_args TARGET) + set(multi_value_args SOURCES) + cmake_parse_arguments(arg "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + if(NOT (arg_TARGET AND arg_SOURCES)) + message(FATAL_ERROR "draco_process_intrinsics_sources: TARGET and " + "SOURCES required.") + endif() + + if(DRACO_ENABLE_SSE4_1 AND draco_have_sse4) + unset(sse4_sources) + list(APPEND sse4_sources ${arg_SOURCES}) + + list(FILTER sse4_sources INCLUDE REGEX + "${draco_sse4_source_file_suffix}$") + + if(sse4_sources) + unset(sse4_flags) + draco_get_intrinsics_flag_for_suffix(SUFFIX + ${draco_sse4_source_file_suffix} + VARIABLE sse4_flags) + if(sse4_flags) + draco_set_compiler_flags_for_sources(SOURCES ${sse4_sources} FLAGS + ${sse4_flags}) + endif() + endif() + endif() + + if(DRACO_ENABLE_NEON AND draco_have_neon) + unset(neon_sources) + list(APPEND neon_sources ${arg_SOURCES}) + list(FILTER neon_sources INCLUDE REGEX + "${draco_neon_source_file_suffix}$") + + if(neon_sources AND DRACO_NEON_INTRINSICS_FLAG) + unset(neon_flags) + draco_get_intrinsics_flag_for_suffix(SUFFIX + ${draco_neon_source_file_suffix} + VARIABLE neon_flags) + if(neon_flags) + draco_set_compiler_flags_for_sources(SOURCES ${neon_sources} FLAGS + ${neon_flags}) + endif() + endif() + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_options.cmake b/contrib/draco/cmake/draco_options.cmake new file mode 100644 index 000000000..832bfb69f --- /dev/null +++ b/contrib/draco/cmake/draco_options.cmake @@ -0,0 +1,239 @@ +if(DRACO_CMAKE_DRACO_OPTIONS_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_OPTIONS_CMAKE_ +set(DRACO_CMAKE_DRACO_OPTIONS_CMAKE_) + +set(draco_features_file_name "${draco_build}/draco/draco_features.h") +set(draco_features_list) + +# Simple wrapper for CMake's builtin option command that tracks draco's build +# options in the list variable $draco_options. 
+macro(draco_option) + unset(option_NAME) + unset(option_HELPSTRING) + unset(option_VALUE) + unset(optional_args) + unset(multi_value_args) + set(single_value_args NAME HELPSTRING VALUE) + cmake_parse_arguments(option "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(NOT (option_NAME AND option_HELPSTRING AND DEFINED option_VALUE)) + message(FATAL_ERROR "draco_option: NAME HELPSTRING and VALUE required.") + endif() + + option(${option_NAME} ${option_HELPSTRING} ${option_VALUE}) + + if(DRACO_VERBOSE GREATER 2) + message("--------- draco_option ---------\n" "option_NAME=${option_NAME}\n" + "option_HELPSTRING=${option_HELPSTRING}\n" + "option_VALUE=${option_VALUE}\n" + "------------------------------------------\n") + endif() + + list(APPEND draco_options ${option_NAME}) + list(REMOVE_DUPLICATES draco_options) +endmacro() + +# Dumps the $draco_options list via CMake message command. +macro(draco_dump_options) + foreach(option_name ${draco_options}) + message("${option_name}: ${${option_name}}") + endforeach() +endmacro() + +# Set default options. +macro(draco_set_default_options) + draco_option(NAME DRACO_FAST HELPSTRING "Try to build faster libs." VALUE OFF) + draco_option(NAME DRACO_JS_GLUE HELPSTRING + "Enable JS Glue and JS targets when using Emscripten." VALUE ON) + draco_option(NAME DRACO_IE_COMPATIBLE HELPSTRING + "Enable support for older IE builds when using Emscripten." VALUE + OFF) + draco_option(NAME DRACO_MESH_COMPRESSION HELPSTRING "Enable mesh compression." + VALUE ON) + draco_option(NAME DRACO_POINT_CLOUD_COMPRESSION HELPSTRING + "Enable point cloud compression." VALUE ON) + draco_option(NAME DRACO_PREDICTIVE_EDGEBREAKER HELPSTRING + "Enable predictive edgebreaker." VALUE ON) + draco_option(NAME DRACO_STANDARD_EDGEBREAKER HELPSTRING + "Enable stand edgebreaker." VALUE ON) + draco_option(NAME DRACO_BACKWARDS_COMPATIBILITY HELPSTRING + "Enable backwards compatibility." VALUE ON) + draco_option(NAME DRACO_DECODER_ATTRIBUTE_DEDUPLICATION HELPSTRING + "Enable attribute deduping." VALUE OFF) + draco_option(NAME DRACO_TESTS HELPSTRING "Enables tests." VALUE OFF) + draco_option(NAME DRACO_WASM HELPSTRING "Enables WASM support." VALUE OFF) + draco_option(NAME DRACO_UNITY_PLUGIN HELPSTRING + "Build plugin library for Unity." VALUE OFF) + draco_option(NAME DRACO_ANIMATION_ENCODING HELPSTRING "Enable animation." + VALUE OFF) + draco_option(NAME DRACO_GLTF HELPSTRING "Support GLTF." VALUE OFF) + draco_option(NAME DRACO_MAYA_PLUGIN HELPSTRING + "Build plugin library for Maya." VALUE OFF) + draco_check_deprecated_options() +endmacro() + +# Warns when a deprecated option is used and sets the option that replaced it. +macro(draco_handle_deprecated_option) + unset(option_OLDNAME) + unset(option_NEWNAME) + unset(optional_args) + unset(multi_value_args) + set(single_value_args OLDNAME NEWNAME) + cmake_parse_arguments(option "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if("${${option_OLDNAME}}") + message(WARNING "${option_OLDNAME} is deprecated. Use ${option_NEWNAME}.") + set(${option_NEWNAME} ${${option_OLDNAME}}) + endif() +endmacro() + +# Checks for use of deprecated options. 
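+# For example, configuring with -DENABLE_TESTS=ON still works: the check below
+# warns that the option is deprecated and sets DRACO_TESTS=ON in its place.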
+macro(draco_check_deprecated_options) + draco_handle_deprecated_option(OLDNAME ENABLE_EXTRA_SPEED NEWNAME DRACO_FAST) + draco_handle_deprecated_option(OLDNAME ENABLE_JS_GLUE NEWNAME DRACO_JS_GLUE) + draco_handle_deprecated_option(OLDNAME ENABLE_MESH_COMPRESSION NEWNAME + DRACO_MESH_COMPRESSION) + draco_handle_deprecated_option(OLDNAME ENABLE_POINT_CLOUD_COMPRESSION NEWNAME + DRACO_POINT_CLOUD_COMPRESSION) + draco_handle_deprecated_option(OLDNAME ENABLE_PREDICTIVE_EDGEBREAKER NEWNAME + DRACO_PREDICTIVE_EDGEBREAKER) + draco_handle_deprecated_option(OLDNAME ENABLE_STANDARD_EDGEBREAKER NEWNAME + DRACO_STANDARD_EDGEBREAKER) + draco_handle_deprecated_option(OLDNAME ENABLE_BACKWARDS_COMPATIBILITY NEWNAME + DRACO_BACKWARDS_COMPATIBILITY) + draco_handle_deprecated_option(OLDNAME ENABLE_DECODER_ATTRIBUTE_DEDUPLICATION + NEWNAME DRACO_DECODER_ATTRIBUTE_DEDUPLICATION) + draco_handle_deprecated_option(OLDNAME ENABLE_TESTS NEWNAME DRACO_TESTS) + draco_handle_deprecated_option(OLDNAME ENABLE_WASM NEWNAME DRACO_WASM) + draco_handle_deprecated_option(OLDNAME BUILD_UNITY_PLUGIN NEWNAME + DRACO_UNITY_PLUGIN) + draco_handle_deprecated_option(OLDNAME BUILD_ANIMATION_ENCODING NEWNAME + DRACO_ANIMATION_ENCODING) + draco_handle_deprecated_option(OLDNAME BUILD_FOR_GLTF NEWNAME DRACO_GLTF) + draco_handle_deprecated_option(OLDNAME BUILD_MAYA_PLUGIN NEWNAME + DRACO_MAYA_PLUGIN) + draco_handle_deprecated_option(OLDNAME BUILD_USD_PLUGIN NEWNAME + BUILD_SHARED_LIBS) + +endmacro() + +# Macro for setting Draco features based on user configuration. Features enabled +# by this macro are Draco global. +macro(draco_set_optional_features) + if(DRACO_GLTF) + # Override settings when building for GLTF. + draco_enable_feature(FEATURE "DRACO_MESH_COMPRESSION_SUPPORTED") + draco_enable_feature(FEATURE "DRACO_NORMAL_ENCODING_SUPPORTED") + draco_enable_feature(FEATURE "DRACO_STANDARD_EDGEBREAKER_SUPPORTED") + else() + if(DRACO_POINT_CLOUD_COMPRESSION) + draco_enable_feature(FEATURE "DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED") + endif() + if(DRACO_MESH_COMPRESSION) + draco_enable_feature(FEATURE "DRACO_MESH_COMPRESSION_SUPPORTED") + draco_enable_feature(FEATURE "DRACO_NORMAL_ENCODING_SUPPORTED") + + if(DRACO_STANDARD_EDGEBREAKER) + draco_enable_feature(FEATURE "DRACO_STANDARD_EDGEBREAKER_SUPPORTED") + endif() + if(DRACO_PREDICTIVE_EDGEBREAKER) + draco_enable_feature(FEATURE "DRACO_PREDICTIVE_EDGEBREAKER_SUPPORTED") + endif() + endif() + + if(DRACO_BACKWARDS_COMPATIBILITY) + draco_enable_feature(FEATURE "DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED") + endif() + + + if(NOT EMSCRIPTEN) + # For now, enable deduplication for both encoder and decoder. + # TODO(ostava): Support for disabling attribute deduplication for the C++ + # decoder is planned in future releases. + draco_enable_feature(FEATURE + DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED) + draco_enable_feature(FEATURE + DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED) + endif() + endif() + + if(DRACO_UNITY_PLUGIN) + draco_enable_feature(FEATURE "DRACO_UNITY_PLUGIN") + set(CMAKE_POSITION_INDEPENDENT_CODE ON) + endif() + + if(DRACO_MAYA_PLUGIN) + draco_enable_feature(FEATURE "DRACO_MAYA_PLUGIN") + set(CMAKE_POSITION_INDEPENDENT_CODE ON) + endif() + +endmacro() + +# Macro that handles tracking of Draco preprocessor symbols for the purpose of +# producing draco_features.h. +# +# ~~~ +# draco_enable_feature(FEATURE [TARGETS ]) +# ~~~ +# +# FEATURE is required. It should be a Draco preprocessor symbol. TARGETS is +# optional. It can be one or more draco targets. 
+# +# When the TARGETS argument is not present the preproc symbol is added to +# draco_features.h. When it is draco_features.h is unchanged, and +# target_compile_options() is called for each target specified. +macro(draco_enable_feature) + set(def_flags) + set(def_single_arg_opts FEATURE) + set(def_multi_arg_opts TARGETS) + cmake_parse_arguments(DEF "${def_flags}" "${def_single_arg_opts}" + "${def_multi_arg_opts}" ${ARGN}) + if("${DEF_FEATURE}" STREQUAL "") + message(FATAL_ERROR "Empty FEATURE passed to draco_enable_feature().") + endif() + + # Do nothing/return early if $DEF_FEATURE is already in the list. + list(FIND draco_features_list ${DEF_FEATURE} df_index) + if(NOT df_index EQUAL -1) + return() + endif() + + list(LENGTH DEF_TARGETS df_targets_list_length) + if(${df_targets_list_length} EQUAL 0) + list(APPEND draco_features_list ${DEF_FEATURE}) + else() + foreach(target ${DEF_TARGETS}) + target_compile_definitions(${target} PRIVATE ${DEF_FEATURE}) + endforeach() + endif() +endmacro() + +# Function for generating draco_features.h. +function(draco_generate_features_h) + file(WRITE "${draco_features_file_name}.new" + "// GENERATED FILE -- DO NOT EDIT\n\n" "#ifndef DRACO_FEATURES_H_\n" + "#define DRACO_FEATURES_H_\n\n") + + foreach(feature ${draco_features_list}) + file(APPEND "${draco_features_file_name}.new" "#define ${feature}\n") + endforeach() + + file(APPEND "${draco_features_file_name}.new" + "\n#endif // DRACO_FEATURES_H_") + + # Will replace ${draco_features_file_name} only if the file content has + # changed. This prevents forced Draco rebuilds after CMake runs. + configure_file("${draco_features_file_name}.new" + "${draco_features_file_name}") + file(REMOVE "${draco_features_file_name}.new") +endfunction() + +# Sets default options for the build and processes user controlled options to +# compute enabled features. +macro(draco_setup_options) + draco_set_default_options() + draco_set_optional_features() +endmacro() diff --git a/contrib/draco/cmake/draco_sanitizer.cmake b/contrib/draco/cmake/draco_sanitizer.cmake new file mode 100644 index 000000000..ca8e23176 --- /dev/null +++ b/contrib/draco/cmake/draco_sanitizer.cmake @@ -0,0 +1,32 @@ +if(DRACO_CMAKE_DRACO_SANITIZER_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_SANITIZER_CMAKE_ +set(DRACO_CMAKE_DRACO_SANITIZER_CMAKE_ 1) + +# Handles the details of enabling sanitizers. +macro(draco_configure_sanitizer) + if(DRACO_SANITIZE AND NOT MSVC) + if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + if(DRACO_SANITIZE MATCHES "cfi") + list(APPEND DRACO_CXX_FLAGS "-flto" "-fno-sanitize-trap=cfi") + list(APPEND DRACO_EXE_LINKER_FLAGS "-flto" "-fno-sanitize-trap=cfi" + "-fuse-ld=gold") + endif() + + if(${CMAKE_SIZEOF_VOID_P} EQUAL 4 + AND DRACO_SANITIZE MATCHES "integer|undefined") + list(APPEND DRACO_EXE_LINKER_FLAGS "--rtlib=compiler-rt" "-lgcc_s") + endif() + endif() + + list(APPEND DRACO_CXX_FLAGS "-fsanitize=${DRACO_SANITIZE}") + list(APPEND DRACO_EXE_LINKER_FLAGS "-fsanitize=${DRACO_SANITIZE}") + + # Make sanitizer callstacks accurate. 
+ list(APPEND DRACO_CXX_FLAGS "-fno-omit-frame-pointer" + "-fno-optimize-sibling-calls") + + draco_test_cxx_flag(FLAG_LIST_VAR_NAMES DRACO_CXX_FLAGS FLAG_REQUIRED) + draco_test_exe_linker_flag(FLAG_LIST_VAR_NAME DRACO_EXE_LINKER_FLAGS) + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_targets.cmake b/contrib/draco/cmake/draco_targets.cmake new file mode 100644 index 000000000..6dfa6a0c4 --- /dev/null +++ b/contrib/draco/cmake/draco_targets.cmake @@ -0,0 +1,349 @@ +if(DRACO_CMAKE_DRACO_TARGETS_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_TARGETS_CMAKE_ +set(DRACO_CMAKE_DRACO_TARGETS_CMAKE_ 1) + +# Resets list variables used to track draco targets. +macro(draco_reset_target_lists) + unset(draco_targets) + unset(draco_exe_targets) + unset(draco_lib_targets) + unset(draco_objlib_targets) + unset(draco_module_targets) + unset(draco_sources) + unset(draco_test_targets) +endmacro() + +# Creates an executable target. The target name is passed as a parameter to the +# NAME argument, and the sources passed as a parameter to the SOURCES argument: +# draco_add_executable(NAME SOURCES [optional args]) +# +# Optional args: +# cmake-format: off +# - OUTPUT_NAME: Override output file basename. Target basename defaults to +# NAME. +# - TEST: Flag. Presence means treat executable as a test. +# - DEFINES: List of preprocessor macro definitions. +# - INCLUDES: list of include directories for the target. +# - COMPILE_FLAGS: list of compiler flags for the target. +# - LINK_FLAGS: List of linker flags for the target. +# - OBJLIB_DEPS: List of CMake object library target dependencies. +# - LIB_DEPS: List of CMake library dependencies. +# cmake-format: on +# +# Sources passed to this macro are added to $draco_test_sources when TEST is +# specified. Otherwise sources are added to $draco_sources. +# +# Targets passed to this macro are always added to the $draco_targets list. When +# TEST is specified targets are also added to the $draco_test_targets list. +# Otherwise targets are added to $draco_exe_targets. 
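+# Illustrative usage (hypothetical source path):
+#   draco_add_executable(NAME draco_decoder
+#                        SOURCES "${draco_src_root}/tools/draco_decoder.cc"
+#                        LIB_DEPS ${draco_dependency})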
+macro(draco_add_executable) + unset(exe_TEST) + unset(exe_TEST_DEFINES_MAIN) + unset(exe_NAME) + unset(exe_OUTPUT_NAME) + unset(exe_SOURCES) + unset(exe_DEFINES) + unset(exe_INCLUDES) + unset(exe_COMPILE_FLAGS) + unset(exe_LINK_FLAGS) + unset(exe_OBJLIB_DEPS) + unset(exe_LIB_DEPS) + set(optional_args TEST) + set(single_value_args NAME OUTPUT_NAME) + set(multi_value_args SOURCES DEFINES INCLUDES COMPILE_FLAGS LINK_FLAGS + OBJLIB_DEPS LIB_DEPS) + + cmake_parse_arguments(exe "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(DRACO_VERBOSE GREATER 1) + message("--------- draco_add_executable ---------\n" + "exe_TEST=${exe_TEST}\n" + "exe_TEST_DEFINES_MAIN=${exe_TEST_DEFINES_MAIN}\n" + "exe_NAME=${exe_NAME}\n" + "exe_OUTPUT_NAME=${exe_OUTPUT_NAME}\n" + "exe_SOURCES=${exe_SOURCES}\n" + "exe_DEFINES=${exe_DEFINES}\n" + "exe_INCLUDES=${exe_INCLUDES}\n" + "exe_COMPILE_FLAGS=${exe_COMPILE_FLAGS}\n" + "exe_LINK_FLAGS=${exe_LINK_FLAGS}\n" + "exe_OBJLIB_DEPS=${exe_OBJLIB_DEPS}\n" + "exe_LIB_DEPS=${exe_LIB_DEPS}\n" + "------------------------------------------\n") + endif() + + if(NOT (exe_NAME AND exe_SOURCES)) + message(FATAL_ERROR "draco_add_executable: NAME and SOURCES required.") + endif() + + list(APPEND draco_targets ${exe_NAME}) + if(exe_TEST) + list(APPEND draco_test_targets ${exe_NAME}) + list(APPEND draco_test_sources ${exe_SOURCES}) + else() + list(APPEND draco_exe_targets ${exe_NAME}) + list(APPEND draco_sources ${exe_SOURCES}) + endif() + + add_executable(${exe_NAME} ${exe_SOURCES}) + + if(exe_OUTPUT_NAME) + set_target_properties(${exe_NAME} PROPERTIES OUTPUT_NAME ${exe_OUTPUT_NAME}) + endif() + + draco_process_intrinsics_sources(TARGET ${exe_NAME} SOURCES ${exe_SOURCES}) + + if(exe_DEFINES) + target_compile_definitions(${exe_NAME} PRIVATE ${exe_DEFINES}) + endif() + + if(exe_INCLUDES) + target_include_directories(${exe_NAME} PRIVATE ${exe_INCLUDES}) + endif() + + if(exe_COMPILE_FLAGS OR DRACO_CXX_FLAGS) + target_compile_options(${exe_NAME} + PRIVATE ${exe_COMPILE_FLAGS} ${DRACO_CXX_FLAGS}) + endif() + + if(exe_LINK_FLAGS OR DRACO_EXE_LINKER_FLAGS) + if(${CMAKE_VERSION} VERSION_LESS "3.13") + set(link_flags ${exe_LINK_FLAGS} ${DRACO_EXE_LINKER_FLAGS}) + set_target_properties(${exe_NAME} + PROPERTIES LINK_FLAGS ${exe_LINK_FLAGS} + ${DRACO_EXE_LINKER_FLAGS}) + else() + target_link_options(${exe_NAME} PRIVATE ${exe_LINK_FLAGS} + ${DRACO_EXE_LINKER_FLAGS}) + endif() + endif() + + if(exe_OBJLIB_DEPS) + foreach(objlib_dep ${exe_OBJLIB_DEPS}) + target_sources(${exe_NAME} PRIVATE $) + endforeach() + endif() + + if(CMAKE_THREAD_LIBS_INIT) + list(APPEND exe_LIB_DEPS ${CMAKE_THREAD_LIBS_INIT}) + endif() + + if(BUILD_SHARED_LIBS AND (MSVC OR WIN32)) + target_compile_definitions(${lib_NAME} PRIVATE "DRACO_BUILDING_DLL=0") + endif() + + if(exe_LIB_DEPS) + unset(exe_static) + if("${CMAKE_EXE_LINKER_FLAGS} ${DRACO_EXE_LINKER_FLAGS}" MATCHES "static") + set(exe_static ON) + endif() + + if(exe_static AND CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU") + # Third party dependencies can introduce dependencies on system and test + # libraries. Since the target created here is an executable, and CMake + # does not provide a method of controlling order of link dependencies, + # wrap all of the dependencies of this target in start/end group flags to + # ensure that dependencies of third party targets can be resolved when + # those dependencies happen to be resolved by dependencies of the current + # target. 
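+      # e.g. the resulting link line wraps the dependencies as
+      #   -Wl,--start-group <exe_LIB_DEPS...> -Wl,--end-group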
+ list(INSERT exe_LIB_DEPS 0 -Wl,--start-group) + list(APPEND exe_LIB_DEPS -Wl,--end-group) + endif() + target_link_libraries(${exe_NAME} PRIVATE ${exe_LIB_DEPS}) + endif() +endmacro() + +# Creates a library target of the specified type. The target name is passed as a +# parameter to the NAME argument, the type as a parameter to the TYPE argument, +# and the sources passed as a parameter to the SOURCES argument: +# draco_add_library(NAME TYPE SOURCES [optional args]) +# +# Optional args: +# cmake-format: off +# - OUTPUT_NAME: Override output file basename. Target basename defaults to +# NAME. OUTPUT_NAME is ignored when BUILD_SHARED_LIBS is enabled and CMake +# is generating a build for which MSVC or WIN32 are true. This is to avoid +# output basename collisions with DLL import libraries. +# - TEST: Flag. Presence means treat library as a test. +# - DEFINES: List of preprocessor macro definitions. +# - INCLUDES: list of include directories for the target. +# - COMPILE_FLAGS: list of compiler flags for the target. +# - LINK_FLAGS: List of linker flags for the target. +# - OBJLIB_DEPS: List of CMake object library target dependencies. +# - LIB_DEPS: List of CMake library dependencies. +# - PUBLIC_INCLUDES: List of include paths to export to dependents. +# cmake-format: on +# +# Sources passed to the macro are added to the lists tracking draco sources: +# cmake-format: off +# - When TEST is specified sources are added to $draco_test_sources. +# - Otherwise sources are added to $draco_sources. +# cmake-format: on +# +# Targets passed to this macro are added to the lists tracking draco targets: +# cmake-format: off +# - Targets are always added to $draco_targets. +# - When the TEST flag is specified, targets are added to +# $draco_test_targets. +# - When TEST is not specified: +# - Libraries of type SHARED are added to $draco_dylib_targets. +# - Libraries of type OBJECT are added to $draco_objlib_targets. +# - Libraries of type STATIC are added to $draco_lib_targets. 
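+#
+# Illustrative usage (the arguments shown are the lists built up elsewhere in
+# this file and in draco_build_definitions.cmake):
+#   draco_add_library(NAME draco_static TYPE STATIC SOURCES ${draco_sources}
+#                     DEFINES ${draco_defines} INCLUDES ${draco_include_paths})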
+# cmake-format: on +macro(draco_add_library) + unset(lib_TEST) + unset(lib_NAME) + unset(lib_OUTPUT_NAME) + unset(lib_TYPE) + unset(lib_SOURCES) + unset(lib_DEFINES) + unset(lib_INCLUDES) + unset(lib_COMPILE_FLAGS) + unset(lib_LINK_FLAGS) + unset(lib_OBJLIB_DEPS) + unset(lib_LIB_DEPS) + unset(lib_PUBLIC_INCLUDES) + unset(lib_TARGET_PROPERTIES) + set(optional_args TEST) + set(single_value_args NAME OUTPUT_NAME TYPE) + set(multi_value_args SOURCES DEFINES INCLUDES COMPILE_FLAGS LINK_FLAGS + OBJLIB_DEPS LIB_DEPS PUBLIC_INCLUDES TARGET_PROPERTIES) + + cmake_parse_arguments(lib "${optional_args}" "${single_value_args}" + "${multi_value_args}" ${ARGN}) + + if(DRACO_VERBOSE GREATER 1) + message("--------- draco_add_library ---------\n" + "lib_TEST=${lib_TEST}\n" + "lib_NAME=${lib_NAME}\n" + "lib_OUTPUT_NAME=${lib_OUTPUT_NAME}\n" + "lib_TYPE=${lib_TYPE}\n" + "lib_SOURCES=${lib_SOURCES}\n" + "lib_DEFINES=${lib_DEFINES}\n" + "lib_INCLUDES=${lib_INCLUDES}\n" + "lib_COMPILE_FLAGS=${lib_COMPILE_FLAGS}\n" + "lib_LINK_FLAGS=${lib_LINK_FLAGS}\n" + "lib_OBJLIB_DEPS=${lib_OBJLIB_DEPS}\n" + "lib_LIB_DEPS=${lib_LIB_DEPS}\n" + "lib_PUBLIC_INCLUDES=${lib_PUBLIC_INCLUDES}\n" + "---------------------------------------\n") + endif() + + if(NOT (lib_NAME AND lib_TYPE)) + message(FATAL_ERROR "draco_add_library: NAME and TYPE required.") + endif() + + list(APPEND draco_targets ${lib_NAME}) + if(lib_TEST) + list(APPEND draco_test_targets ${lib_NAME}) + list(APPEND draco_test_sources ${lib_SOURCES}) + else() + list(APPEND draco_sources ${lib_SOURCES}) + if(lib_TYPE STREQUAL MODULE) + list(APPEND draco_module_targets ${lib_NAME}) + elseif(lib_TYPE STREQUAL OBJECT) + list(APPEND draco_objlib_targets ${lib_NAME}) + elseif(lib_TYPE STREQUAL SHARED) + list(APPEND draco_dylib_targets ${lib_NAME}) + elseif(lib_TYPE STREQUAL STATIC) + list(APPEND draco_lib_targets ${lib_NAME}) + else() + message(WARNING "draco_add_library: Unhandled type: ${lib_TYPE}") + endif() + endif() + + add_library(${lib_NAME} ${lib_TYPE} ${lib_SOURCES}) + if(lib_SOURCES) + draco_process_intrinsics_sources(TARGET ${lib_NAME} SOURCES ${lib_SOURCES}) + endif() + + if(lib_OUTPUT_NAME) + if(NOT (BUILD_SHARED_LIBS AND (MSVC OR WIN32))) + set_target_properties(${lib_NAME} + PROPERTIES OUTPUT_NAME ${lib_OUTPUT_NAME}) + endif() + endif() + + if(lib_DEFINES) + target_compile_definitions(${lib_NAME} PRIVATE ${lib_DEFINES}) + endif() + + if(lib_INCLUDES) + target_include_directories(${lib_NAME} PRIVATE ${lib_INCLUDES}) + endif() + + if(lib_PUBLIC_INCLUDES) + target_include_directories(${lib_NAME} PUBLIC ${lib_PUBLIC_INCLUDES}) + endif() + + if(lib_COMPILE_FLAGS OR DRACO_CXX_FLAGS) + target_compile_options(${lib_NAME} + PRIVATE ${lib_COMPILE_FLAGS} ${DRACO_CXX_FLAGS}) + endif() + + if(lib_LINK_FLAGS) + set_target_properties(${lib_NAME} PROPERTIES LINK_FLAGS ${lib_LINK_FLAGS}) + endif() + + if(lib_OBJLIB_DEPS) + foreach(objlib_dep ${lib_OBJLIB_DEPS}) + target_sources(${lib_NAME} PRIVATE $) + endforeach() + endif() + + if(lib_LIB_DEPS) + if(lib_TYPE STREQUAL STATIC) + set(link_type PUBLIC) + else() + set(link_type PRIVATE) + if(lib_TYPE STREQUAL SHARED AND CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU") + # The draco shared object uses the static draco as input to turn it into + # a shared object. Include everything from the static library in the + # shared object. 
+ if(APPLE) + list(INSERT lib_LIB_DEPS 0 -Wl,-force_load) + else() + list(INSERT lib_LIB_DEPS 0 -Wl,--whole-archive) + list(APPEND lib_LIB_DEPS -Wl,--no-whole-archive) + endif() + endif() + endif() + target_link_libraries(${lib_NAME} ${link_type} ${lib_LIB_DEPS}) + endif() + + if(NOT MSVC AND lib_NAME MATCHES "^lib") + # Non-MSVC generators prepend lib to static lib target file names. Libdraco + # already includes lib in its name. Avoid naming output files liblib*. + set_target_properties(${lib_NAME} PROPERTIES PREFIX "") + endif() + + if(lib_TYPE STREQUAL SHARED AND NOT MSVC) + set_target_properties(${lib_NAME} PROPERTIES SOVERSION ${DRACO_SOVERSION}) + endif() + + if(BUILD_SHARED_LIBS AND (MSVC OR WIN32)) + if(lib_TYPE STREQUAL SHARED) + target_compile_definitions(${lib_NAME} PRIVATE "DRACO_BUILDING_DLL=1") + else() + target_compile_definitions(${lib_NAME} PRIVATE "DRACO_BUILDING_DLL=0") + endif() + endif() + + # Determine if $lib_NAME is a header only target. + unset(sources_list) + if(lib_SOURCES) + set(sources_list ${lib_SOURCES}) + list(FILTER sources_list INCLUDE REGEX cc$) + endif() + + if(NOT sources_list) + if(NOT XCODE) + # This is a header only target. Tell CMake the link language. + set_target_properties(${lib_NAME} PROPERTIES LINKER_LANGUAGE CXX) + else() + # The Xcode generator ignores LINKER_LANGUAGE. Add a dummy cc file. + draco_create_dummy_source_file(TARGET ${lib_NAME} BASENAME ${lib_NAME}) + endif() + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_test_config.h.cmake b/contrib/draco/cmake/draco_test_config.h.cmake new file mode 100644 index 000000000..77a574123 --- /dev/null +++ b/contrib/draco/cmake/draco_test_config.h.cmake @@ -0,0 +1,13 @@ +#ifndef DRACO_TESTING_DRACO_TEST_CONFIG_H_ +#define DRACO_TESTING_DRACO_TEST_CONFIG_H_ + +// If this file is named draco_test_config.h.cmake: +// This file is used as input at cmake generation time. + +// If this file is named draco_test_config.h: +// GENERATED FILE, DO NOT EDIT. SEE ABOVE. + +#define DRACO_TEST_DATA_DIR "${DRACO_TEST_DATA_DIR}" +#define DRACO_TEST_TEMP_DIR "${DRACO_TEST_TEMP_DIR}" + +#endif // DRACO_TESTING_DRACO_TEST_CONFIG_H_ diff --git a/contrib/draco/cmake/draco_tests.cmake b/contrib/draco/cmake/draco_tests.cmake new file mode 100644 index 000000000..a6dfc5b57 --- /dev/null +++ b/contrib/draco/cmake/draco_tests.cmake @@ -0,0 +1,133 @@ +if(DRACO_CMAKE_DRACO_TESTS_CMAKE) + return() +endif() +set(DRACO_CMAKE_DRACO_TESTS_CMAKE 1) + +# The factory tests are in a separate target to avoid breaking tests that rely +# on file I/O via the factories. The fake reader and writer implementations +# interfere with normal file I/O function. 
+set(draco_factory_test_sources + "${draco_src_root}/io/file_reader_factory_test.cc" + "${draco_src_root}/io/file_writer_factory_test.cc") + +list( + APPEND + draco_test_sources + "${draco_src_root}/animation/keyframe_animation_encoding_test.cc" + "${draco_src_root}/animation/keyframe_animation_test.cc" + "${draco_src_root}/attributes/point_attribute_test.cc" + "${draco_src_root}/compression/attributes/point_d_vector_test.cc" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc" + "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc" + "${draco_src_root}/compression/attributes/sequential_integer_attribute_encoding_test.cc" + "${draco_src_root}/compression/bit_coders/rans_coding_test.cc" + "${draco_src_root}/compression/decode_test.cc" + "${draco_src_root}/compression/encode_test.cc" + "${draco_src_root}/compression/entropy/shannon_entropy_test.cc" + "${draco_src_root}/compression/entropy/symbol_coding_test.cc" + "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoding_test.cc" + "${draco_src_root}/compression/mesh/mesh_encoder_test.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_encoding_test.cc" + "${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoding_test.cc" + "${draco_src_root}/core/buffer_bit_coding_test.cc" + "${draco_src_root}/core/draco_test_base.h" + "${draco_src_root}/core/draco_test_utils.cc" + "${draco_src_root}/core/draco_test_utils.h" + "${draco_src_root}/core/math_utils_test.cc" + "${draco_src_root}/core/quantization_utils_test.cc" + "${draco_src_root}/core/status_test.cc" + "${draco_src_root}/core/vector_d_test.cc" + "${draco_src_root}/io/file_reader_test_common.h" + "${draco_src_root}/io/file_utils_test.cc" + "${draco_src_root}/io/stdio_file_reader_test.cc" + "${draco_src_root}/io/stdio_file_writer_test.cc" + "${draco_src_root}/io/obj_decoder_test.cc" + "${draco_src_root}/io/obj_encoder_test.cc" + "${draco_src_root}/io/ply_decoder_test.cc" + "${draco_src_root}/io/ply_reader_test.cc" + "${draco_src_root}/io/point_cloud_io_test.cc" + "${draco_src_root}/mesh/mesh_are_equivalent_test.cc" + "${draco_src_root}/mesh/mesh_cleanup_test.cc" + "${draco_src_root}/mesh/triangle_soup_mesh_builder_test.cc" + "${draco_src_root}/metadata/metadata_encoder_test.cc" + "${draco_src_root}/metadata/metadata_test.cc" + "${draco_src_root}/point_cloud/point_cloud_builder_test.cc" + "${draco_src_root}/point_cloud/point_cloud_test.cc") + +list(APPEND draco_gtest_all + "${draco_root}/../googletest/googletest/src/gtest-all.cc") +list(APPEND draco_gtest_main + "${draco_root}/../googletest/googletest/src/gtest_main.cc") + +macro(draco_setup_test_targets) + if(DRACO_TESTS) + if(NOT (EXISTS ${draco_gtest_all} AND EXISTS ${draco_gtest_main})) + message(FATAL "googletest must be a sibling directory of ${draco_root}.") + endif() + + list(APPEND draco_test_defines GTEST_HAS_PTHREAD=0) + + draco_add_library(TEST + NAME + draco_gtest + TYPE + STATIC + SOURCES + ${draco_gtest_all} + DEFINES + ${draco_defines} + ${draco_test_defines} + INCLUDES + ${draco_test_include_paths}) + + draco_add_library(TEST + NAME + draco_gtest_main + TYPE + STATIC + SOURCES + ${draco_gtest_main} + DEFINES + ${draco_defines} + ${draco_test_defines} + INCLUDES + ${draco_test_include_paths}) + + set(DRACO_TEST_DATA_DIR "${draco_root}/testdata") + set(DRACO_TEST_TEMP_DIR "${draco_build}/draco_test_temp") + file(MAKE_DIRECTORY "${DRACO_TEST_TEMP_DIR}") + + # 
Sets DRACO_TEST_DATA_DIR and DRACO_TEST_TEMP_DIR. + configure_file("${draco_root}/cmake/draco_test_config.h.cmake" + "${draco_build}/testing/draco_test_config.h") + + # Create the test targets. + draco_add_executable(NAME + draco_tests + SOURCES + ${draco_test_sources} + DEFINES + ${draco_defines} + ${draco_test_defines} + INCLUDES + ${draco_test_include_paths} + LIB_DEPS + draco_static + draco_gtest + draco_gtest_main) + + draco_add_executable(NAME + draco_factory_tests + SOURCES + ${draco_factory_test_sources} + DEFINES + ${draco_defines} + ${draco_test_defines} + INCLUDES + ${draco_test_include_paths} + LIB_DEPS + draco_static + draco_gtest + draco_gtest_main) + endif() +endmacro() diff --git a/contrib/draco/cmake/draco_variables.cmake b/contrib/draco/cmake/draco_variables.cmake new file mode 100644 index 000000000..8dbc77a53 --- /dev/null +++ b/contrib/draco/cmake/draco_variables.cmake @@ -0,0 +1,64 @@ +if(DRACO_CMAKE_DRACO_VARIABLES_CMAKE_) + return() +endif() # DRACO_CMAKE_DRACO_VARIABLES_CMAKE_ +set(DRACO_CMAKE_DRACO_VARIABLES_CMAKE_ 1) + +# Halts generation when $variable_name does not refer to a directory that +# exists. +macro(draco_variable_must_be_directory variable_name) + if("${variable_name}" STREQUAL "") + message( + FATAL_ERROR + "Empty variable_name passed to draco_variable_must_be_directory.") + endif() + + if("${${variable_name}}" STREQUAL "") + message( + FATAL_ERROR + "Empty variable ${variable_name} is required to build draco.") + endif() + + if(NOT IS_DIRECTORY "${${variable_name}}") + message( + FATAL_ERROR + "${variable_name}, which is ${${variable_name}}, does not refer to a\n" + "directory.") + endif() +endmacro() + +# Adds $var_name to the tracked variables list. +macro(draco_track_configuration_variable var_name) + if(DRACO_VERBOSE GREATER 2) + message("---- draco_track_configuration_variable ----\n" + "var_name=${var_name}\n" + "----------------------------------------------\n") + endif() + + list(APPEND draco_configuration_variables ${var_name}) + list(REMOVE_DUPLICATES draco_configuration_variables) +endmacro() + +# Logs current C++ and executable linker flags via the CMake message command. +macro(draco_dump_cmake_flag_variables) + unset(flag_variables) + list(APPEND flag_variables "CMAKE_CXX_FLAGS_INIT" "CMAKE_CXX_FLAGS" + "CMAKE_EXE_LINKER_FLAGS_INIT" "CMAKE_EXE_LINKER_FLAGS") + if(CMAKE_BUILD_TYPE) + list(APPEND flag_variables "CMAKE_BUILD_TYPE" + "CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}_INIT" + "CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}" + "CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}_INIT" + "CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}") + endif() + foreach(flag_variable ${flag_variables}) + message("${flag_variable}:${${flag_variable}}") + endforeach() +endmacro() + +# Dumps the variables tracked in $draco_configuration_variables via the CMake +# message command. +macro(draco_dump_tracked_configuration_variables) + foreach(config_variable ${draco_configuration_variables}) + message("${config_variable}:${${config_variable}}") + endforeach() +endmacro() diff --git a/contrib/draco/cmake/sanitizers.cmake b/contrib/draco/cmake/sanitizers.cmake new file mode 100644 index 000000000..e720bc045 --- /dev/null +++ b/contrib/draco/cmake/sanitizers.cmake @@ -0,0 +1,19 @@ +if(DRACO_CMAKE_SANITIZERS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_SANITIZERS_CMAKE_ 1) + +if(MSVC OR NOT SANITIZE) + return() +endif() + +include("${draco_root}/cmake/compiler_flags.cmake") + +string(TOLOWER ${SANITIZE} SANITIZE) + +# Require the sanitizer requested. 
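Editor's note: the sanitizer hook above is driven by a single SANITIZE cache value, which is lowercased and passed straight to -fsanitize=. As a rough illustration only (not part of the vendored file, and assuming a standalone Draco checkout with tests enabled), a configure step might look like:

    cmake path/to/draco -DDRACO_TESTS=ON -DSANITIZE=address
    cmake --build .

Any sanitizer name the compiler's -fsanitize= switch accepts (for example thread or undefined) can be substituted; the two require_* calls below then verify that both the compiler and the linker actually accept the requested flag.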
+require_linker_flag("-fsanitize=${SANITIZE}") +require_compiler_flag("-fsanitize=${SANITIZE}" YES) + +# Make callstacks accurate. +require_compiler_flag("-fno-omit-frame-pointer -fno-optimize-sibling-calls" YES) diff --git a/contrib/draco/cmake/toolchains/aarch64-linux-gnu.cmake b/contrib/draco/cmake/toolchains/aarch64-linux-gnu.cmake new file mode 100644 index 000000000..87e0b4a45 --- /dev/null +++ b/contrib/draco/cmake/toolchains/aarch64-linux-gnu.cmake @@ -0,0 +1,14 @@ +if(DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_) + return() +endif() # DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_ +set(DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_ 1) + +set(CMAKE_SYSTEM_NAME "Linux") + +if("${CROSS}" STREQUAL "") + set(CROSS aarch64-linux-gnu-) +endif() + +set(CMAKE_CXX_COMPILER ${CROSS}g++) +set(CMAKE_CXX_FLAGS_INIT "-march=armv8-a") +set(CMAKE_SYSTEM_PROCESSOR "aarch64") diff --git a/contrib/draco/cmake/toolchains/android-ndk-common.cmake b/contrib/draco/cmake/toolchains/android-ndk-common.cmake new file mode 100644 index 000000000..5126d6e29 --- /dev/null +++ b/contrib/draco/cmake/toolchains/android-ndk-common.cmake @@ -0,0 +1,23 @@ +if(DRACO_CMAKE_TOOLCHAINS_ANDROID_NDK_COMMON_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ANDROID_NDK_COMMON_CMAKE_ 1) + +# Toolchain files do not have access to cached variables: +# https://gitlab.kitware.com/cmake/cmake/issues/16170. Set an intermediate +# environment variable when loaded the first time. +if(DRACO_ANDROID_NDK_PATH) + set(ENV{DRACO_ANDROID_NDK_PATH} "${DRACO_ANDROID_NDK_PATH}") +else() + set(DRACO_ANDROID_NDK_PATH "$ENV{DRACO_ANDROID_NDK_PATH}") +endif() + +set(CMAKE_SYSTEM_NAME Android) + +if(NOT CMAKE_ANDROID_STL_TYPE) + set(CMAKE_ANDROID_STL_TYPE c++_static) +endif() + +if(NOT CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION) + set(CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION clang) +endif() diff --git a/contrib/draco/cmake/toolchains/android.cmake b/contrib/draco/cmake/toolchains/android.cmake new file mode 100644 index 000000000..b8f576d5e --- /dev/null +++ b/contrib/draco/cmake/toolchains/android.cmake @@ -0,0 +1,39 @@ +if(DRACO_CMAKE_TOOLCHAINS_ANDROID_CMAKE_) + return() +endif() # DRACO_CMAKE_TOOLCHAINS_ANDROID_CMAKE_ + +# Additional ANDROID_* settings are available, see: +# https://developer.android.com/ndk/guides/cmake#variables + +if(NOT ANDROID_PLATFORM) + set(ANDROID_PLATFORM android-21) +endif() + +# Choose target architecture with: +# +# -DANDROID_ABI={armeabi-v7a,armeabi-v7a with NEON,arm64-v8a,x86,x86_64} +if(NOT ANDROID_ABI) + set(ANDROID_ABI arm64-v8a) +endif() + +# Force arm mode for 32-bit targets (instead of the default thumb) to improve +# performance. +if(NOT ANDROID_ARM_MODE) + set(ANDROID_ARM_MODE arm) +endif() + +# Toolchain files do not have access to cached variables: +# https://gitlab.kitware.com/cmake/cmake/issues/16170. Set an intermediate +# environment variable when loaded the first time. 
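Editor's note: as a hedged sketch (not part of the file itself, with placeholder paths), a cross-compile configure step that exercises this passthrough could look like:

    cmake path/to/source \
      -DCMAKE_TOOLCHAIN_FILE=contrib/draco/cmake/toolchains/android.cmake \
      -DDRACO_ANDROID_NDK_PATH=/path/to/android-ndk \
      -DANDROID_ABI=arm64-v8a

On the first run the -D value is intended to be copied into the DRACO_ANDROID_NDK_PATH environment variable; on later reads, when the toolchain file can no longer see the cache, the value falls back to the environment in the block below.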
+if(DRACO_ANDROID_NDK_PATH) + set(ENV{DRACO_ANDROID_NDK_PATH} "${DRACO_ANDROID_NDK_PATH}") +else() + set(DRACO_ANDROID_NDK_PATH "$ENV{DRACO_ANDROID_NDK_PATH}") +endif() + +if(NOT DRACO_ANDROID_NDK_PATH) + message(FATAL_ERROR "DRACO_ANDROID_NDK_PATH not set.") + return() +endif() + +include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake") diff --git a/contrib/draco/cmake/toolchains/arm-ios-common.cmake b/contrib/draco/cmake/toolchains/arm-ios-common.cmake new file mode 100644 index 000000000..65326d1c2 --- /dev/null +++ b/contrib/draco/cmake/toolchains/arm-ios-common.cmake @@ -0,0 +1,17 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARM_IOS_COMMON_CMAKE_) + return() +endif() +set(DRACO_CMAKE_ARM_IOS_COMMON_CMAKE_ 1) + +set(CMAKE_SYSTEM_NAME "Darwin") +if(CMAKE_OSX_SDK) + set(CMAKE_OSX_SYSROOT ${CMAKE_OSX_SDK}) +else() + set(CMAKE_OSX_SYSROOT iphoneos) +endif() +set(CMAKE_C_COMPILER clang) +set(CMAKE_C_COMPILER_ARG1 "-arch ${CMAKE_SYSTEM_PROCESSOR}") +set(CMAKE_CXX_COMPILER clang++) +set(CMAKE_CXX_COMPILER_ARG1 "-arch ${CMAKE_SYSTEM_PROCESSOR}") + +# TODO(tomfinegan): Handle bit code embedding. diff --git a/contrib/draco/cmake/toolchains/arm-linux-gnueabihf.cmake b/contrib/draco/cmake/toolchains/arm-linux-gnueabihf.cmake new file mode 100644 index 000000000..6e45969e9 --- /dev/null +++ b/contrib/draco/cmake/toolchains/arm-linux-gnueabihf.cmake @@ -0,0 +1,15 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_) + return() +endif() # DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_ +set(DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_ 1) + +set(CMAKE_SYSTEM_NAME "Linux") + +if("${CROSS}" STREQUAL "") + set(CROSS arm-linux-gnueabihf-) +endif() + +set(CMAKE_CXX_COMPILER ${CROSS}g++) +set(CMAKE_CXX_FLAGS_INIT "-march=armv7-a -marm") +set(CMAKE_SYSTEM_PROCESSOR "armv7") +set(DRACO_NEON_INTRINSICS_FLAG "-mfpu=neon") diff --git a/contrib/draco/cmake/toolchains/arm64-android-ndk-libcpp.cmake b/contrib/draco/cmake/toolchains/arm64-android-ndk-libcpp.cmake new file mode 100644 index 000000000..4b6d366f0 --- /dev/null +++ b/contrib/draco/cmake/toolchains/arm64-android-ndk-libcpp.cmake @@ -0,0 +1,16 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARM64_ANDROID_NDK_LIBCPP_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARM64_ANDROID_NDK_LIBCPP_CMAKE_ 1) + +include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake") + +if(NOT ANDROID_PLATFORM) + set(ANROID_PLATFORM android-21) +endif() + +if(NOT ANDROID_ABI) + set(ANDROID_ABI arm64-v8a) +endif() + +include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake") diff --git a/contrib/draco/cmake/toolchains/arm64-ios.cmake b/contrib/draco/cmake/toolchains/arm64-ios.cmake new file mode 100644 index 000000000..c4ec7e3fa --- /dev/null +++ b/contrib/draco/cmake/toolchains/arm64-ios.cmake @@ -0,0 +1,14 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_ 1) + +if(XCODE) + # TODO(tomfinegan): Handle arm builds in Xcode. 
+ message(FATAL_ERROR "This toolchain does not support Xcode.") +endif() + +set(CMAKE_SYSTEM_PROCESSOR "arm64") +set(CMAKE_OSX_ARCHITECTURES "arm64") + +include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake") diff --git a/contrib/draco/cmake/toolchains/arm64-linux-gcc.cmake b/contrib/draco/cmake/toolchains/arm64-linux-gcc.cmake new file mode 100644 index 000000000..046ff0139 --- /dev/null +++ b/contrib/draco/cmake/toolchains/arm64-linux-gcc.cmake @@ -0,0 +1,18 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARM64_LINUX_GCC_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARM64_LINUX_GCC_CMAKE_ 1) + +set(CMAKE_SYSTEM_NAME "Linux") + +if("${CROSS}" STREQUAL "") + # Default the cross compiler prefix to something known to work. + set(CROSS aarch64-linux-gnu-) +endif() + +set(CMAKE_C_COMPILER ${CROSS}gcc) +set(CMAKE_CXX_COMPILER ${CROSS}g++) +set(AS_EXECUTABLE ${CROSS}as) +set(CMAKE_C_COMPILER_ARG1 "-march=armv8-a") +set(CMAKE_CXX_COMPILER_ARG1 "-march=armv8-a") +set(CMAKE_SYSTEM_PROCESSOR "arm64") diff --git a/contrib/draco/cmake/toolchains/armv7-android-ndk-libcpp.cmake b/contrib/draco/cmake/toolchains/armv7-android-ndk-libcpp.cmake new file mode 100644 index 000000000..80ee98b18 --- /dev/null +++ b/contrib/draco/cmake/toolchains/armv7-android-ndk-libcpp.cmake @@ -0,0 +1,16 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARMV7_ANDROID_NDK_LIBCPP_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARMV7_ANDROID_NDK_LIBCPP_CMAKE_ 1) + +include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake") + +if(NOT ANDROID_PLATFORM) + set(ANDROID_PLATFORM android-18) +endif() + +if(NOT ANDROID_ABI) + set(ANDROID_ABI armeabi-v7a) +endif() + +include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake") diff --git a/contrib/draco/cmake/toolchains/armv7-ios.cmake b/contrib/draco/cmake/toolchains/armv7-ios.cmake new file mode 100644 index 000000000..8ddd6997b --- /dev/null +++ b/contrib/draco/cmake/toolchains/armv7-ios.cmake @@ -0,0 +1,14 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_ 1) + +if(XCODE) + # TODO(tomfinegan): Handle arm builds in Xcode. + message(FATAL_ERROR "This toolchain does not support Xcode.") +endif() + +set(CMAKE_SYSTEM_PROCESSOR "armv7") +set(CMAKE_OSX_ARCHITECTURES "armv7") + +include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake") diff --git a/contrib/draco/cmake/toolchains/armv7-linux-gcc.cmake b/contrib/draco/cmake/toolchains/armv7-linux-gcc.cmake new file mode 100644 index 000000000..9c9472319 --- /dev/null +++ b/contrib/draco/cmake/toolchains/armv7-linux-gcc.cmake @@ -0,0 +1,24 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARMV7_LINUX_GCC_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARMV7_LINUX_GCC_CMAKE_ 1) + +set(CMAKE_SYSTEM_NAME "Linux") + +if("${CROSS}" STREQUAL "") + # Default the cross compiler prefix to something known to work. 
+ set(CROSS arm-linux-gnueabihf-) +endif() + +if(NOT ${CROSS} MATCHES hf-$) + set(DRACO_EXTRA_TOOLCHAIN_FLAGS "-mfloat-abi=softfp") +endif() + +set(CMAKE_C_COMPILER ${CROSS}gcc) +set(CMAKE_CXX_COMPILER ${CROSS}g++) +set(AS_EXECUTABLE ${CROSS}as) +set(CMAKE_C_COMPILER_ARG1 + "-march=armv7-a -mfpu=neon ${DRACO_EXTRA_TOOLCHAIN_FLAGS}") +set(CMAKE_CXX_COMPILER_ARG1 + "-march=armv7-a -mfpu=neon ${DRACO_EXTRA_TOOLCHAIN_FLAGS}") +set(CMAKE_SYSTEM_PROCESSOR "armv7") diff --git a/contrib/draco/cmake/toolchains/armv7s-ios.cmake b/contrib/draco/cmake/toolchains/armv7s-ios.cmake new file mode 100644 index 000000000..b433025ba --- /dev/null +++ b/contrib/draco/cmake/toolchains/armv7s-ios.cmake @@ -0,0 +1,14 @@ +if(DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_ 1) + +if(XCODE) + # TODO(tomfinegan): Handle arm builds in Xcode. + message(FATAL_ERROR "This toolchain does not support Xcode.") +endif() + +set(CMAKE_SYSTEM_PROCESSOR "armv7s") +set(CMAKE_OSX_ARCHITECTURES "armv7s") + +include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake") diff --git a/contrib/draco/cmake/toolchains/i386-ios.cmake b/contrib/draco/cmake/toolchains/i386-ios.cmake new file mode 100644 index 000000000..e9a105591 --- /dev/null +++ b/contrib/draco/cmake/toolchains/i386-ios.cmake @@ -0,0 +1,15 @@ +if(DRACO_CMAKE_TOOLCHAINS_i386_IOS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_i386_IOS_CMAKE_ 1) + +if(XCODE) + # TODO(tomfinegan): Handle arm builds in Xcode. + message(FATAL_ERROR "This toolchain does not support Xcode.") +endif() + +set(CMAKE_SYSTEM_PROCESSOR "i386") +set(CMAKE_OSX_ARCHITECTURES "i386") +set(CMAKE_OSX_SDK "iphonesimulator") + +include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake") diff --git a/contrib/draco/cmake/toolchains/x86-android-ndk-libcpp.cmake b/contrib/draco/cmake/toolchains/x86-android-ndk-libcpp.cmake new file mode 100644 index 000000000..d43383640 --- /dev/null +++ b/contrib/draco/cmake/toolchains/x86-android-ndk-libcpp.cmake @@ -0,0 +1,16 @@ +if(DRACO_CMAKE_TOOLCHAINS_X86_ANDROID_NDK_LIBCPP_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_X86_ANDROID_NDK_LIBCPP_CMAKE_ 1) + +include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake") + +if(NOT ANDROID_PLATFORM) + set(ANDROID_PLATFORM android-18) +endif() + +if(NOT ANDROID_ABI) + set(ANDROID_ABI x86) +endif() + +include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake") diff --git a/contrib/draco/cmake/toolchains/x86_64-android-ndk-libcpp.cmake b/contrib/draco/cmake/toolchains/x86_64-android-ndk-libcpp.cmake new file mode 100644 index 000000000..d6fabeacc --- /dev/null +++ b/contrib/draco/cmake/toolchains/x86_64-android-ndk-libcpp.cmake @@ -0,0 +1,16 @@ +if(DRACO_CMAKE_TOOLCHAINS_X86_64_ANDROID_NDK_LIBCPP_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_X86_64_ANDROID_NDK_LIBCPP_CMAKE_ 1) + +include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake") + +if(NOT ANDROID_PLATFORM) + set(ANDROID_PLATFORM android-21) +endif() + +if(NOT ANDROID_ABI) + set(ANDROID_ABI x86_64) +endif() + +include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake") diff --git a/contrib/draco/cmake/toolchains/x86_64-ios.cmake b/contrib/draco/cmake/toolchains/x86_64-ios.cmake new file mode 100644 index 000000000..4c50a72a2 --- /dev/null +++ b/contrib/draco/cmake/toolchains/x86_64-ios.cmake @@ -0,0 +1,15 @@ +if(DRACO_CMAKE_TOOLCHAINS_X86_64_IOS_CMAKE_) + return() +endif() +set(DRACO_CMAKE_TOOLCHAINS_X86_64_IOS_CMAKE_ 1) + +if(XCODE) + # TODO(tomfinegan): Handle arm 
builds in Xcode. + message(FATAL_ERROR "This toolchain does not support Xcode.") +endif() + +set(CMAKE_SYSTEM_PROCESSOR "x86_64") +set(CMAKE_OSX_ARCHITECTURES "x86_64") +set(CMAKE_OSX_SDK "iphonesimulator") + +include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake") diff --git a/contrib/draco/cmake/util.cmake b/contrib/draco/cmake/util.cmake new file mode 100644 index 000000000..813146a62 --- /dev/null +++ b/contrib/draco/cmake/util.cmake @@ -0,0 +1,79 @@ +if(DRACO_CMAKE_UTIL_CMAKE_) + return() +endif() +set(DRACO_CMAKE_UTIL_CMAKE_ 1) + +# Creates dummy source file in $draco_build_dir named $basename.$extension and +# returns the full path to the dummy source file via the $out_file_path +# parameter. +function(create_dummy_source_file basename extension out_file_path) + set(dummy_source_file "${draco_build_dir}/${basename}.${extension}") + file(WRITE "${dummy_source_file}.new" + "// Generated file. DO NOT EDIT!\n" + "// ${target_name} needs a ${extension} file to force link language, \n" + "// or to silence a harmless CMake warning: Ignore me.\n" + "void ${target_name}_dummy_function(void) {}\n") + + # Will replace ${dummy_source_file} only if the file content has changed. + # This prevents forced Draco rebuilds after CMake runs. + configure_file("${dummy_source_file}.new" "${dummy_source_file}") + file(REMOVE "${dummy_source_file}.new") + + set(${out_file_path} ${dummy_source_file} PARENT_SCOPE) +endfunction() + +# Convenience function for adding a dummy source file to $target_name using +# $extension as the file extension. Wraps create_dummy_source_file(). +function(add_dummy_source_file_to_target target_name extension) + create_dummy_source_file("${target_name}" "${extension}" "dummy_source_file") + target_sources(${target_name} PRIVATE ${dummy_source_file}) +endfunction() + +# Extracts the version number from $version_file and returns it to the user via +# $version_string_out_var. This is achieved by finding the first instance of the +# kDracoVersion variable and then removing everything but the string literal +# assigned to the variable. Quotes and semicolon are stripped from the returned +# string. +function(extract_version_string version_file version_string_out_var) + file(STRINGS "${version_file}" draco_version REGEX "kDracoVersion") + list(GET draco_version 0 draco_version) + string(REPLACE "static const char kDracoVersion[] = " "" draco_version + "${draco_version}") + string(REPLACE ";" "" draco_version "${draco_version}") + string(REPLACE "\"" "" draco_version "${draco_version}") + set("${version_string_out_var}" "${draco_version}" PARENT_SCOPE) +endfunction() + +# Sets CMake compiler launcher to $launcher_name when $launcher_name is found in +# $PATH. Warns user about ignoring build flag $launcher_flag when $launcher_name +# is not found in $PATH. +function(set_compiler_launcher launcher_flag launcher_name) + find_program(launcher_path "${launcher_name}") + if(launcher_path) + set(CMAKE_C_COMPILER_LAUNCHER "${launcher_path}" PARENT_SCOPE) + set(CMAKE_CXX_COMPILER_LAUNCHER "${launcher_path}" PARENT_SCOPE) + message("--- Using ${launcher_name} as compiler launcher.") + else() + message( + WARNING "--- Cannot find ${launcher_name}, ${launcher_flag} ignored.") + endif() +endfunction() + +# Terminates CMake execution when $var_name is unset in the environment. Sets +# CMake variable to the value of the environment variable when the variable is +# present in the environment. 
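Editor's note: a small usage sketch of the pattern these helpers implement (hypothetical variable name, not taken from the file): the caller is expected to provide the value through the environment before configuring,

    export MY_TOOL_ROOT=/opt/tool    # hypothetical variable supplied by the caller
    cmake path/to/source

and a script calling require_variable(MY_TOOL_ROOT) then aborts the configure step with a FATAL_ERROR when the environment variable is empty, otherwise mirroring it into a CMake variable of the same name via set_variable_if_unset.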
+macro(require_variable var_name)
+  if("$ENV{${var_name}}" STREQUAL "")
+    message(FATAL_ERROR "${var_name} must be set in environment.")
+  endif()
+  set_variable_if_unset(${var_name} "")
+endmacro()
+
+# Sets $var_name to $default_value if not already set.
+macro(set_variable_if_unset var_name default_value)
+  if(NOT "$ENV{${var_name}}" STREQUAL "")
+    set(${var_name} $ENV{${var_name}})
+  elseif(NOT ${var_name})
+    set(${var_name} ${default_value})
+  endif()
+endmacro()
diff --git a/contrib/draco/src/draco/animation/keyframe_animation.cc b/contrib/draco/src/draco/animation/keyframe_animation.cc
new file mode 100644
index 000000000..eaf94a330
--- /dev/null
+++ b/contrib/draco/src/draco/animation/keyframe_animation.cc
@@ -0,0 +1,54 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "draco/animation/keyframe_animation.h"
+
+namespace draco {
+
+KeyframeAnimation::KeyframeAnimation() {}
+
+bool KeyframeAnimation::SetTimestamps(
+    const std::vector<TimestampType> &timestamp) {
+  // Already added attributes.
+  const int32_t num_frames = timestamp.size();
+  if (num_attributes() > 0) {
+    // Timestamp attribute could be added only once.
+    if (timestamps()->size()) {
+      return false;
+    } else {
+      // Check if the number of frames is consistent with
+      // the existing keyframes.
+      if (num_frames != num_points()) {
+        return false;
+      }
+    }
+  } else {
+    // This is the first attribute.
+    set_num_frames(num_frames);
+  }
+
+  // Add attribute for time stamp data.
+  std::unique_ptr<PointAttribute> timestamp_att =
+      std::unique_ptr<PointAttribute>(new PointAttribute());
+  timestamp_att->Init(GeometryAttribute::GENERIC, 1, DT_FLOAT32, false,
+                      num_frames);
+  for (PointIndex i(0); i < num_frames; ++i) {
+    timestamp_att->SetAttributeValue(timestamp_att->mapped_index(i),
+                                     &timestamp[i.value()]);
+  }
+  this->SetAttribute(kTimestampId, std::move(timestamp_att));
+  return true;
+}
+
+} // namespace draco
diff --git a/contrib/draco/src/draco/animation/keyframe_animation.h b/contrib/draco/src/draco/animation/keyframe_animation.h
new file mode 100644
index 000000000..a7afb2b81
--- /dev/null
+++ b/contrib/draco/src/draco/animation/keyframe_animation.h
@@ -0,0 +1,107 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_H_
+#define DRACO_ANIMATION_KEYFRAME_ANIMATION_H_
+
+#include <vector>
+
+#include "draco/point_cloud/point_cloud.h"
+
+namespace draco {
+
+// Class for holding keyframe animation data.
It will have two or more
+// attributes as a point cloud. The first attribute is always the timestamp
+// of the animation. Each KeyframeAnimation could have multiple animations with
+// the same number of frames. Each animation will be treated as a point
+// attribute.
+class KeyframeAnimation : public PointCloud {
+ public:
+  // Force time stamp to be float type.
+  using TimestampType = float;
+
+  KeyframeAnimation();
+
+  // Animation must have only one timestamp attribute.
+  // This function must be called before adding any animation data.
+  // Returns false if timestamp already exists.
+  bool SetTimestamps(const std::vector<TimestampType> &timestamp);
+
+  // Returns an id for the added animation data. This id will be used to
+  // identify this animation.
+  // Returns -1 if error, e.g. number of frames is not consistent.
+  // Type |T| should be consistent with |DataType|, e.g:
+  //   float - DT_FLOAT32,
+  //   int32_t - DT_INT32, ...
+  template <typename T>
+  int32_t AddKeyframes(DataType data_type, uint32_t num_components,
+                       const std::vector<T> &data);
+
+  const PointAttribute *timestamps() const {
+    return GetAttributeByUniqueId(kTimestampId);
+  }
+  const PointAttribute *keyframes(int32_t animation_id) const {
+    return GetAttributeByUniqueId(animation_id);
+  }
+
+  // Number of frames should be equal to number points in the point cloud.
+  void set_num_frames(int32_t num_frames) { set_num_points(num_frames); }
+  int32_t num_frames() const { return static_cast<int32_t>(num_points()); }
+
+  int32_t num_animations() const { return num_attributes() - 1; }
+
+ private:
+  // Attribute id of timestamp is fixed to 0.
+  static constexpr int32_t kTimestampId = 0;
+};
+
+template <typename T>
+int32_t KeyframeAnimation::AddKeyframes(DataType data_type,
+                                        uint32_t num_components,
+                                        const std::vector<T> &data) {
+  // TODO(draco-eng): Verify T is consistent with |data_type|.
+  if (num_components == 0) {
+    return -1;
+  }
+  // If timestamps is not added yet, then reserve attribute 0 for timestamps.
+  if (!num_attributes()) {
+    // Add a temporary attribute with 0 points to fill attribute id 0.
+    std::unique_ptr<PointAttribute> temp_att =
+        std::unique_ptr<PointAttribute>(new PointAttribute());
+    temp_att->Init(GeometryAttribute::GENERIC, num_components, data_type, false,
+                   0);
+    this->AddAttribute(std::move(temp_att));
+
+    set_num_frames(data.size() / num_components);
+  }
+
+  if (data.size() != num_components * num_frames()) {
+    return -1;
+  }
+
+  std::unique_ptr<PointAttribute> keyframe_att =
+      std::unique_ptr<PointAttribute>(new PointAttribute());
+  keyframe_att->Init(GeometryAttribute::GENERIC, num_components, data_type,
+                     false, num_frames());
+  const size_t stride = num_components;
+  for (PointIndex i(0); i < num_frames(); ++i) {
+    keyframe_att->SetAttributeValue(keyframe_att->mapped_index(i),
+                                    &data[i.value() * stride]);
+  }
+  return this->AddAttribute(std::move(keyframe_att));
+}
+
+} // namespace draco
+
+#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_H_
diff --git a/contrib/draco/src/draco/animation/keyframe_animation_decoder.cc b/contrib/draco/src/draco/animation/keyframe_animation_decoder.cc
new file mode 100644
index 000000000..20659468d
--- /dev/null
+++ b/contrib/draco/src/draco/animation/keyframe_animation_decoder.cc
@@ -0,0 +1,30 @@
+// Copyright 2017 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/animation/keyframe_animation_decoder.h" + +namespace draco { + +Status KeyframeAnimationDecoder::Decode(const DecoderOptions &options, + DecoderBuffer *in_buffer, + KeyframeAnimation *animation) { + const auto status = PointCloudSequentialDecoder::Decode( + options, in_buffer, static_cast(animation)); + if (!status.ok()) { + return status; + } + return OkStatus(); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/animation/keyframe_animation_decoder.h b/contrib/draco/src/draco/animation/keyframe_animation_decoder.h new file mode 100644 index 000000000..fdf086b3a --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation_decoder.h @@ -0,0 +1,34 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_ +#define DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_ + +#include "draco/animation/keyframe_animation.h" +#include "draco/compression/point_cloud/point_cloud_sequential_decoder.h" + +namespace draco { + +// Class for decoding keyframe animation. +class KeyframeAnimationDecoder : private PointCloudSequentialDecoder { + public: + KeyframeAnimationDecoder(){}; + + Status Decode(const DecoderOptions &options, DecoderBuffer *in_buffer, + KeyframeAnimation *animation); +}; + +} // namespace draco + +#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_ diff --git a/contrib/draco/src/draco/animation/keyframe_animation_encoder.cc b/contrib/draco/src/draco/animation/keyframe_animation_encoder.cc new file mode 100644 index 000000000..f7d84f310 --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation_encoder.cc @@ -0,0 +1,28 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#include "draco/animation/keyframe_animation_encoder.h" + +namespace draco { + +KeyframeAnimationEncoder::KeyframeAnimationEncoder() {} + +Status KeyframeAnimationEncoder::EncodeKeyframeAnimation( + const KeyframeAnimation &animation, const EncoderOptions &options, + EncoderBuffer *out_buffer) { + SetPointCloud(animation); + return Encode(options, out_buffer); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/animation/keyframe_animation_encoder.h b/contrib/draco/src/draco/animation/keyframe_animation_encoder.h new file mode 100644 index 000000000..6096c79fa --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation_encoder.h @@ -0,0 +1,39 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_ +#define DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_ + +#include "draco/animation/keyframe_animation.h" +#include "draco/compression/point_cloud/point_cloud_sequential_encoder.h" + +namespace draco { + +// Class for encoding keyframe animation. It takes KeyframeAnimation as a +// PointCloud and compress it. It's mostly a wrapper around PointCloudEncoder so +// that the animation module could be separated from geometry compression when +// exposed to developers. +class KeyframeAnimationEncoder : private PointCloudSequentialEncoder { + public: + KeyframeAnimationEncoder(); + + // Encode an animation to a buffer. + Status EncodeKeyframeAnimation(const KeyframeAnimation &animation, + const EncoderOptions &options, + EncoderBuffer *out_buffer); +}; + +} // namespace draco + +#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_ diff --git a/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc b/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc new file mode 100644 index 000000000..4a6491f9d --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation_encoding_test.cc @@ -0,0 +1,168 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#include "draco/animation/keyframe_animation.h" +#include "draco/animation/keyframe_animation_decoder.h" +#include "draco/animation/keyframe_animation_encoder.h" +#include "draco/core/draco_test_base.h" +#include "draco/core/draco_test_utils.h" + +namespace draco { + +class KeyframeAnimationEncodingTest : public ::testing::Test { + protected: + KeyframeAnimationEncodingTest() {} + + bool CreateAndAddTimestamps(int32_t num_frames) { + timestamps_.resize(num_frames); + for (int i = 0; i < timestamps_.size(); ++i) + timestamps_[i] = static_cast(i); + return keyframe_animation_.SetTimestamps(timestamps_); + } + + int32_t CreateAndAddAnimationData(int32_t num_frames, + uint32_t num_components) { + // Create and add animation data with. + animation_data_.resize(num_frames * num_components); + for (int i = 0; i < animation_data_.size(); ++i) + animation_data_[i] = static_cast(i); + return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components, + animation_data_); + } + + template + void CompareAnimationData(const KeyframeAnimation &animation0, + const KeyframeAnimation &animation1, + bool quantized) { + ASSERT_EQ(animation0.num_frames(), animation1.num_frames()); + ASSERT_EQ(animation0.num_animations(), animation1.num_animations()); + + if (quantized) { + // TODO(hemmer) : Add test for stable quantization. + // Quantization will result in slightly different values. + // Skip comparing values. + return; + } + + // Compare time stamp. + const auto timestamp_att0 = animation0.timestamps(); + const auto timestamp_att1 = animation0.timestamps(); + for (int i = 0; i < animation0.num_frames(); ++i) { + std::array att_value0; + std::array att_value1; + ASSERT_TRUE((timestamp_att0->GetValue( + draco::AttributeValueIndex(i), &att_value0))); + ASSERT_TRUE((timestamp_att1->GetValue( + draco::AttributeValueIndex(i), &att_value1))); + ASSERT_FLOAT_EQ(att_value0[0], att_value1[0]); + } + + for (int animation_id = 1; animation_id < animation0.num_animations(); + ++animation_id) { + // Compare keyframe data. + const auto keyframe_att0 = animation0.keyframes(animation_id); + const auto keyframe_att1 = animation1.keyframes(animation_id); + ASSERT_EQ(keyframe_att0->num_components(), + keyframe_att1->num_components()); + for (int i = 0; i < animation0.num_frames(); ++i) { + std::array att_value0; + std::array att_value1; + ASSERT_TRUE((keyframe_att0->GetValue( + draco::AttributeValueIndex(i), &att_value0))); + ASSERT_TRUE((keyframe_att1->GetValue( + draco::AttributeValueIndex(i), &att_value1))); + for (int j = 0; j < att_value0.size(); ++j) { + ASSERT_FLOAT_EQ(att_value0[j], att_value1[j]); + } + } + } + } + + template + void TestKeyframeAnimationEncoding() { + TestKeyframeAnimationEncoding(false); + } + + template + void TestKeyframeAnimationEncoding(bool quantized) { + // Encode animation class. + draco::EncoderBuffer buffer; + draco::KeyframeAnimationEncoder encoder; + EncoderOptions options = EncoderOptions::CreateDefaultOptions(); + if (quantized) { + // Set quantization for timestamps. + options.SetAttributeInt(0, "quantization_bits", 20); + // Set quantization for keyframes. + for (int i = 1; i <= keyframe_animation_.num_animations(); ++i) { + options.SetAttributeInt(i, "quantization_bits", 20); + } + } + + ASSERT_TRUE( + encoder.EncodeKeyframeAnimation(keyframe_animation_, options, &buffer) + .ok()); + + draco::DecoderBuffer dec_decoder; + draco::KeyframeAnimationDecoder decoder; + DecoderBuffer dec_buffer; + dec_buffer.Init(buffer.data(), buffer.size()); + + // Decode animation class. 
+ std::unique_ptr decoded_animation( + new KeyframeAnimation()); + DecoderOptions dec_options; + ASSERT_TRUE( + decoder.Decode(dec_options, &dec_buffer, decoded_animation.get()).ok()); + + // Verify if animation before and after compression is identical. + CompareAnimationData(keyframe_animation_, + *decoded_animation, quantized); + } + + draco::KeyframeAnimation keyframe_animation_; + std::vector timestamps_; + std::vector animation_data_; +}; + +TEST_F(KeyframeAnimationEncodingTest, OneComponent) { + const int num_frames = 1; + ASSERT_TRUE(CreateAndAddTimestamps(num_frames)); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 1), 1); + TestKeyframeAnimationEncoding<1>(); +} + +TEST_F(KeyframeAnimationEncodingTest, ManyComponents) { + const int num_frames = 100; + ASSERT_TRUE(CreateAndAddTimestamps(num_frames)); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 100), 1); + TestKeyframeAnimationEncoding<100>(); +} + +TEST_F(KeyframeAnimationEncodingTest, ManyComponentsWithQuantization) { + const int num_frames = 100; + ASSERT_TRUE(CreateAndAddTimestamps(num_frames)); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 4), 1); + // Test compression with quantization. + TestKeyframeAnimationEncoding<4>(true); +} + +TEST_F(KeyframeAnimationEncodingTest, MultipleAnimations) { + const int num_frames = 5; + ASSERT_TRUE(CreateAndAddTimestamps(num_frames)); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 3), 1); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 3), 2); + TestKeyframeAnimationEncoding<3>(); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/animation/keyframe_animation_test.cc b/contrib/draco/src/draco/animation/keyframe_animation_test.cc new file mode 100644 index 000000000..bc92b25ff --- /dev/null +++ b/contrib/draco/src/draco/animation/keyframe_animation_test.cc @@ -0,0 +1,102 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/animation/keyframe_animation.h" + +#include "draco/core/draco_test_base.h" + +namespace { + +class KeyframeAnimationTest : public ::testing::Test { + protected: + KeyframeAnimationTest() {} + + bool CreateAndAddTimestamps(int32_t num_frames) { + timestamps_.resize(num_frames); + for (int i = 0; i < timestamps_.size(); ++i) + timestamps_[i] = static_cast(i); + return keyframe_animation_.SetTimestamps(timestamps_); + } + + int32_t CreateAndAddAnimationData(int32_t num_frames, + uint32_t num_components) { + // Create and add animation data with. + animation_data_.resize(num_frames * num_components); + for (int i = 0; i < animation_data_.size(); ++i) + animation_data_[i] = static_cast(i); + return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components, + animation_data_); + } + + template + void CompareAnimationData() { + // Compare time stamp. 
+ const auto timestamp_att = keyframe_animation_.timestamps(); + for (int i = 0; i < timestamps_.size(); ++i) { + std::array att_value; + ASSERT_TRUE((timestamp_att->GetValue( + draco::AttributeValueIndex(i), &att_value))); + ASSERT_FLOAT_EQ(att_value[0], i); + } + + // Compare keyframe data. + const auto keyframe_att = keyframe_animation_.keyframes(1); + for (int i = 0; i < animation_data_.size() / num_components_t; ++i) { + std::array att_value; + ASSERT_TRUE((keyframe_att->GetValue( + draco::AttributeValueIndex(i), &att_value))); + for (int j = 0; j < num_components_t; ++j) { + ASSERT_FLOAT_EQ(att_value[j], i * num_components_t + j); + } + } + } + + template + void TestKeyframeAnimation(int32_t num_frames) { + ASSERT_TRUE(CreateAndAddTimestamps(num_frames)); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, num_components_t), 1); + CompareAnimationData(); + } + + draco::KeyframeAnimation keyframe_animation_; + std::vector timestamps_; + std::vector animation_data_; +}; + +// Test animation with 1 component and 10 frames. +TEST_F(KeyframeAnimationTest, OneComponent) { TestKeyframeAnimation<1>(10); } + +// Test animation with 4 component and 10 frames. +TEST_F(KeyframeAnimationTest, FourComponent) { TestKeyframeAnimation<4>(10); } + +// Test adding animation data before timestamp. +TEST_F(KeyframeAnimationTest, AddingAnimationFirst) { + ASSERT_EQ(CreateAndAddAnimationData(5, 1), 1); + ASSERT_TRUE(CreateAndAddTimestamps(5)); +} + +// Test adding timestamp more than once. +TEST_F(KeyframeAnimationTest, ErrorAddingTimestampsTwice) { + ASSERT_TRUE(CreateAndAddTimestamps(5)); + ASSERT_FALSE(CreateAndAddTimestamps(5)); +} +// Test animation with multiple animation data. +TEST_F(KeyframeAnimationTest, MultipleAnimationData) { + const int num_frames = 5; + ASSERT_TRUE(CreateAndAddTimestamps(num_frames)); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 1), 1); + ASSERT_EQ(CreateAndAddAnimationData(num_frames, 2), 2); +} + +} // namespace diff --git a/contrib/draco/src/draco/attributes/attribute_octahedron_transform.cc b/contrib/draco/src/draco/attributes/attribute_octahedron_transform.cc new file mode 100644 index 000000000..51c3bb6c8 --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_octahedron_transform.cc @@ -0,0 +1,145 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "draco/attributes/attribute_octahedron_transform.h" + +#include "draco/attributes/attribute_transform_type.h" +#include "draco/compression/attributes/normal_compression_utils.h" + +namespace draco { + +bool AttributeOctahedronTransform::InitFromAttribute( + const PointAttribute &attribute) { + const AttributeTransformData *const transform_data = + attribute.GetAttributeTransformData(); + if (!transform_data || + transform_data->transform_type() != ATTRIBUTE_OCTAHEDRON_TRANSFORM) { + return false; // Wrong transform type. 
+  }
+  quantization_bits_ = transform_data->GetParameterValue<int32_t>(0);
+  return true;
+}
+
+void AttributeOctahedronTransform::CopyToAttributeTransformData(
+    AttributeTransformData *out_data) const {
+  out_data->set_transform_type(ATTRIBUTE_OCTAHEDRON_TRANSFORM);
+  out_data->AppendParameterValue(quantization_bits_);
+}
+
+bool AttributeOctahedronTransform::TransformAttribute(
+    const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
+    PointAttribute *target_attribute) {
+  return GeneratePortableAttribute(attribute, point_ids,
+                                   target_attribute->size(), target_attribute);
+}
+
+bool AttributeOctahedronTransform::InverseTransformAttribute(
+    const PointAttribute &attribute, PointAttribute *target_attribute) {
+  if (target_attribute->data_type() != DT_FLOAT32) {
+    return false;
+  }
+
+  const int num_points = target_attribute->size();
+  const int num_components = target_attribute->num_components();
+  if (num_components != 3) {
+    return false;
+  }
+  constexpr int kEntrySize = sizeof(float) * 3;
+  float att_val[3];
+  const int32_t *source_attribute_data = reinterpret_cast<const int32_t *>(
+      attribute.GetAddress(AttributeValueIndex(0)));
+  uint8_t *target_address =
+      target_attribute->GetAddress(AttributeValueIndex(0));
+  OctahedronToolBox octahedron_tool_box;
+  if (!octahedron_tool_box.SetQuantizationBits(quantization_bits_)) {
+    return false;
+  }
+  for (uint32_t i = 0; i < num_points; ++i) {
+    const int32_t s = *source_attribute_data++;
+    const int32_t t = *source_attribute_data++;
+    octahedron_tool_box.QuantizedOctahedralCoordsToUnitVector(s, t, att_val);
+
+    // Store the decoded floating point values into the attribute buffer.
+    std::memcpy(target_address, att_val, kEntrySize);
+    target_address += kEntrySize;
+  }
+  return true;
+}
+
+void AttributeOctahedronTransform::SetParameters(int quantization_bits) {
+  quantization_bits_ = quantization_bits;
+}
+
+bool AttributeOctahedronTransform::EncodeParameters(
+    EncoderBuffer *encoder_buffer) const {
+  if (is_initialized()) {
+    encoder_buffer->Encode(static_cast<uint8_t>(quantization_bits_));
+    return true;
+  }
+  return false;
+}
+
+bool AttributeOctahedronTransform::DecodeParameters(
+    const PointAttribute &attribute, DecoderBuffer *decoder_buffer) {
+  uint8_t quantization_bits;
+  if (!decoder_buffer->Decode(&quantization_bits)) {
+    return false;
+  }
+  quantization_bits_ = quantization_bits;
+  return true;
+}
+
+bool AttributeOctahedronTransform::GeneratePortableAttribute(
+    const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
+    int num_points, PointAttribute *target_attribute) const {
+  DRACO_DCHECK(is_initialized());
+
+  // Quantize all values in the order given by point_ids into portable
+  // attribute.
+  int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
+      target_attribute->GetAddress(AttributeValueIndex(0)));
+  float att_val[3];
+  int32_t dst_index = 0;
+  OctahedronToolBox converter;
+  if (!converter.SetQuantizationBits(quantization_bits_)) {
+    return false;
+  }
+  if (!point_ids.empty()) {
+    for (uint32_t i = 0; i < point_ids.size(); ++i) {
+      const AttributeValueIndex att_val_id =
+          attribute.mapped_index(point_ids[i]);
+      attribute.GetValue(att_val_id, att_val);
+      // Encode the vector into a s and t octahedral coordinates.
+ int32_t s, t; + converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t); + portable_attribute_data[dst_index++] = s; + portable_attribute_data[dst_index++] = t; + } + } else { + for (PointIndex i(0); i < num_points; ++i) { + const AttributeValueIndex att_val_id = attribute.mapped_index(i); + attribute.GetValue(att_val_id, att_val); + // Encode the vector into a s and t octahedral coordinates. + int32_t s, t; + converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t); + portable_attribute_data[dst_index++] = s; + portable_attribute_data[dst_index++] = t; + } + } + + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/attributes/attribute_octahedron_transform.h b/contrib/draco/src/draco/attributes/attribute_octahedron_transform.h new file mode 100644 index 000000000..21a1725bb --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_octahedron_transform.h @@ -0,0 +1,81 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_ +#define DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_ + +#include "draco/attributes/attribute_transform.h" +#include "draco/attributes/point_attribute.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// Attribute transform for attributes transformed to octahedral coordinates. +class AttributeOctahedronTransform : public AttributeTransform { + public: + AttributeOctahedronTransform() : quantization_bits_(-1) {} + + // Return attribute transform type. + AttributeTransformType Type() const override { + return ATTRIBUTE_OCTAHEDRON_TRANSFORM; + } + // Try to init transform from attribute. + bool InitFromAttribute(const PointAttribute &attribute) override; + // Copy parameter values into the provided AttributeTransformData instance. + void CopyToAttributeTransformData( + AttributeTransformData *out_data) const override; + + bool TransformAttribute(const PointAttribute &attribute, + const std::vector &point_ids, + PointAttribute *target_attribute) override; + + bool InverseTransformAttribute(const PointAttribute &attribute, + PointAttribute *target_attribute) override; + + // Set number of quantization bits. + void SetParameters(int quantization_bits); + + // Encode relevant parameters into buffer. + bool EncodeParameters(EncoderBuffer *encoder_buffer) const override; + + bool DecodeParameters(const PointAttribute &attribute, + DecoderBuffer *decoder_buffer) override; + + bool is_initialized() const { return quantization_bits_ != -1; } + int32_t quantization_bits() const { return quantization_bits_; } + + protected: + DataType GetTransformedDataType( + const PointAttribute &attribute) const override { + return DT_UINT32; + } + int GetTransformedNumComponents( + const PointAttribute &attribute) const override { + return 2; + } + + // Perform the actual transformation. 
+ bool GeneratePortableAttribute(const PointAttribute &attribute, + const std::vector &point_ids, + int num_points, + PointAttribute *target_attribute) const; + + private: + int32_t quantization_bits_; +}; + +} // namespace draco + +#endif // DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/attributes/attribute_quantization_transform.cc b/contrib/draco/src/draco/attributes/attribute_quantization_transform.cc new file mode 100644 index 000000000..a7f93a488 --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_quantization_transform.cc @@ -0,0 +1,260 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/attributes/attribute_quantization_transform.h" + +#include "draco/attributes/attribute_transform_type.h" +#include "draco/core/quantization_utils.h" + +namespace draco { + +bool AttributeQuantizationTransform::InitFromAttribute( + const PointAttribute &attribute) { + const AttributeTransformData *const transform_data = + attribute.GetAttributeTransformData(); + if (!transform_data || + transform_data->transform_type() != ATTRIBUTE_QUANTIZATION_TRANSFORM) { + return false; // Wrong transform type. + } + int32_t byte_offset = 0; + quantization_bits_ = transform_data->GetParameterValue(byte_offset); + byte_offset += 4; + min_values_.resize(attribute.num_components()); + for (int i = 0; i < attribute.num_components(); ++i) { + min_values_[i] = transform_data->GetParameterValue(byte_offset); + byte_offset += 4; + } + range_ = transform_data->GetParameterValue(byte_offset); + return true; +} + +// Copy parameter values into the provided AttributeTransformData instance. +void AttributeQuantizationTransform::CopyToAttributeTransformData( + AttributeTransformData *out_data) const { + out_data->set_transform_type(ATTRIBUTE_QUANTIZATION_TRANSFORM); + out_data->AppendParameterValue(quantization_bits_); + for (int i = 0; i < min_values_.size(); ++i) { + out_data->AppendParameterValue(min_values_[i]); + } + out_data->AppendParameterValue(range_); +} + +bool AttributeQuantizationTransform::TransformAttribute( + const PointAttribute &attribute, const std::vector &point_ids, + PointAttribute *target_attribute) { + if (point_ids.empty()) { + GeneratePortableAttribute(attribute, target_attribute->size(), + target_attribute); + } else { + GeneratePortableAttribute(attribute, point_ids, target_attribute->size(), + target_attribute); + } + return true; +} + +bool AttributeQuantizationTransform::InverseTransformAttribute( + const PointAttribute &attribute, PointAttribute *target_attribute) { + if (target_attribute->data_type() != DT_FLOAT32) { + return false; + } + + // Convert all quantized values back to floats. 
+ const int32_t max_quantized_value = + (1u << static_cast(quantization_bits_)) - 1; + const int num_components = target_attribute->num_components(); + const int entry_size = sizeof(float) * num_components; + const std::unique_ptr att_val(new float[num_components]); + int quant_val_id = 0; + int out_byte_pos = 0; + Dequantizer dequantizer; + if (!dequantizer.Init(range_, max_quantized_value)) { + return false; + } + const int32_t *const source_attribute_data = + reinterpret_cast( + attribute.GetAddress(AttributeValueIndex(0))); + + const int num_values = target_attribute->size(); + + for (uint32_t i = 0; i < num_values; ++i) { + for (int c = 0; c < num_components; ++c) { + float value = + dequantizer.DequantizeFloat(source_attribute_data[quant_val_id++]); + value = value + min_values_[c]; + att_val[c] = value; + } + // Store the floating point value into the attribute buffer. + target_attribute->buffer()->Write(out_byte_pos, att_val.get(), entry_size); + out_byte_pos += entry_size; + } + return true; +} + +bool AttributeQuantizationTransform::IsQuantizationValid( + int quantization_bits) { + // Currently we allow only up to 30 bit quantization. + return quantization_bits >= 1 && quantization_bits <= 30; +} + +bool AttributeQuantizationTransform::SetParameters(int quantization_bits, + const float *min_values, + int num_components, + float range) { + if (!IsQuantizationValid(quantization_bits)) { + return false; + } + quantization_bits_ = quantization_bits; + min_values_.assign(min_values, min_values + num_components); + range_ = range; + return true; +} + +bool AttributeQuantizationTransform::ComputeParameters( + const PointAttribute &attribute, const int quantization_bits) { + if (quantization_bits_ != -1) { + return false; // already initialized. + } + if (!IsQuantizationValid(quantization_bits)) { + return false; + } + quantization_bits_ = quantization_bits; + + const int num_components = attribute.num_components(); + range_ = 0.f; + min_values_ = std::vector(num_components, 0.f); + const std::unique_ptr max_values(new float[num_components]); + const std::unique_ptr att_val(new float[num_components]); + // Compute minimum values and max value difference. + attribute.GetValue(AttributeValueIndex(0), att_val.get()); + attribute.GetValue(AttributeValueIndex(0), min_values_.data()); + attribute.GetValue(AttributeValueIndex(0), max_values.get()); + + for (AttributeValueIndex i(1); i < static_cast(attribute.size()); + ++i) { + attribute.GetValue(i, att_val.get()); + for (int c = 0; c < num_components; ++c) { + if (min_values_[c] > att_val[c]) { + min_values_[c] = att_val[c]; + } + if (max_values[c] < att_val[c]) { + max_values[c] = att_val[c]; + } + } + } + for (int c = 0; c < num_components; ++c) { + if (std::isnan(min_values_[c]) || std::isinf(min_values_[c]) || + std::isnan(max_values[c]) || std::isinf(max_values[c])) { + return false; + } + const float dif = max_values[c] - min_values_[c]; + if (dif > range_) { + range_ = dif; + } + } + + // In case all values are the same, initialize the range to unit length. This + // will ensure that all values are quantized properly to the same value. 
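ComputeParameters() above derives per-component minima plus a single shared range (the largest per-component spread), and values are then quantized uniformly to 2^quantization_bits - 1 steps. The following is a minimal self-contained sketch of that round trip, with plain std::vector data standing in for PointAttribute and the Quantizer/Dequantizer helpers inlined:

```cpp
// Sketch of the quantization scheme set up by ComputeParameters(): per-
// component minima, one shared range, uniform steps. Not Draco's API.
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const int quantization_bits = 11;
  const int32_t max_q = (1 << quantization_bits) - 1;
  const std::vector<std::array<float, 3>> points = {
      {0.1f, 2.0f, -1.0f}, {0.9f, 2.5f, -0.5f}, {0.4f, 2.2f, -0.9f}};

  // Per-component minimum and the largest component spread (the "range").
  std::array<float, 3> min_values = points[0];
  std::array<float, 3> max_values = points[0];
  for (const auto &p : points) {
    for (int c = 0; c < 3; ++c) {
      min_values[c] = std::min(min_values[c], p[c]);
      max_values[c] = std::max(max_values[c], p[c]);
    }
  }
  float range = 0.f;
  for (int c = 0; c < 3; ++c) {
    range = std::max(range, max_values[c] - min_values[c]);
  }
  if (range == 0.f) {
    range = 1.f;  // Degenerate case: all values identical.
  }

  // Quantize and dequantize the first point to show the round trip.
  for (int c = 0; c < 3; ++c) {
    const int32_t q = static_cast<int32_t>(
        std::lround((points[0][c] - min_values[c]) / range * max_q));
    const float restored = min_values[c] + static_cast<float>(q) / max_q * range;
    std::printf("component %d: original %f, restored %f\n",
                c, points[0][c], restored);
  }
  return 0;
}
```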
+ if (range_ == 0.f) { + range_ = 1.f; + } + + return true; +} + +bool AttributeQuantizationTransform::EncodeParameters( + EncoderBuffer *encoder_buffer) const { + if (is_initialized()) { + encoder_buffer->Encode(min_values_.data(), + sizeof(float) * min_values_.size()); + encoder_buffer->Encode(range_); + encoder_buffer->Encode(static_cast(quantization_bits_)); + return true; + } + return false; +} + +bool AttributeQuantizationTransform::DecodeParameters( + const PointAttribute &attribute, DecoderBuffer *decoder_buffer) { + min_values_.resize(attribute.num_components()); + if (!decoder_buffer->Decode(&min_values_[0], + sizeof(float) * min_values_.size())) { + return false; + } + if (!decoder_buffer->Decode(&range_)) { + return false; + } + uint8_t quantization_bits; + if (!decoder_buffer->Decode(&quantization_bits)) { + return false; + } + if (!IsQuantizationValid(quantization_bits)) { + return false; + } + quantization_bits_ = quantization_bits; + return true; +} + +void AttributeQuantizationTransform::GeneratePortableAttribute( + const PointAttribute &attribute, int num_points, + PointAttribute *target_attribute) const { + DRACO_DCHECK(is_initialized()); + + const int num_components = attribute.num_components(); + + // Quantize all values using the order given by point_ids. + int32_t *const portable_attribute_data = reinterpret_cast( + target_attribute->GetAddress(AttributeValueIndex(0))); + const uint32_t max_quantized_value = (1 << (quantization_bits_)) - 1; + Quantizer quantizer; + quantizer.Init(range(), max_quantized_value); + int32_t dst_index = 0; + const std::unique_ptr att_val(new float[num_components]); + for (PointIndex i(0); i < num_points; ++i) { + const AttributeValueIndex att_val_id = attribute.mapped_index(i); + attribute.GetValue(att_val_id, att_val.get()); + for (int c = 0; c < num_components; ++c) { + const float value = (att_val[c] - min_values()[c]); + const int32_t q_val = quantizer.QuantizeFloat(value); + portable_attribute_data[dst_index++] = q_val; + } + } +} + +void AttributeQuantizationTransform::GeneratePortableAttribute( + const PointAttribute &attribute, const std::vector &point_ids, + int num_points, PointAttribute *target_attribute) const { + DRACO_DCHECK(is_initialized()); + + const int num_components = attribute.num_components(); + + // Quantize all values using the order given by point_ids. + int32_t *const portable_attribute_data = reinterpret_cast( + target_attribute->GetAddress(AttributeValueIndex(0))); + const uint32_t max_quantized_value = (1 << (quantization_bits_)) - 1; + Quantizer quantizer; + quantizer.Init(range(), max_quantized_value); + int32_t dst_index = 0; + const std::unique_ptr att_val(new float[num_components]); + for (uint32_t i = 0; i < point_ids.size(); ++i) { + const AttributeValueIndex att_val_id = attribute.mapped_index(point_ids[i]); + attribute.GetValue(att_val_id, att_val.get()); + for (int c = 0; c < num_components; ++c) { + const float value = (att_val[c] - min_values()[c]); + const int32_t q_val = quantizer.QuantizeFloat(value); + portable_attribute_data[dst_index++] = q_val; + } + } +} + +} // namespace draco diff --git a/contrib/draco/src/draco/attributes/attribute_quantization_transform.h b/contrib/draco/src/draco/attributes/attribute_quantization_transform.h new file mode 100644 index 000000000..f1122b680 --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_quantization_transform.h @@ -0,0 +1,102 @@ +// Copyright 2017 The Draco Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_ +#define DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_ + +#include + +#include "draco/attributes/attribute_transform.h" +#include "draco/attributes/point_attribute.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// Attribute transform for quantized attributes. +class AttributeQuantizationTransform : public AttributeTransform { + public: + AttributeQuantizationTransform() : quantization_bits_(-1), range_(0.f) {} + // Return attribute transform type. + AttributeTransformType Type() const override { + return ATTRIBUTE_QUANTIZATION_TRANSFORM; + } + // Try to init transform from attribute. + bool InitFromAttribute(const PointAttribute &attribute) override; + // Copy parameter values into the provided AttributeTransformData instance. + void CopyToAttributeTransformData( + AttributeTransformData *out_data) const override; + + bool TransformAttribute(const PointAttribute &attribute, + const std::vector &point_ids, + PointAttribute *target_attribute) override; + + bool InverseTransformAttribute(const PointAttribute &attribute, + PointAttribute *target_attribute) override; + + bool SetParameters(int quantization_bits, const float *min_values, + int num_components, float range); + + bool ComputeParameters(const PointAttribute &attribute, + const int quantization_bits); + + // Encode relevant parameters into buffer. + bool EncodeParameters(EncoderBuffer *encoder_buffer) const override; + + bool DecodeParameters(const PointAttribute &attribute, + DecoderBuffer *decoder_buffer) override; + + int32_t quantization_bits() const { return quantization_bits_; } + float min_value(int axis) const { return min_values_[axis]; } + const std::vector &min_values() const { return min_values_; } + float range() const { return range_; } + bool is_initialized() const { return quantization_bits_ != -1; } + + protected: + // Create portable attribute using 1:1 mapping between points in the input and + // output attribute. + void GeneratePortableAttribute(const PointAttribute &attribute, + int num_points, + PointAttribute *target_attribute) const; + + // Create portable attribute using custom mapping between input and output + // points. + void GeneratePortableAttribute(const PointAttribute &attribute, + const std::vector &point_ids, + int num_points, + PointAttribute *target_attribute) const; + + DataType GetTransformedDataType( + const PointAttribute &attribute) const override { + return DT_UINT32; + } + int GetTransformedNumComponents( + const PointAttribute &attribute) const override { + return attribute.num_components(); + } + + static bool IsQuantizationValid(int quantization_bits); + + private: + int32_t quantization_bits_; + + // Minimal dequantized value for each component of the attribute. + std::vector min_values_; + + // Bounds of the dequantized attribute (max delta over all components). 
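The EncodeParameters()/DecodeParameters() pair shown in the .cc file above writes the transform parameters in a fixed order: the per-component minimum values as raw floats, then the range, then the quantization bit count as a single byte. Below is a simplified sketch of that layout using a plain byte vector in place of Draco's EncoderBuffer/DecoderBuffer; the Append helper is made up for the example.

```cpp
// Simplified sketch of the parameter layout used by the quantization
// transform: num_components floats (minima), one float (range), one uint8_t
// (bits). A raw byte vector stands in for EncoderBuffer/DecoderBuffer.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

static void Append(std::vector<uint8_t> *buf, const void *data, size_t size) {
  const uint8_t *bytes = static_cast<const uint8_t *>(data);
  buf->insert(buf->end(), bytes, bytes + size);
}

int main() {
  const std::vector<float> min_values = {0.1f, 2.0f, -1.0f};
  const float range = 0.8f;
  const uint8_t quantization_bits = 11;

  // Encode.
  std::vector<uint8_t> buffer;
  Append(&buffer, min_values.data(), sizeof(float) * min_values.size());
  Append(&buffer, &range, sizeof(range));
  Append(&buffer, &quantization_bits, sizeof(quantization_bits));

  // Decode in the same order.
  size_t pos = 0;
  std::vector<float> decoded_min(min_values.size());
  std::memcpy(decoded_min.data(), buffer.data() + pos,
              sizeof(float) * decoded_min.size());
  pos += sizeof(float) * decoded_min.size();
  float decoded_range;
  std::memcpy(&decoded_range, buffer.data() + pos, sizeof(decoded_range));
  pos += sizeof(decoded_range);
  const uint8_t decoded_bits = buffer[pos];

  std::printf("range %f, bits %u, min[0] %f\n", decoded_range,
              static_cast<unsigned>(decoded_bits), decoded_min[0]);
  return 0;
}
```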
+ float range_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTE_DEQUANTIZATION_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/attributes/attribute_transform.cc b/contrib/draco/src/draco/attributes/attribute_transform.cc new file mode 100644 index 000000000..174e6b822 --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_transform.cc @@ -0,0 +1,40 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/attributes/attribute_transform.h" + +namespace draco { + +bool AttributeTransform::TransferToAttribute(PointAttribute *attribute) const { + std::unique_ptr transform_data( + new AttributeTransformData()); + this->CopyToAttributeTransformData(transform_data.get()); + attribute->SetAttributeTransformData(std::move(transform_data)); + return true; +} + +std::unique_ptr AttributeTransform::InitTransformedAttribute( + const PointAttribute &src_attribute, int num_entries) { + const int num_components = GetTransformedNumComponents(src_attribute); + const DataType dt = GetTransformedDataType(src_attribute); + GeometryAttribute va; + va.Init(src_attribute.attribute_type(), nullptr, num_components, dt, false, + num_components * DataTypeLength(dt), 0); + std::unique_ptr transformed_attribute(new PointAttribute(va)); + transformed_attribute->Reset(num_entries); + transformed_attribute->SetIdentityMapping(); + return transformed_attribute; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/attributes/attribute_transform.h b/contrib/draco/src/draco/attributes/attribute_transform.h new file mode 100644 index 000000000..62aad60db --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_transform.h @@ -0,0 +1,76 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_ +#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_ + +#include "draco/attributes/attribute_transform_data.h" +#include "draco/attributes/point_attribute.h" +#include "draco/core/decoder_buffer.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// Virtual base class for various attribute transforms, enforcing common +// interface where possible. +class AttributeTransform { + public: + virtual ~AttributeTransform() = default; + + // Return attribute transform type. + virtual AttributeTransformType Type() const = 0; + // Try to init transform from attribute. 
+ virtual bool InitFromAttribute(const PointAttribute &attribute) = 0; + // Copy parameter values into the provided AttributeTransformData instance. + virtual void CopyToAttributeTransformData( + AttributeTransformData *out_data) const = 0; + bool TransferToAttribute(PointAttribute *attribute) const; + + // Applies the transform to |attribute| and stores the result in + // |target_attribute|. |point_ids| is an optional vector that can be used to + // remap values during the transform. + virtual bool TransformAttribute(const PointAttribute &attribute, + const std::vector &point_ids, + PointAttribute *target_attribute) = 0; + + // Applies an inverse transform to |attribute| and stores the result in + // |target_attribute|. In this case, |attribute| is an attribute that was + // already transformed (e.g. quantized) and |target_attribute| is the + // attribute before the transformation. + virtual bool InverseTransformAttribute(const PointAttribute &attribute, + PointAttribute *target_attribute) = 0; + + // Encodes all data needed by the transformation into the |encoder_buffer|. + virtual bool EncodeParameters(EncoderBuffer *encoder_buffer) const = 0; + + // Decodes all data needed to transform |attribute| back to the original + // format. + virtual bool DecodeParameters(const PointAttribute &attribute, + DecoderBuffer *decoder_buffer) = 0; + + // Initializes a transformed attribute that can be used as target in the + // TransformAttribute() function call. + virtual std::unique_ptr InitTransformedAttribute( + const PointAttribute &src_attribute, int num_entries); + + protected: + virtual DataType GetTransformedDataType( + const PointAttribute &attribute) const = 0; + virtual int GetTransformedNumComponents( + const PointAttribute &attribute) const = 0; +}; + +} // namespace draco + +#endif // DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/attributes/attribute_transform_data.h b/contrib/draco/src/draco/attributes/attribute_transform_data.h new file mode 100644 index 000000000..96ed07320 --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_transform_data.h @@ -0,0 +1,71 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_ +#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_ + +#include + +#include "draco/attributes/attribute_transform_type.h" +#include "draco/core/data_buffer.h" + +namespace draco { + +// Class for holding parameter values for an attribute transform of a +// PointAttribute. This can be for example quantization data for an attribute +// that holds quantized values. This class provides only a basic storage for +// attribute transform parameters and it should be accessed only through wrapper +// classes for a specific transform (e.g. AttributeQuantizationTransform). 
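The AttributeTransform interface above splits the work into a forward path used when encoding (TransformAttribute plus EncodeParameters) and an inverse path used when decoding (DecodeParameters plus InverseTransformAttribute). The toy analogue below illustrates only that call flow; ScaleTransform and its float vectors are hypothetical stand-ins, not Draco types.

```cpp
// Toy analogue of the AttributeTransform call flow: forward transform and
// parameter serialization on the encoder side, parameter decoding and the
// inverse transform on the decoder side. Not a Draco class.
#include <cstdio>
#include <vector>

class ScaleTransform {
 public:
  explicit ScaleTransform(float scale) : scale_(scale) {}

  // Forward path (encoder side).
  std::vector<float> TransformAttribute(const std::vector<float> &values) const {
    std::vector<float> out;
    for (float v : values) out.push_back(v * scale_);
    return out;
  }
  float EncodeParameters() const { return scale_; }

  // Inverse path (decoder side).
  static ScaleTransform DecodeParameters(float encoded_scale) {
    return ScaleTransform(encoded_scale);
  }
  std::vector<float> InverseTransformAttribute(
      const std::vector<float> &values) const {
    std::vector<float> out;
    for (float v : values) out.push_back(v / scale_);
    return out;
  }

 private:
  float scale_;
};

int main() {
  const std::vector<float> original = {1.f, 2.f, 3.f};
  const ScaleTransform encoder_side(4.f);
  const std::vector<float> transformed = encoder_side.TransformAttribute(original);
  const float wire_parameter = encoder_side.EncodeParameters();

  const ScaleTransform decoder_side = ScaleTransform::DecodeParameters(wire_parameter);
  const std::vector<float> restored = decoder_side.InverseTransformAttribute(transformed);
  std::printf("restored[2] = %f\n", restored[2]);
  return 0;
}
```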
+class AttributeTransformData { + public: + AttributeTransformData() : transform_type_(ATTRIBUTE_INVALID_TRANSFORM) {} + AttributeTransformData(const AttributeTransformData &data) = default; + + // Returns the type of the attribute transform that is described by the class. + AttributeTransformType transform_type() const { return transform_type_; } + void set_transform_type(AttributeTransformType type) { + transform_type_ = type; + } + + // Returns a parameter value on a given |byte_offset|. + template + DataTypeT GetParameterValue(int byte_offset) const { + DataTypeT out_data; + buffer_.Read(byte_offset, &out_data, sizeof(DataTypeT)); + return out_data; + } + + // Sets a parameter value on a given |byte_offset|. + template + void SetParameterValue(int byte_offset, const DataTypeT &in_data) { + if (byte_offset + sizeof(DataTypeT) > buffer_.data_size()) { + buffer_.Resize(byte_offset + sizeof(DataTypeT)); + } + buffer_.Write(byte_offset, &in_data, sizeof(DataTypeT)); + } + + // Sets a parameter value at the end of the |buffer_|. + template + void AppendParameterValue(const DataTypeT &in_data) { + SetParameterValue(static_cast(buffer_.data_size()), in_data); + } + + private: + AttributeTransformType transform_type_; + DataBuffer buffer_; +}; + +} // namespace draco + +#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_ diff --git a/contrib/draco/src/draco/attributes/attribute_transform_type.h b/contrib/draco/src/draco/attributes/attribute_transform_type.h new file mode 100644 index 000000000..51ce6f333 --- /dev/null +++ b/contrib/draco/src/draco/attributes/attribute_transform_type.h @@ -0,0 +1,30 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_ +#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_ + +namespace draco { + +// List of all currently supported attribute transforms. +enum AttributeTransformType { + ATTRIBUTE_INVALID_TRANSFORM = -1, + ATTRIBUTE_NO_TRANSFORM = 0, + ATTRIBUTE_QUANTIZATION_TRANSFORM = 1, + ATTRIBUTE_OCTAHEDRON_TRANSFORM = 2, +}; + +} // namespace draco + +#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_ diff --git a/contrib/draco/src/draco/attributes/geometry_attribute.cc b/contrib/draco/src/draco/attributes/geometry_attribute.cc new file mode 100644 index 000000000..b62478426 --- /dev/null +++ b/contrib/draco/src/draco/attributes/geometry_attribute.cc @@ -0,0 +1,102 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
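AttributeTransformData, shown above, keeps typed transform parameters in a raw byte buffer addressed by byte offsets (AppendParameterValue/GetParameterValue). Below is a minimal analogue of that storage pattern built on std::vector and memcpy; ParameterBlob is a made-up name and Draco's actual backing store is DataBuffer.

```cpp
// Minimal analogue of AttributeTransformData's typed parameter storage:
// values are appended to a raw byte buffer and read back at byte offsets.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

class ParameterBlob {
 public:
  template <typename T>
  void AppendParameterValue(const T &value) {
    const size_t offset = buffer_.size();
    buffer_.resize(offset + sizeof(T));
    std::memcpy(buffer_.data() + offset, &value, sizeof(T));
  }

  template <typename T>
  T GetParameterValue(size_t byte_offset) const {
    T out;
    std::memcpy(&out, buffer_.data() + byte_offset, sizeof(T));
    return out;
  }

 private:
  std::vector<uint8_t> buffer_;
};

int main() {
  ParameterBlob blob;
  blob.AppendParameterValue<int32_t>(11);  // e.g. quantization bits
  blob.AppendParameterValue<float>(0.8f);  // e.g. range

  const int32_t bits = blob.GetParameterValue<int32_t>(0);
  const float range = blob.GetParameterValue<float>(4);  // 4 == sizeof(int32_t)
  std::printf("bits %d, range %f\n", bits, range);
  return 0;
}
```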
+// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/attributes/geometry_attribute.h" + +namespace draco { + +GeometryAttribute::GeometryAttribute() + : buffer_(nullptr), + num_components_(1), + data_type_(DT_FLOAT32), + byte_stride_(0), + byte_offset_(0), + attribute_type_(INVALID), + unique_id_(0) {} + +void GeometryAttribute::Init(GeometryAttribute::Type attribute_type, + DataBuffer *buffer, int8_t num_components, + DataType data_type, bool normalized, + int64_t byte_stride, int64_t byte_offset) { + buffer_ = buffer; + if (buffer) { + buffer_descriptor_.buffer_id = buffer->buffer_id(); + buffer_descriptor_.buffer_update_count = buffer->update_count(); + } + num_components_ = num_components; + data_type_ = data_type; + normalized_ = normalized; + byte_stride_ = byte_stride; + byte_offset_ = byte_offset; + attribute_type_ = attribute_type; +} + +bool GeometryAttribute::CopyFrom(const GeometryAttribute &src_att) { + num_components_ = src_att.num_components_; + data_type_ = src_att.data_type_; + normalized_ = src_att.normalized_; + byte_stride_ = src_att.byte_stride_; + byte_offset_ = src_att.byte_offset_; + attribute_type_ = src_att.attribute_type_; + buffer_descriptor_ = src_att.buffer_descriptor_; + unique_id_ = src_att.unique_id_; + if (src_att.buffer_ == nullptr) { + buffer_ = nullptr; + } else { + if (buffer_ == nullptr) { + return false; + } + buffer_->Update(src_att.buffer_->data(), src_att.buffer_->data_size()); + } + return true; +} + +bool GeometryAttribute::operator==(const GeometryAttribute &va) const { + if (attribute_type_ != va.attribute_type_) { + return false; + } + // It's OK to compare just the buffer descriptors here. We don't need to + // compare the buffers themselves. + if (buffer_descriptor_.buffer_id != va.buffer_descriptor_.buffer_id) { + return false; + } + if (buffer_descriptor_.buffer_update_count != + va.buffer_descriptor_.buffer_update_count) { + return false; + } + if (num_components_ != va.num_components_) { + return false; + } + if (data_type_ != va.data_type_) { + return false; + } + if (byte_stride_ != va.byte_stride_) { + return false; + } + if (byte_offset_ != va.byte_offset_) { + return false; + } + return true; +} + +void GeometryAttribute::ResetBuffer(DataBuffer *buffer, int64_t byte_stride, + int64_t byte_offset) { + buffer_ = buffer; + buffer_descriptor_.buffer_id = buffer->buffer_id(); + buffer_descriptor_.buffer_update_count = buffer->update_count(); + byte_stride_ = byte_stride; + byte_offset_ = byte_offset; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/attributes/geometry_attribute.h b/contrib/draco/src/draco/attributes/geometry_attribute.h new file mode 100644 index 000000000..f4d099b1b --- /dev/null +++ b/contrib/draco/src/draco/attributes/geometry_attribute.h @@ -0,0 +1,350 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifndef DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_ +#define DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_ + +#include +#include + +#include "draco/attributes/geometry_indices.h" +#include "draco/core/data_buffer.h" +#include "draco/core/hash_utils.h" + +namespace draco { + +// The class provides access to a specific attribute which is stored in a +// DataBuffer, such as normals or coordinates. However, the GeometryAttribute +// class does not own the buffer and the buffer itself may store other data +// unrelated to this attribute (such as data for other attributes in which case +// we can have multiple GeometryAttributes accessing one buffer). Typically, +// all attributes for a point (or corner, face) are stored in one block, which +// is advantageous in terms of memory access. The length of the entire block is +// given by the byte_stride, the position where the attribute starts is given by +// the byte_offset, the actual number of bytes that the attribute occupies is +// given by the data_type and the number of components. +class GeometryAttribute { + public: + // Supported attribute types. + enum Type { + INVALID = -1, + // Named attributes start here. The difference between named and generic + // attributes is that for named attributes we know their purpose and we + // can apply some special methods when dealing with them (e.g. during + // encoding). + POSITION = 0, + NORMAL, + COLOR, + TEX_COORD, + // A special id used to mark attributes that are not assigned to any known + // predefined use case. Such attributes are often used for a shader specific + // data. + GENERIC, + // Total number of different attribute types. + // Always keep behind all named attributes. + NAMED_ATTRIBUTES_COUNT, + }; + + GeometryAttribute(); + // Initializes and enables the attribute. + void Init(Type attribute_type, DataBuffer *buffer, int8_t num_components, + DataType data_type, bool normalized, int64_t byte_stride, + int64_t byte_offset); + bool IsValid() const { return buffer_ != nullptr; } + + // Copies data from the source attribute to the this attribute. + // This attribute must have a valid buffer allocated otherwise the operation + // is going to fail and return false. + bool CopyFrom(const GeometryAttribute &src_att); + + // Function for getting a attribute value with a specific format. + // Unsafe. Caller must ensure the accessed memory is valid. + // T is the attribute data type. + // att_components_t is the number of attribute components. + template + std::array GetValue( + AttributeValueIndex att_index) const { + // Byte address of the attribute index. + const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value(); + std::array out; + buffer_->Read(byte_pos, &(out[0]), sizeof(out)); + return out; + } + + // Function for getting a attribute value with a specific format. + // T is the attribute data type. + // att_components_t is the number of attribute components. + template + bool GetValue(AttributeValueIndex att_index, + std::array *out) const { + // Byte address of the attribute index. + const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value(); + // Check we are not reading past end of data. + if (byte_pos + sizeof(*out) > buffer_->data_size()) { + return false; + } + buffer_->Read(byte_pos, &((*out)[0]), sizeof(*out)); + return true; + } + + // Returns the byte position of the attribute entry in the data buffer. 
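As the class comment above explains, GeometryAttribute locates entry i at byte_offset + byte_stride * i inside a shared buffer, which is what allows several attributes to interleave in one memory block. The sketch below shows that addressing on a raw interleaved position/normal float array; it uses plain pointers rather than Draco's DataBuffer.

```cpp
// Sketch of byte_offset/byte_stride addressing: two attributes (position,
// normal) interleaved per vertex, each entry found at
// byte_offset + byte_stride * index. Raw floats stand in for DataBuffer.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // Two vertices, each laid out as [px py pz nx ny nz].
  const float interleaved[] = {0.f, 1.f, 2.f, 0.f, 0.f, 1.f,
                               3.f, 4.f, 5.f, 0.f, 1.f, 0.f};
  const uint8_t *data = reinterpret_cast<const uint8_t *>(interleaved);

  const int64_t byte_stride = 6 * sizeof(float);      // one whole vertex block
  const int64_t position_offset = 0;                  // positions start the block
  const int64_t normal_offset = 3 * sizeof(float);    // normals follow them

  float position[3];
  std::memcpy(position, data + position_offset + byte_stride * 0, sizeof(position));
  float normal[3];
  std::memcpy(normal, data + normal_offset + byte_stride * 1, sizeof(normal));

  std::printf("position[0] = (%f, %f, %f)\n", position[0], position[1], position[2]);
  std::printf("normal[1]   = (%f, %f, %f)\n", normal[0], normal[1], normal[2]);
  return 0;
}
```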
+ inline int64_t GetBytePos(AttributeValueIndex att_index) const { + return byte_offset_ + byte_stride_ * att_index.value(); + } + + inline const uint8_t *GetAddress(AttributeValueIndex att_index) const { + const int64_t byte_pos = GetBytePos(att_index); + return buffer_->data() + byte_pos; + } + inline uint8_t *GetAddress(AttributeValueIndex att_index) { + const int64_t byte_pos = GetBytePos(att_index); + return buffer_->data() + byte_pos; + } + inline bool IsAddressValid(const uint8_t *address) const { + return ((buffer_->data() + buffer_->data_size()) > address); + } + + // Fills out_data with the raw value of the requested attribute entry. + // out_data must be at least byte_stride_ long. + void GetValue(AttributeValueIndex att_index, void *out_data) const { + const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value(); + buffer_->Read(byte_pos, out_data, byte_stride_); + } + + // Sets a value of an attribute entry. The input value must be allocated to + // cover all components of a single attribute entry. + void SetAttributeValue(AttributeValueIndex entry_index, const void *value) { + const int64_t byte_pos = entry_index.value() * byte_stride(); + buffer_->Write(byte_pos, value, byte_stride()); + } + + // DEPRECATED: Use + // ConvertValue(AttributeValueIndex att_id, + // int out_num_components, + // OutT *out_val); + // + // Function for conversion of a attribute to a specific output format. + // OutT is the desired data type of the attribute. + // out_att_components_t is the number of components of the output format. + // Returns false when the conversion failed. + template + bool ConvertValue(AttributeValueIndex att_id, OutT *out_val) const { + return ConvertValue(att_id, out_att_components_t, out_val); + } + + // Function for conversion of a attribute to a specific output format. + // |out_val| needs to be able to store |out_num_components| values. + // OutT is the desired data type of the attribute. + // Returns false when the conversion failed. + template + bool ConvertValue(AttributeValueIndex att_id, int8_t out_num_components, + OutT *out_val) const { + if (out_val == nullptr) { + return false; + } + switch (data_type_) { + case DT_INT8: + return ConvertTypedValue(att_id, out_num_components, + out_val); + case DT_UINT8: + return ConvertTypedValue(att_id, out_num_components, + out_val); + case DT_INT16: + return ConvertTypedValue(att_id, out_num_components, + out_val); + case DT_UINT16: + return ConvertTypedValue(att_id, out_num_components, + out_val); + case DT_INT32: + return ConvertTypedValue(att_id, out_num_components, + out_val); + case DT_UINT32: + return ConvertTypedValue(att_id, out_num_components, + out_val); + case DT_INT64: + return ConvertTypedValue(att_id, out_num_components, + out_val); + case DT_UINT64: + return ConvertTypedValue(att_id, out_num_components, + out_val); + case DT_FLOAT32: + return ConvertTypedValue(att_id, out_num_components, + out_val); + case DT_FLOAT64: + return ConvertTypedValue(att_id, out_num_components, + out_val); + case DT_BOOL: + return ConvertTypedValue(att_id, out_num_components, + out_val); + default: + // Wrong attribute type. + return false; + } + } + + // Function for conversion of a attribute to a specific output format. + // The |out_value| must be able to store all components of a single attribute + // entry. + // OutT is the desired data type of the attribute. + // Returns false when the conversion failed. 
+ template + bool ConvertValue(AttributeValueIndex att_index, OutT *out_value) const { + return ConvertValue(att_index, num_components_, out_value); + } + + // Utility function. Returns |attribute_type| as std::string. + static std::string TypeToString(Type attribute_type) { + switch (attribute_type) { + case INVALID: + return "INVALID"; + case POSITION: + return "POSITION"; + case NORMAL: + return "NORMAL"; + case COLOR: + return "COLOR"; + case TEX_COORD: + return "TEX_COORD"; + case GENERIC: + return "GENERIC"; + default: + return "UNKNOWN"; + } + } + + bool operator==(const GeometryAttribute &va) const; + + // Returns the type of the attribute indicating the nature of the attribute. + Type attribute_type() const { return attribute_type_; } + void set_attribute_type(Type type) { attribute_type_ = type; } + // Returns the data type that is stored in the attribute. + DataType data_type() const { return data_type_; } + // Returns the number of components that are stored for each entry. + // For position attribute this is usually three (x,y,z), + // while texture coordinates have two components (u,v). + int8_t num_components() const { return num_components_; } + // Indicates whether the data type should be normalized before interpretation, + // that is, it should be divided by the max value of the data type. + bool normalized() const { return normalized_; } + // The buffer storing the entire data of the attribute. + const DataBuffer *buffer() const { return buffer_; } + // Returns the number of bytes between two attribute entries, this is, at + // least size of the data types times number of components. + int64_t byte_stride() const { return byte_stride_; } + // The offset where the attribute starts within the block of size byte_stride. + int64_t byte_offset() const { return byte_offset_; } + void set_byte_offset(int64_t byte_offset) { byte_offset_ = byte_offset; } + DataBufferDescriptor buffer_descriptor() const { return buffer_descriptor_; } + uint32_t unique_id() const { return unique_id_; } + void set_unique_id(uint32_t id) { unique_id_ = id; } + + protected: + // Sets a new internal storage for the attribute. + void ResetBuffer(DataBuffer *buffer, int64_t byte_stride, + int64_t byte_offset); + + private: + // Function for conversion of an attribute to a specific output format given a + // format of the stored attribute. + // T is the stored attribute data type. + // OutT is the desired data type of the attribute. + template + bool ConvertTypedValue(AttributeValueIndex att_id, int8_t out_num_components, + OutT *out_value) const { + const uint8_t *src_address = GetAddress(att_id); + + // Convert all components available in both the original and output formats. + for (int i = 0; i < std::min(num_components_, out_num_components); ++i) { + if (!IsAddressValid(src_address)) { + return false; + } + const T in_value = *reinterpret_cast(src_address); + + // Make sure the in_value fits within the range of values that OutT + // is able to represent. Perform the check only for integral types. + if (std::is_integral::value && std::is_integral::value) { + static constexpr OutT kOutMin = + std::is_signed::value ? std::numeric_limits::lowest() : 0; + if (in_value < kOutMin || in_value > std::numeric_limits::max()) { + return false; + } + } + + out_value[i] = static_cast(in_value); + // When converting integer to floating point, normalize the value if + // necessary. 
+ if (std::is_integral::value && std::is_floating_point::value && + normalized_) { + out_value[i] /= static_cast(std::numeric_limits::max()); + } + // TODO(ostava): Add handling of normalized attributes when converting + // between different integer representations. If the attribute is + // normalized, integer values should be converted as if they represent 0-1 + // range. E.g. when we convert uint16 to uint8, the range <0, 2^16 - 1> + // should be converted to range <0, 2^8 - 1>. + src_address += sizeof(T); + } + // Fill empty data for unused output components if needed. + for (int i = num_components_; i < out_num_components; ++i) { + out_value[i] = static_cast(0); + } + return true; + } + + DataBuffer *buffer_; + // The buffer descriptor is stored at the time the buffer is attached to this + // attribute. The purpose is to detect if any changes happened to the buffer + // since the time it was attached. + DataBufferDescriptor buffer_descriptor_; + int8_t num_components_; + DataType data_type_; + bool normalized_; + int64_t byte_stride_; + int64_t byte_offset_; + + Type attribute_type_; + + // Unique id of this attribute. No two attributes could have the same unique + // id. It is used to identify each attribute, especially when there are + // multiple attribute of the same type in a point cloud. + uint32_t unique_id_; + + friend struct GeometryAttributeHasher; +}; + +// Hashing support + +// Function object for using Attribute as a hash key. +struct GeometryAttributeHasher { + size_t operator()(const GeometryAttribute &va) const { + size_t hash = HashCombine(va.buffer_descriptor_.buffer_id, + va.buffer_descriptor_.buffer_update_count); + hash = HashCombine(va.num_components_, hash); + hash = HashCombine(static_cast(va.data_type_), hash); + hash = HashCombine(static_cast(va.attribute_type_), hash); + hash = HashCombine(va.byte_stride_, hash); + return HashCombine(va.byte_offset_, hash); + } +}; + +// Function object for using GeometryAttribute::Type as a hash key. +struct GeometryAttributeTypeHasher { + size_t operator()(const GeometryAttribute::Type &at) const { + return static_cast(at); + } +}; + +} // namespace draco + +#endif // DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_ diff --git a/contrib/draco/src/draco/attributes/geometry_indices.h b/contrib/draco/src/draco/attributes/geometry_indices.h new file mode 100644 index 000000000..80e43e30a --- /dev/null +++ b/contrib/draco/src/draco/attributes/geometry_indices.h @@ -0,0 +1,54 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_ +#define DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_ + +#include + +#include + +#include "draco/core/draco_index_type.h" + +namespace draco { + +// Index of an attribute value entry stored in a GeometryAttribute. +DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, AttributeValueIndex) +// Index of a point in a PointCloud. +DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, PointIndex) +// Vertex index in a Mesh or CornerTable. 
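geometry_indices.h, which begins above, wraps raw uint32_t indices in distinct types (PointIndex, AttributeValueIndex, and so on) so that different index kinds cannot be mixed up. The sketch below shows the general idea behind such a wrapper with a hypothetical TypedIndex template; it is not the actual DEFINE_NEW_DRACO_INDEX_TYPE macro.

```cpp
// Sketch of the idea behind strongly typed geometry indices: a thin wrapper
// gives each index kind its own type, so a point index cannot be passed
// where an attribute value index is expected. Simplified; not Draco's macro.
#include <cstdint>
#include <cstdio>

template <typename Tag>
class TypedIndex {
 public:
  explicit TypedIndex(uint32_t value) : value_(value) {}
  uint32_t value() const { return value_; }
  TypedIndex &operator++() { ++value_; return *this; }
  bool operator==(const TypedIndex &other) const { return value_ == other.value_; }

 private:
  uint32_t value_;
};

struct PointIndexTag {};
struct AttributeValueIndexTag {};
using MyPointIndex = TypedIndex<PointIndexTag>;
using MyAttributeValueIndex = TypedIndex<AttributeValueIndexTag>;

int main() {
  const MyPointIndex p(7);
  const MyAttributeValueIndex a(7);
  // Comparing p with a would not compile: the two index types are distinct.
  std::printf("point %u, value %u\n", p.value(), a.value());
  return 0;
}
```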
+DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, VertexIndex) +// Corner index that identifies a corner in a Mesh or CornerTable. +DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, CornerIndex) +// Face index for Mesh and CornerTable. +DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, FaceIndex) + +// Constants denoting invalid indices. +static constexpr AttributeValueIndex kInvalidAttributeValueIndex( + std::numeric_limits::max()); +static constexpr PointIndex kInvalidPointIndex( + std::numeric_limits::max()); +static constexpr VertexIndex kInvalidVertexIndex( + std::numeric_limits::max()); +static constexpr CornerIndex kInvalidCornerIndex( + std::numeric_limits::max()); +static constexpr FaceIndex kInvalidFaceIndex( + std::numeric_limits::max()); + +// TODO(ostava): Add strongly typed indices for attribute id and unique +// attribute id. + +} // namespace draco + +#endif // DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_ diff --git a/contrib/draco/src/draco/attributes/point_attribute.cc b/contrib/draco/src/draco/attributes/point_attribute.cc new file mode 100644 index 000000000..b28f860c1 --- /dev/null +++ b/contrib/draco/src/draco/attributes/point_attribute.cc @@ -0,0 +1,225 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/attributes/point_attribute.h" + +#include + +using std::unordered_map; + +// Shortcut for typed conditionals. +template +using conditional_t = typename std::conditional::type; + +namespace draco { + +PointAttribute::PointAttribute() + : num_unique_entries_(0), identity_mapping_(false) {} + +PointAttribute::PointAttribute(const GeometryAttribute &att) + : GeometryAttribute(att), + num_unique_entries_(0), + identity_mapping_(false) {} + +void PointAttribute::Init(Type attribute_type, int8_t num_components, + DataType data_type, bool normalized, + size_t num_attribute_values) { + attribute_buffer_ = std::unique_ptr(new DataBuffer()); + GeometryAttribute::Init(attribute_type, attribute_buffer_.get(), + num_components, data_type, normalized, + DataTypeLength(data_type) * num_components, 0); + Reset(num_attribute_values); + SetIdentityMapping(); +} + +void PointAttribute::CopyFrom(const PointAttribute &src_att) { + if (buffer() == nullptr) { + // If the destination attribute doesn't have a valid buffer, create it. 
+ attribute_buffer_ = std::unique_ptr(new DataBuffer()); + ResetBuffer(attribute_buffer_.get(), 0, 0); + } + if (!GeometryAttribute::CopyFrom(src_att)) { + return; + } + identity_mapping_ = src_att.identity_mapping_; + num_unique_entries_ = src_att.num_unique_entries_; + indices_map_ = src_att.indices_map_; + if (src_att.attribute_transform_data_) { + attribute_transform_data_ = std::unique_ptr( + new AttributeTransformData(*src_att.attribute_transform_data_)); + } else { + attribute_transform_data_ = nullptr; + } +} + +bool PointAttribute::Reset(size_t num_attribute_values) { + if (attribute_buffer_ == nullptr) { + attribute_buffer_ = std::unique_ptr(new DataBuffer()); + } + const int64_t entry_size = DataTypeLength(data_type()) * num_components(); + if (!attribute_buffer_->Update(nullptr, num_attribute_values * entry_size)) { + return false; + } + // Assign the new buffer to the parent attribute. + ResetBuffer(attribute_buffer_.get(), entry_size, 0); + num_unique_entries_ = static_cast(num_attribute_values); + return true; +} + +void PointAttribute::Resize(size_t new_num_unique_entries) { + num_unique_entries_ = static_cast(new_num_unique_entries); + attribute_buffer_->Resize(new_num_unique_entries * byte_stride()); +} + +#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED +AttributeValueIndex::ValueType PointAttribute::DeduplicateValues( + const GeometryAttribute &in_att) { + return DeduplicateValues(in_att, AttributeValueIndex(0)); +} + +AttributeValueIndex::ValueType PointAttribute::DeduplicateValues( + const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) { + AttributeValueIndex::ValueType unique_vals = 0; + switch (in_att.data_type()) { + // Currently we support only float, uint8, and uint16 arguments. + case DT_FLOAT32: + unique_vals = DeduplicateTypedValues(in_att, in_att_offset); + break; + case DT_INT8: + unique_vals = DeduplicateTypedValues(in_att, in_att_offset); + break; + case DT_UINT8: + case DT_BOOL: + unique_vals = DeduplicateTypedValues(in_att, in_att_offset); + break; + case DT_UINT16: + unique_vals = DeduplicateTypedValues(in_att, in_att_offset); + break; + case DT_INT16: + unique_vals = DeduplicateTypedValues(in_att, in_att_offset); + break; + case DT_UINT32: + unique_vals = DeduplicateTypedValues(in_att, in_att_offset); + break; + case DT_INT32: + unique_vals = DeduplicateTypedValues(in_att, in_att_offset); + break; + default: + return -1; // Unsupported data type. + } + if (unique_vals == 0) { + return -1; // Unexpected error. + } + return unique_vals; +} + +// Helper function for calling UnifyDuplicateAttributes +// with the correct template arguments. +// Returns the number of unique attribute values. +template +AttributeValueIndex::ValueType PointAttribute::DeduplicateTypedValues( + const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) { + // Select the correct method to call based on the number of attribute + // components. 
+ switch (in_att.num_components()) { + case 1: + return DeduplicateFormattedValues(in_att, in_att_offset); + case 2: + return DeduplicateFormattedValues(in_att, in_att_offset); + case 3: + return DeduplicateFormattedValues(in_att, in_att_offset); + case 4: + return DeduplicateFormattedValues(in_att, in_att_offset); + default: + return 0; + } +} + +template +AttributeValueIndex::ValueType PointAttribute::DeduplicateFormattedValues( + const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) { + // We want to detect duplicates using a hash map but we cannot hash floating + // point numbers directly so bit-copy floats to the same sized integers and + // hash them. + + // First we need to determine which int type to use (1, 2, 4 or 8 bytes). + // Note, this is done at compile time using std::conditional struct. + // Conditional is in form . If bool-expression + // is true the "true" branch is used and vice versa. All at compile time. + typedef conditional_t>> + HashType; + + AttributeValueIndex unique_vals(0); + typedef std::array AttributeValue; + typedef std::array AttributeHashableValue; + // Hash map storing index of the first attribute with a given value. + unordered_map> + value_to_index_map; + AttributeValue att_value; + AttributeHashableValue hashable_value; + IndexTypeVector value_map( + num_unique_entries_); + for (AttributeValueIndex i(0); i < num_unique_entries_; ++i) { + const AttributeValueIndex att_pos = i + in_att_offset; + att_value = in_att.GetValue(att_pos); + // Convert the value to hashable type. Bit-copy real attributes to integers. + memcpy(&(hashable_value[0]), &(att_value[0]), sizeof(att_value)); + + // Check if the given attribute value has been used before already. + auto it = value_to_index_map.find(hashable_value); + if (it != value_to_index_map.end()) { + // Duplicated value found. Update index mapping. + value_map[i] = it->second; + } else { + // New unique value. + // Update the hash map with a new entry pointing to the latest unique + // vertex index. + value_to_index_map.insert( + std::pair(hashable_value, + unique_vals)); + // Add the unique value to the mesh builder. + SetAttributeValue(unique_vals, &att_value); + // Update index mapping. + value_map[i] = unique_vals; + + ++unique_vals; + } + } + if (unique_vals == num_unique_entries_) { + return unique_vals.value(); // Nothing has changed. + } + if (is_mapping_identity()) { + // Change identity mapping to the explicit one. + // The number of points is equal to the number of old unique values. + SetExplicitMapping(num_unique_entries_); + // Update the explicit map. + for (uint32_t i = 0; i < num_unique_entries_; ++i) { + SetPointMapEntry(PointIndex(i), value_map[AttributeValueIndex(i)]); + } + } else { + // Update point to value map using the mapping between old and new values. + for (PointIndex i(0); i < static_cast(indices_map_.size()); ++i) { + SetPointMapEntry(i, value_map[indices_map_[i]]); + } + } + num_unique_entries_ = unique_vals.value(); + return num_unique_entries_; +} +#endif + +} // namespace draco diff --git a/contrib/draco/src/draco/attributes/point_attribute.h b/contrib/draco/src/draco/attributes/point_attribute.h new file mode 100644 index 000000000..ee3662031 --- /dev/null +++ b/contrib/draco/src/draco/attributes/point_attribute.h @@ -0,0 +1,190 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
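DeduplicateFormattedValues() above bit-copies each fixed-size attribute value into a hashable key, maps every key to the first attribute index that used it, and records an old-to-new index map that later updates the point mapping. The compact sketch below reproduces that pattern over float triples; the std::string keys and helper names are simplifications, not the templated Draco code.

```cpp
// Compact sketch of value deduplication: bit-copy each value into a hashable
// key, remember the first index per key, and build an old->new index map.
#include <array>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>
#include <unordered_map>
#include <vector>

static std::string ToKey(const std::array<float, 3> &value) {
  // Bit-copy the floats so equal bit patterns compare and hash identically.
  std::string key(sizeof(value), '\0');
  std::memcpy(&key[0], value.data(), sizeof(value));
  return key;
}

int main() {
  const std::vector<std::array<float, 3>> values = {
      {0.f, 0.f, 1.f}, {1.f, 0.f, 0.f}, {0.f, 0.f, 1.f}};

  std::unordered_map<std::string, uint32_t> value_to_index;
  std::vector<uint32_t> value_map(values.size());  // old index -> new index
  std::vector<std::array<float, 3>> unique_values;

  for (uint32_t i = 0; i < values.size(); ++i) {
    const std::string key = ToKey(values[i]);
    const auto it = value_to_index.find(key);
    if (it != value_to_index.end()) {
      value_map[i] = it->second;  // Duplicate: reuse the earlier entry.
    } else {
      const uint32_t new_index = static_cast<uint32_t>(unique_values.size());
      value_to_index.emplace(key, new_index);
      unique_values.push_back(values[i]);
      value_map[i] = new_index;
    }
  }
  std::printf("%zu unique of %zu values; value_map[2] = %u\n",
              unique_values.size(), values.size(), value_map[2]);
  return 0;
}
```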
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_ +#define DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_ + +#include + +#include "draco/attributes/attribute_transform_data.h" +#include "draco/attributes/geometry_attribute.h" +#include "draco/core/draco_index_type_vector.h" +#include "draco/core/hash_utils.h" +#include "draco/core/macros.h" +#include "draco/draco_features.h" + +namespace draco { + +// Class for storing point specific data about each attribute. In general, +// multiple points stored in a point cloud can share the same attribute value +// and this class provides the necessary mapping between point ids and attribute +// value ids. +class PointAttribute : public GeometryAttribute { + public: + PointAttribute(); + explicit PointAttribute(const GeometryAttribute &att); + + // Make sure the move constructor is defined (needed for better performance + // when new attributes are added to PointCloud). + PointAttribute(PointAttribute &&attribute) = default; + PointAttribute &operator=(PointAttribute &&attribute) = default; + + // Initializes a point attribute. By default the attribute will be set to + // identity mapping between point indices and attribute values. To set custom + // mapping use SetExplicitMapping() function. + void Init(Type attribute_type, int8_t num_components, DataType data_type, + bool normalized, size_t num_attribute_values); + + // Copies attribute data from the provided |src_att| attribute. + void CopyFrom(const PointAttribute &src_att); + + // Prepares the attribute storage for the specified number of entries. + bool Reset(size_t num_attribute_values); + + size_t size() const { return num_unique_entries_; } + AttributeValueIndex mapped_index(PointIndex point_index) const { + if (identity_mapping_) { + return AttributeValueIndex(point_index.value()); + } + return indices_map_[point_index]; + } + DataBuffer *buffer() const { return attribute_buffer_.get(); } + bool is_mapping_identity() const { return identity_mapping_; } + size_t indices_map_size() const { + if (is_mapping_identity()) { + return 0; + } + return indices_map_.size(); + } + + const uint8_t *GetAddressOfMappedIndex(PointIndex point_index) const { + return GetAddress(mapped_index(point_index)); + } + + // Sets the new number of unique attribute entries for the attribute. The + // function resizes the attribute storage to hold |num_attribute_values| + // entries. + // All previous entries with AttributeValueIndex < |num_attribute_values| + // are preserved. Caller needs to ensure that the PointAttribute is still + // valid after the resizing operation (that is, each point is mapped to a + // valid attribute value). + void Resize(size_t new_num_unique_entries); + + // Functions for setting the type of mapping between point indices and + // attribute entry ids. + // This function sets the mapping to implicit, where point indices are equal + // to attribute entry indices. + void SetIdentityMapping() { + identity_mapping_ = true; + indices_map_.clear(); + } + // This function sets the mapping to be explicitly using the indices_map_ + // array that needs to be initialized by the caller. 
+ void SetExplicitMapping(size_t num_points) { + identity_mapping_ = false; + indices_map_.resize(num_points, kInvalidAttributeValueIndex); + } + + // Set an explicit map entry for a specific point index. + void SetPointMapEntry(PointIndex point_index, + AttributeValueIndex entry_index) { + DRACO_DCHECK(!identity_mapping_); + indices_map_[point_index] = entry_index; + } + + // Same as GeometryAttribute::GetValue(), but using point id as the input. + // Mapping to attribute value index is performed automatically. + void GetMappedValue(PointIndex point_index, void *out_data) const { + return GetValue(mapped_index(point_index), out_data); + } + +#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED + // Deduplicate |in_att| values into |this| attribute. |in_att| can be equal + // to |this|. + // Returns -1 if the deduplication failed. + AttributeValueIndex::ValueType DeduplicateValues( + const GeometryAttribute &in_att); + + // Same as above but the values read from |in_att| are sampled with the + // provided offset |in_att_offset|. + AttributeValueIndex::ValueType DeduplicateValues( + const GeometryAttribute &in_att, AttributeValueIndex in_att_offset); +#endif + + // Set attribute transform data for the attribute. The data is used to store + // the type and parameters of the transform that is applied on the attribute + // data (optional). + void SetAttributeTransformData( + std::unique_ptr transform_data) { + attribute_transform_data_ = std::move(transform_data); + } + const AttributeTransformData *GetAttributeTransformData() const { + return attribute_transform_data_.get(); + } + + private: +#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED + template + AttributeValueIndex::ValueType DeduplicateTypedValues( + const GeometryAttribute &in_att, AttributeValueIndex in_att_offset); + template + AttributeValueIndex::ValueType DeduplicateFormattedValues( + const GeometryAttribute &in_att, AttributeValueIndex in_att_offset); +#endif + + // Data storage for attribute values. GeometryAttribute itself doesn't own its + // buffer so we need to allocate it here. + std::unique_ptr attribute_buffer_; + + // Mapping between point ids and attribute value ids. + IndexTypeVector indices_map_; + AttributeValueIndex::ValueType num_unique_entries_; + // Flag when the mapping between point ids and attribute values is identity. + bool identity_mapping_; + + // If an attribute contains transformed data (e.g. quantized), we can specify + // the attribute transform here and use it to transform the attribute back to + // its original format. + std::unique_ptr attribute_transform_data_; + + friend struct PointAttributeHasher; +}; + +// Hash functor for the PointAttribute class. 
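PointAttribute::mapped_index(), declared above, either returns the point index unchanged (identity mapping) or looks it up in the explicit indices map. A tiny sketch of that lookup follows; ToyMapping is a made-up stand-in, not the real class.

```cpp
// Tiny sketch of the identity vs. explicit point-to-value mapping used by
// PointAttribute::mapped_index(). ToyMapping is illustrative only.
#include <cstdint>
#include <cstdio>
#include <vector>

struct ToyMapping {
  bool identity_mapping = true;
  std::vector<uint32_t> indices_map;  // used only when identity_mapping == false

  uint32_t mapped_index(uint32_t point_index) const {
    return identity_mapping ? point_index : indices_map[point_index];
  }
};

int main() {
  ToyMapping mapping;
  std::printf("identity: point 3 -> value %u\n", mapping.mapped_index(3));

  // Switch to an explicit mapping: two points share attribute value 0.
  mapping.identity_mapping = false;
  mapping.indices_map = {0, 0, 1};
  std::printf("explicit: point 1 -> value %u\n", mapping.mapped_index(1));
  return 0;
}
```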
+struct PointAttributeHasher { + size_t operator()(const PointAttribute &attribute) const { + GeometryAttributeHasher base_hasher; + size_t hash = base_hasher(attribute); + hash = HashCombine(attribute.identity_mapping_, hash); + hash = HashCombine(attribute.num_unique_entries_, hash); + hash = HashCombine(attribute.indices_map_.size(), hash); + if (!attribute.indices_map_.empty()) { + const uint64_t indices_hash = FingerprintString( + reinterpret_cast(attribute.indices_map_.data()), + attribute.indices_map_.size()); + hash = HashCombine(indices_hash, hash); + } + if (attribute.attribute_buffer_ != nullptr) { + const uint64_t buffer_hash = FingerprintString( + reinterpret_cast(attribute.attribute_buffer_->data()), + attribute.attribute_buffer_->data_size()); + hash = HashCombine(buffer_hash, hash); + } + return hash; + } +}; + +} // namespace draco + +#endif // DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_ diff --git a/contrib/draco/src/draco/attributes/point_attribute_test.cc b/contrib/draco/src/draco/attributes/point_attribute_test.cc new file mode 100644 index 000000000..4ae23fb3c --- /dev/null +++ b/contrib/draco/src/draco/attributes/point_attribute_test.cc @@ -0,0 +1,128 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/attributes/point_attribute.h" + +#include "draco/core/draco_test_base.h" + +namespace { + +class PointAttributeTest : public ::testing::Test { + protected: + PointAttributeTest() {} +}; + +TEST_F(PointAttributeTest, TestCopy) { + // This test verifies that PointAttribute can copy data from another point + // attribute. + draco::PointAttribute pa; + pa.Init(draco::GeometryAttribute::POSITION, 1, draco::DT_INT32, false, 10); + + for (int32_t i = 0; i < 10; ++i) { + pa.SetAttributeValue(draco::AttributeValueIndex(i), &i); + } + + pa.set_unique_id(12); + + draco::PointAttribute other_pa; + other_pa.CopyFrom(pa); + + draco::PointAttributeHasher hasher; + ASSERT_EQ(hasher(pa), hasher(other_pa)); + ASSERT_EQ(pa.unique_id(), other_pa.unique_id()); + + // The hash function does not actually compute the hash from attribute values, + // so ensure the data got copied correctly as well. 
+ for (int32_t i = 0; i < 10; ++i) { + int32_t data; + other_pa.GetValue(draco::AttributeValueIndex(i), &data); + ASSERT_EQ(data, i); + } +} + +TEST_F(PointAttributeTest, TestGetValueFloat) { + draco::PointAttribute pa; + pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5); + float points[3]; + for (int32_t i = 0; i < 5; ++i) { + points[0] = i * 3.0; + points[1] = (i * 3.0) + 1.0; + points[2] = (i * 3.0) + 2.0; + pa.SetAttributeValue(draco::AttributeValueIndex(i), &points); + } + + for (int32_t i = 0; i < 5; ++i) { + pa.GetValue(draco::AttributeValueIndex(i), &points); + ASSERT_FLOAT_EQ(points[0], i * 3.0); + ASSERT_FLOAT_EQ(points[1], (i * 3.0) + 1.0); + ASSERT_FLOAT_EQ(points[2], (i * 3.0) + 2.0); + } +} + +TEST_F(PointAttributeTest, TestGetArray) { + draco::PointAttribute pa; + pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5); + float points[3]; + for (int32_t i = 0; i < 5; ++i) { + points[0] = i * 3.0; + points[1] = (i * 3.0) + 1.0; + points[2] = (i * 3.0) + 2.0; + pa.SetAttributeValue(draco::AttributeValueIndex(i), &points); + } + + for (int32_t i = 0; i < 5; ++i) { + std::array att_value; + att_value = pa.GetValue(draco::AttributeValueIndex(i)); + ASSERT_FLOAT_EQ(att_value[0], i * 3.0); + ASSERT_FLOAT_EQ(att_value[1], (i * 3.0) + 1.0); + ASSERT_FLOAT_EQ(att_value[2], (i * 3.0) + 2.0); + } + for (int32_t i = 0; i < 5; ++i) { + std::array att_value; + EXPECT_TRUE( + (pa.GetValue(draco::AttributeValueIndex(i), &att_value))); + ASSERT_FLOAT_EQ(att_value[0], i * 3.0); + ASSERT_FLOAT_EQ(att_value[1], (i * 3.0) + 1.0); + ASSERT_FLOAT_EQ(att_value[2], (i * 3.0) + 2.0); + } +} + +TEST_F(PointAttributeTest, TestArrayReadError) { + draco::PointAttribute pa; + pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5); + float points[3]; + for (int32_t i = 0; i < 5; ++i) { + points[0] = i * 3.0; + points[1] = (i * 3.0) + 1.0; + points[2] = (i * 3.0) + 2.0; + pa.SetAttributeValue(draco::AttributeValueIndex(i), &points); + } + + std::array att_value; + EXPECT_FALSE( + (pa.GetValue(draco::AttributeValueIndex(5), &att_value))); +} + +TEST_F(PointAttributeTest, TestResize) { + draco::PointAttribute pa; + pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5); + ASSERT_EQ(pa.size(), 5); + ASSERT_EQ(pa.buffer()->data_size(), 4 * 3 * 5); + + pa.Resize(10); + ASSERT_EQ(pa.size(), 10); + ASSERT_EQ(pa.buffer()->data_size(), 4 * 3 * 10); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/attributes/attributes_decoder.cc b/contrib/draco/src/draco/compression/attributes/attributes_decoder.cc new file mode 100644 index 000000000..007dd2f43 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/attributes_decoder.cc @@ -0,0 +1,127 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#include "draco/compression/attributes/attributes_decoder.h" + +#include "draco/core/varint_decoding.h" + +namespace draco { + +AttributesDecoder::AttributesDecoder() + : point_cloud_decoder_(nullptr), point_cloud_(nullptr) {} + +bool AttributesDecoder::Init(PointCloudDecoder *decoder, PointCloud *pc) { + point_cloud_decoder_ = decoder; + point_cloud_ = pc; + return true; +} + +bool AttributesDecoder::DecodeAttributesDecoderData(DecoderBuffer *in_buffer) { + // Decode and create attributes. + uint32_t num_attributes; +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (point_cloud_decoder_->bitstream_version() < + DRACO_BITSTREAM_VERSION(2, 0)) { + if (!in_buffer->Decode(&num_attributes)) { + return false; + } + } else +#endif + { + if (!DecodeVarint(&num_attributes, in_buffer)) { + return false; + } + } + + // Check that decoded number of attributes is valid. + if (num_attributes == 0) { + return false; + } + if (num_attributes > 5 * in_buffer->remaining_size()) { + // The decoded number of attributes is unreasonably high, because at least + // five bytes of attribute descriptor data per attribute are expected. + return false; + } + + // Decode attribute descriptor data. + point_attribute_ids_.resize(num_attributes); + PointCloud *pc = point_cloud_; + for (uint32_t i = 0; i < num_attributes; ++i) { + // Decode attribute descriptor data. + uint8_t att_type, data_type, num_components, normalized; + if (!in_buffer->Decode(&att_type)) { + return false; + } + if (!in_buffer->Decode(&data_type)) { + return false; + } + if (!in_buffer->Decode(&num_components)) { + return false; + } + if (!in_buffer->Decode(&normalized)) { + return false; + } + if (att_type >= GeometryAttribute::NAMED_ATTRIBUTES_COUNT) { + return false; + } + if (data_type == DT_INVALID || data_type >= DT_TYPES_COUNT) { + return false; + } + + // Check decoded attribute descriptor data. + if (num_components == 0) { + return false; + } + + // Add the attribute to the point cloud. + const DataType draco_dt = static_cast(data_type); + GeometryAttribute ga; + ga.Init(static_cast(att_type), nullptr, + num_components, draco_dt, normalized > 0, + DataTypeLength(draco_dt) * num_components, 0); + uint32_t unique_id; +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (point_cloud_decoder_->bitstream_version() < + DRACO_BITSTREAM_VERSION(1, 3)) { + uint16_t custom_id; + if (!in_buffer->Decode(&custom_id)) { + return false; + } + // TODO(draco-eng): Add "custom_id" to attribute metadata. + unique_id = static_cast(custom_id); + ga.set_unique_id(unique_id); + } else +#endif + { + if (!DecodeVarint(&unique_id, in_buffer)) { + return false; + } + ga.set_unique_id(unique_id); + } + const int att_id = pc->AddAttribute( + std::unique_ptr(new PointAttribute(ga))); + pc->attribute(att_id)->set_unique_id(unique_id); + point_attribute_ids_[i] = att_id; + + // Update the inverse map. + if (att_id >= + static_cast(point_attribute_to_local_id_map_.size())) { + point_attribute_to_local_id_map_.resize(att_id + 1, -1); + } + point_attribute_to_local_id_map_[att_id] = i; + } + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/attributes_decoder.h b/contrib/draco/src/draco/compression/attributes/attributes_decoder.h new file mode 100644 index 000000000..5b2bb2cfe --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/attributes_decoder.h @@ -0,0 +1,97 @@ +// Copyright 2016 The Draco Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_ + +#include + +#include "draco/compression/attributes/attributes_decoder_interface.h" +#include "draco/compression/point_cloud/point_cloud_decoder.h" +#include "draco/core/decoder_buffer.h" +#include "draco/draco_features.h" +#include "draco/point_cloud/point_cloud.h" + +namespace draco { + +// Base class for decoding one or more attributes that were encoded with a +// matching AttributesEncoder. It is a basic implementation of +// AttributesDecoderInterface that provides functionality that is shared between +// all AttributesDecoders. +class AttributesDecoder : public AttributesDecoderInterface { + public: + AttributesDecoder(); + virtual ~AttributesDecoder() = default; + + // Called after all attribute decoders are created. It can be used to perform + // any custom initialization. + bool Init(PointCloudDecoder *decoder, PointCloud *pc) override; + + // Decodes any attribute decoder specific data from the |in_buffer|. + bool DecodeAttributesDecoderData(DecoderBuffer *in_buffer) override; + + int32_t GetAttributeId(int i) const override { + return point_attribute_ids_[i]; + } + int32_t GetNumAttributes() const override { + return static_cast(point_attribute_ids_.size()); + } + PointCloudDecoder *GetDecoder() const override { + return point_cloud_decoder_; + } + + // Decodes attribute data from the source buffer. + bool DecodeAttributes(DecoderBuffer *in_buffer) override { + if (!DecodePortableAttributes(in_buffer)) { + return false; + } + if (!DecodeDataNeededByPortableTransforms(in_buffer)) { + return false; + } + if (!TransformAttributesToOriginalFormat()) { + return false; + } + return true; + } + + protected: + int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const { + const int id_map_size = + static_cast(point_attribute_to_local_id_map_.size()); + if (point_attribute_id >= id_map_size) { + return -1; + } + return point_attribute_to_local_id_map_[point_attribute_id]; + } + virtual bool DecodePortableAttributes(DecoderBuffer *in_buffer) = 0; + virtual bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) { + return true; + } + virtual bool TransformAttributesToOriginalFormat() { return true; } + + private: + // List of attribute ids that need to be decoded with this decoder. + std::vector point_attribute_ids_; + + // Map between point attribute id and the local id (i.e., the inverse of the + // |point_attribute_ids_|. 
+ std::vector point_attribute_to_local_id_map_; + + PointCloudDecoder *point_cloud_decoder_; + PointCloud *point_cloud_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/attributes_decoder_interface.h b/contrib/draco/src/draco/compression/attributes/attributes_decoder_interface.h new file mode 100644 index 000000000..8e5cf52ac --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/attributes_decoder_interface.h @@ -0,0 +1,62 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_ + +#include + +#include "draco/core/decoder_buffer.h" +#include "draco/point_cloud/point_cloud.h" + +namespace draco { + +class PointCloudDecoder; + +// Interface class for decoding one or more attributes that were encoded with a +// matching AttributesEncoder. It provides only the basic interface +// that is used by the PointCloudDecoder. The actual decoding must be +// implemented in derived classes using the DecodeAttributes() method. +class AttributesDecoderInterface { + public: + AttributesDecoderInterface() = default; + virtual ~AttributesDecoderInterface() = default; + + // Called after all attribute decoders are created. It can be used to perform + // any custom initialization. + virtual bool Init(PointCloudDecoder *decoder, PointCloud *pc) = 0; + + // Decodes any attribute decoder specific data from the |in_buffer|. + virtual bool DecodeAttributesDecoderData(DecoderBuffer *in_buffer) = 0; + + // Decode attribute data from the source buffer. Needs to be implemented by + // the derived classes. + virtual bool DecodeAttributes(DecoderBuffer *in_buffer) = 0; + + virtual int32_t GetAttributeId(int i) const = 0; + virtual int32_t GetNumAttributes() const = 0; + virtual PointCloudDecoder *GetDecoder() const = 0; + + // Returns an attribute containing data processed by the attribute transform. + // (see TransformToPortableFormat() method). This data is guaranteed to be + // same for encoder and decoder and it can be used by predictors. + virtual const PointAttribute *GetPortableAttribute( + int32_t /* point_attribute_id */) { + return nullptr; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc b/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc new file mode 100644 index 000000000..797c62f30 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/attributes_encoder.cc @@ -0,0 +1,49 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/attributes_encoder.h" + +#include "draco/core/varint_encoding.h" + +namespace draco { + +AttributesEncoder::AttributesEncoder() + : point_cloud_encoder_(nullptr), point_cloud_(nullptr) {} + +AttributesEncoder::AttributesEncoder(int att_id) : AttributesEncoder() { + AddAttributeId(att_id); +} + +bool AttributesEncoder::Init(PointCloudEncoder *encoder, const PointCloud *pc) { + point_cloud_encoder_ = encoder; + point_cloud_ = pc; + return true; +} + +bool AttributesEncoder::EncodeAttributesEncoderData(EncoderBuffer *out_buffer) { + // Encode data about all attributes. + EncodeVarint(num_attributes(), out_buffer); + for (uint32_t i = 0; i < num_attributes(); ++i) { + const int32_t att_id = point_attribute_ids_[i]; + const PointAttribute *const pa = point_cloud_->attribute(att_id); + out_buffer->Encode(static_cast(pa->attribute_type())); + out_buffer->Encode(static_cast(pa->data_type())); + out_buffer->Encode(static_cast(pa->num_components())); + out_buffer->Encode(static_cast(pa->normalized())); + EncodeVarint(pa->unique_id(), out_buffer); + } + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/attributes_encoder.h b/contrib/draco/src/draco/compression/attributes/attributes_encoder.h new file mode 100644 index 000000000..9de846ae6 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/attributes_encoder.h @@ -0,0 +1,154 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_ + +#include "draco/attributes/point_attribute.h" +#include "draco/core/encoder_buffer.h" +#include "draco/point_cloud/point_cloud.h" + +namespace draco { + +class PointCloudEncoder; + +// Base class for encoding one or more attributes of a PointCloud (or other +// geometry). This base class provides only the basic interface that is used +// by the PointCloudEncoder. +class AttributesEncoder { + public: + AttributesEncoder(); + // Constructs an attribute encoder associated with a given point attribute. + explicit AttributesEncoder(int point_attrib_id); + virtual ~AttributesEncoder() = default; + + // Called after all attribute encoders are created. It can be used to perform + // any custom initialization, including setting up attribute dependencies. + // Note: no data should be encoded in this function, because the decoder may + // process encoders in a different order from the decoder. 
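EncodeAttributesEncoderData() above writes the attribute count and each attribute's unique id with EncodeVarint, mirroring the DecodeVarint calls in attributes_decoder.cc. The varint routines themselves are not part of this hunk; the standalone sketch below (names EncodeVarint32/DecodeVarint32 are illustrative, not Draco's API) shows a generic LEB128-style variable-length integer, which is the usual shape of such a codec and is included only to make the round trip concrete.

```cpp
#include <cstdint>
#include <vector>

// 7 payload bits per byte, MSB set on every byte except the last one.
// Small values (typical attribute counts and ids) fit in a single byte.
static void EncodeVarint32(uint32_t value, std::vector<uint8_t> *out) {
  while (value >= 0x80) {
    out->push_back(static_cast<uint8_t>(value) | 0x80);
    value >>= 7;
  }
  out->push_back(static_cast<uint8_t>(value));
}

static bool DecodeVarint32(const std::vector<uint8_t> &in, size_t *pos,
                           uint32_t *out_value) {
  uint32_t value = 0;
  for (int shift = 0; shift < 35; shift += 7) {  // at most 5 bytes for 32 bits
    if (*pos >= in.size()) {
      return false;  // Ran out of buffer mid-varint.
    }
    const uint8_t byte = in[(*pos)++];
    value |= static_cast<uint32_t>(byte & 0x7f) << shift;
    if (!(byte & 0x80)) {
      *out_value = value;
      return true;
    }
  }
  return false;  // More than 5 bytes is malformed for a 32-bit value.
}
```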
+ virtual bool Init(PointCloudEncoder *encoder, const PointCloud *pc); + + // Encodes data needed by the target attribute decoder. + virtual bool EncodeAttributesEncoderData(EncoderBuffer *out_buffer); + + // Returns a unique identifier of the given encoder type, that is used during + // decoding to construct the corresponding attribute decoder. + virtual uint8_t GetUniqueId() const = 0; + + // Encode attribute data to the target buffer. + virtual bool EncodeAttributes(EncoderBuffer *out_buffer) { + if (!TransformAttributesToPortableFormat()) { + return false; + } + if (!EncodePortableAttributes(out_buffer)) { + return false; + } + // Encode data needed by portable transforms after the attribute is encoded. + // This corresponds to the order in which the data is going to be decoded by + // the decoder. + if (!EncodeDataNeededByPortableTransforms(out_buffer)) { + return false; + } + return true; + } + + // Returns the number of attributes that need to be encoded before the + // specified attribute is encoded. + // Note that the attribute is specified by its point attribute id. + virtual int NumParentAttributes(int32_t /* point_attribute_id */) const { + return 0; + } + + virtual int GetParentAttributeId(int32_t /* point_attribute_id */, + int32_t /* parent_i */) const { + return -1; + } + + // Marks a given attribute as a parent of another attribute. + virtual bool MarkParentAttribute(int32_t /* point_attribute_id */) { + return false; + } + + // Returns an attribute containing data processed by the attribute transform. + // (see TransformToPortableFormat() method). This data is guaranteed to be + // encoded losslessly and it can be safely used for predictors. + virtual const PointAttribute *GetPortableAttribute( + int32_t /* point_attribute_id */) { + return nullptr; + } + + void AddAttributeId(int32_t id) { + point_attribute_ids_.push_back(id); + if (id >= static_cast(point_attribute_to_local_id_map_.size())) { + point_attribute_to_local_id_map_.resize(id + 1, -1); + } + point_attribute_to_local_id_map_[id] = + static_cast(point_attribute_ids_.size()) - 1; + } + + // Sets new attribute point ids (replacing the existing ones). + void SetAttributeIds(const std::vector &point_attribute_ids) { + point_attribute_ids_.clear(); + point_attribute_to_local_id_map_.clear(); + for (int32_t att_id : point_attribute_ids) { + AddAttributeId(att_id); + } + } + + int32_t GetAttributeId(int i) const { return point_attribute_ids_[i]; } + uint32_t num_attributes() const { + return static_cast(point_attribute_ids_.size()); + } + PointCloudEncoder *encoder() const { return point_cloud_encoder_; } + + protected: + // Transforms the input attribute data into a form that should be losslessly + // encoded (transform itself can be lossy). + virtual bool TransformAttributesToPortableFormat() { return true; } + + // Losslessly encodes data of all portable attributes. + // Precondition: All attributes must have been transformed into portable + // format at this point (see TransformAttributesToPortableFormat() method). + virtual bool EncodePortableAttributes(EncoderBuffer *out_buffer) = 0; + + // Encodes any data needed to revert the transform to portable format for each + // attribute (e.g. data needed for dequantization of quantized values). 
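The "data needed to revert the transform" mentioned above is, for the quantization transform used later in this patch, a per-component minimum, a range, and a bit count. The following self-contained sketch is not Draco's AttributeQuantizationTransform; it only illustrates why those three values are all the decoder needs (QuantizationParams, Quantize and Dequantize are illustrative names).

```cpp
#include <cstdint>

struct QuantizationParams {  // Side data the encoder must write for the decoder.
  float min_value;
  float range;
  int bits;
};

// Map a float in [min_value, min_value + range] to an integer in [0, 2^bits - 1].
static uint32_t Quantize(float value, const QuantizationParams &p) {
  const uint32_t max_q = (1u << p.bits) - 1;
  float normalized = (value - p.min_value) / p.range;
  if (normalized < 0.f) normalized = 0.f;
  if (normalized > 1.f) normalized = 1.f;
  return static_cast<uint32_t>(normalized * max_q + 0.5f);
}

// Invert the mapping using only the stored side data.
static float Dequantize(uint32_t q, const QuantizationParams &p) {
  const uint32_t max_q = (1u << p.bits) - 1;
  return p.min_value + (static_cast<float>(q) / max_q) * p.range;
}
```

The dequantization loop in kd_tree_attributes_decoder.cc below has the same shape: portable uint32 values are scaled by range / max_quantized_value and shifted by the per-component minimum.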
+ virtual bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) { + return true; + } + + int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const { + const int id_map_size = + static_cast(point_attribute_to_local_id_map_.size()); + if (point_attribute_id >= id_map_size) { + return -1; + } + return point_attribute_to_local_id_map_[point_attribute_id]; + } + + private: + // List of attribute ids that need to be encoded with this encoder. + std::vector point_attribute_ids_; + + // Map between point attribute id and the local id (i.e., the inverse of the + // |point_attribute_ids_|. + std::vector point_attribute_to_local_id_map_; + + PointCloudEncoder *point_cloud_encoder_; + const PointCloud *point_cloud_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc new file mode 100644 index 000000000..e4d53485d --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.cc @@ -0,0 +1,556 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/kd_tree_attributes_decoder.h" + +#include "draco/compression/attributes/kd_tree_attributes_shared.h" +#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h" +#include "draco/compression/point_cloud/algorithms/float_points_tree_decoder.h" +#include "draco/compression/point_cloud/point_cloud_decoder.h" +#include "draco/core/draco_types.h" +#include "draco/core/varint_decoding.h" + +namespace draco { + +// attribute, offset_dimensionality, data_type, data_size, num_components +using AttributeTuple = + std::tuple; + +// Output iterator that is used to decode values directly into the data buffer +// of the modified PointAttribute. +// The extension of this iterator beyond the DT_UINT32 concerns itself only with +// the size of the data for efficiency, not the type. DataType is conveyed in +// but is an unused field populated for any future logic/special casing. +// DT_UINT32 and all other 4-byte types are naturally supported from the size of +// data in the kd tree encoder. DT_UINT16 and DT_UINT8 are supported by way +// of byte copies into a temporary memory buffer. 
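As the comment block above explains, the kd-tree core always produces 32-bit values, and narrower attribute types (DT_UINT16, DT_UINT8) are handled by copying only the first data_size bytes of each component into a staging buffer before writing them to the attribute. A minimal standalone illustration of that byte-copy step (PackNarrowComponents is an illustrative name; like the iterator below, it picks up the low-order bytes only on little-endian machines):

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Pack the first |data_size| bytes of each decoded 32-bit component into a
// contiguous buffer of the attribute's narrower type.
static std::vector<uint8_t> PackNarrowComponents(
    const std::vector<uint32_t> &decoded, size_t data_size /* 1 or 2 */) {
  std::vector<uint8_t> packed(decoded.size() * data_size);
  uint8_t *dst = packed.data();
  for (size_t i = 0; i < decoded.size(); ++i, dst += data_size) {
    std::memcpy(dst, &decoded[i], data_size);
  }
  return packed;
}

// Example: DT_UINT16 components decoded as {1, 300, 65535} become six bytes
// that reinterpret cleanly as uint16_t{1, 300, 65535} on little-endian hosts.
```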
+template +class PointAttributeVectorOutputIterator { + typedef PointAttributeVectorOutputIterator Self; + + public: + PointAttributeVectorOutputIterator( + PointAttributeVectorOutputIterator &&that) = default; + + explicit PointAttributeVectorOutputIterator( + const std::vector &atts) + : attributes_(atts), point_id_(0) { + DRACO_DCHECK_GE(atts.size(), 1); + uint32_t required_decode_bytes = 0; + for (auto index = 0; index < attributes_.size(); index++) { + const AttributeTuple &att = attributes_[index]; + required_decode_bytes = (std::max)(required_decode_bytes, + std::get<3>(att) * std::get<4>(att)); + } + memory_.resize(required_decode_bytes); + data_ = memory_.data(); + } + + const Self &operator++() { + ++point_id_; + return *this; + } + + // We do not want to do ANY copying of this constructor so this particular + // operator is disabled for performance reasons. + // Self operator++(int) { + // Self copy = *this; + // ++point_id_; + // return copy; + // } + + Self &operator*() { return *this; } + // Still needed in some cases. + // TODO(hemmer): remove. + // hardcoded to 3 based on legacy usage. + const Self &operator=(const VectorD &val) { + DRACO_DCHECK_EQ(attributes_.size(), 1); // Expect only ONE attribute. + AttributeTuple &att = attributes_[0]; + PointAttribute *attribute = std::get<0>(att); + const uint32_t &offset = std::get<1>(att); + DRACO_DCHECK_EQ(offset, 0); // expected to be zero + attribute->SetAttributeValue(attribute->mapped_index(point_id_), + &val[0] + offset); + return *this; + } + // Additional operator taking std::vector as argument. + const Self &operator=(const std::vector &val) { + for (auto index = 0; index < attributes_.size(); index++) { + AttributeTuple &att = attributes_[index]; + PointAttribute *attribute = std::get<0>(att); + const uint32_t &offset = std::get<1>(att); + const uint32_t &data_size = std::get<3>(att); + const uint32_t &num_components = std::get<4>(att); + const uint32_t *data_source = val.data() + offset; + if (data_size < 4) { // handle uint16_t, uint8_t + // selectively copy data bytes + uint8_t *data_counter = data_; + for (uint32_t index = 0; index < num_components; + index += 1, data_counter += data_size) { + std::memcpy(data_counter, data_source + index, data_size); + } + // redirect to copied data + data_source = reinterpret_cast(data_); + } + const AttributeValueIndex avi = attribute->mapped_index(point_id_); + if (avi >= static_cast(attribute->size())) { + return *this; + } + attribute->SetAttributeValue(avi, data_source); + } + return *this; + } + + private: + // preallocated memory for buffering different data sizes. Never reallocated. + std::vector memory_; + uint8_t *data_; + std::vector attributes_; + PointIndex point_id_; + + // NO COPY + PointAttributeVectorOutputIterator( + const PointAttributeVectorOutputIterator &that) = delete; + PointAttributeVectorOutputIterator &operator=( + PointAttributeVectorOutputIterator const &) = delete; +}; + +KdTreeAttributesDecoder::KdTreeAttributesDecoder() {} + +bool KdTreeAttributesDecoder::DecodePortableAttributes( + DecoderBuffer *in_buffer) { + if (in_buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 3)) { + // Old bitstream does everything in the + // DecodeDataNeededByPortableTransforms() method. + return true; + } + uint8_t compression_level = 0; + if (!in_buffer->Decode(&compression_level)) { + return false; + } + const int32_t num_points = GetDecoder()->point_cloud()->num_points(); + + // Decode data using the kd tree decoding into integer (portable) attributes. 
+ // We first need to go over all attributes and create a new portable storage + // for those attributes that need it (floating point attributes that have to + // be dequantized after decoding). + + const int num_attributes = GetNumAttributes(); + uint32_t total_dimensionality = 0; // position is a required dimension + std::vector atts(num_attributes); + + for (int i = 0; i < GetNumAttributes(); ++i) { + const int att_id = GetAttributeId(i); + PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id); + // All attributes have the same number of values and identity mapping + // between PointIndex and AttributeValueIndex. + att->Reset(num_points); + att->SetIdentityMapping(); + + PointAttribute *target_att = nullptr; + if (att->data_type() == DT_UINT32 || att->data_type() == DT_UINT16 || + att->data_type() == DT_UINT8) { + // We can decode to these attributes directly. + target_att = att; + } else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 || + att->data_type() == DT_INT8) { + // Prepare storage for data that is used to convert unsigned values back + // to the signed ones. + for (int c = 0; c < att->num_components(); ++c) { + min_signed_values_.push_back(0); + } + target_att = att; + } else if (att->data_type() == DT_FLOAT32) { + // Create a portable attribute that will hold the decoded data. We will + // dequantize the decoded data to the final attribute later on. + const int num_components = att->num_components(); + GeometryAttribute va; + va.Init(att->attribute_type(), nullptr, num_components, DT_UINT32, false, + num_components * DataTypeLength(DT_UINT32), 0); + std::unique_ptr port_att(new PointAttribute(va)); + port_att->SetIdentityMapping(); + port_att->Reset(num_points); + quantized_portable_attributes_.push_back(std::move(port_att)); + target_att = quantized_portable_attributes_.back().get(); + } else { + // Unsupported type. + return false; + } + // Add attribute to the output iterator used by the core algorithm. 
+ const DataType data_type = target_att->data_type(); + const uint32_t data_size = (std::max)(0, DataTypeLength(data_type)); + const uint32_t num_components = target_att->num_components(); + atts[i] = std::make_tuple(target_att, total_dimensionality, data_type, + data_size, num_components); + total_dimensionality += num_components; + } + PointAttributeVectorOutputIterator out_it(atts); + + switch (compression_level) { + case 0: { + DynamicIntegerPointsKdTreeDecoder<0> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 1: { + DynamicIntegerPointsKdTreeDecoder<1> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 2: { + DynamicIntegerPointsKdTreeDecoder<2> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 3: { + DynamicIntegerPointsKdTreeDecoder<3> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 4: { + DynamicIntegerPointsKdTreeDecoder<4> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 5: { + DynamicIntegerPointsKdTreeDecoder<5> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 6: { + DynamicIntegerPointsKdTreeDecoder<6> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + default: + return false; + } + return true; +} + +bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms( + DecoderBuffer *in_buffer) { + if (in_buffer->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 3)) { + // Decode quantization data for each attribute that need it. + // TODO(ostava): This should be moved to AttributeQuantizationTransform. + std::vector min_value; + for (int i = 0; i < GetNumAttributes(); ++i) { + const int att_id = GetAttributeId(i); + const PointAttribute *const att = + GetDecoder()->point_cloud()->attribute(att_id); + if (att->data_type() == DT_FLOAT32) { + const int num_components = att->num_components(); + min_value.resize(num_components); + if (!in_buffer->Decode(&min_value[0], sizeof(float) * num_components)) { + return false; + } + float max_value_dif; + if (!in_buffer->Decode(&max_value_dif)) { + return false; + } + uint8_t quantization_bits; + if (!in_buffer->Decode(&quantization_bits) || quantization_bits > 31) { + return false; + } + AttributeQuantizationTransform transform; + if (!transform.SetParameters(quantization_bits, min_value.data(), + num_components, max_value_dif)) { + return false; + } + const int num_transforms = + static_cast(attribute_quantization_transforms_.size()); + if (!transform.TransferToAttribute( + quantized_portable_attributes_[num_transforms].get())) { + return false; + } + attribute_quantization_transforms_.push_back(transform); + } + } + + // Decode transform data for signed integer attributes. 
+ for (int i = 0; i < min_signed_values_.size(); ++i) { + int32_t val; + if (!DecodeVarint(&val, in_buffer)) { + return false; + } + min_signed_values_[i] = val; + } + return true; + } +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + // Handle old bitstream + // Figure out the total dimensionality of the point cloud + const uint32_t attribute_count = GetNumAttributes(); + uint32_t total_dimensionality = 0; // position is a required dimension + std::vector atts(attribute_count); + for (auto attribute_index = 0; + static_cast(attribute_index) < attribute_count; + attribute_index += 1) // increment the dimensionality as needed... + { + const int att_id = GetAttributeId(attribute_index); + PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id); + const DataType data_type = att->data_type(); + const uint32_t data_size = (std::max)(0, DataTypeLength(data_type)); + const uint32_t num_components = att->num_components(); + if (data_size > 4) { + return false; + } + + atts[attribute_index] = std::make_tuple( + att, total_dimensionality, data_type, data_size, num_components); + // everything is treated as 32bit in the encoder. + total_dimensionality += num_components; + } + + const int att_id = GetAttributeId(0); + PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id); + att->SetIdentityMapping(); + // Decode method + uint8_t method; + if (!in_buffer->Decode(&method)) { + return false; + } + if (method == KdTreeAttributesEncodingMethod::kKdTreeQuantizationEncoding) { + uint8_t compression_level = 0; + if (!in_buffer->Decode(&compression_level)) { + return false; + } + uint32_t num_points = 0; + if (!in_buffer->Decode(&num_points)) { + return false; + } + att->Reset(num_points); + FloatPointsTreeDecoder decoder; + decoder.set_num_points_from_header(num_points); + PointAttributeVectorOutputIterator out_it(atts); + if (!decoder.DecodePointCloud(in_buffer, out_it)) { + return false; + } + } else if (method == KdTreeAttributesEncodingMethod::kKdTreeIntegerEncoding) { + uint8_t compression_level = 0; + if (!in_buffer->Decode(&compression_level)) { + return false; + } + if (6 < compression_level) { + DRACO_LOGE( + "KdTreeAttributesDecoder: compression level %i not supported.\n", + compression_level); + return false; + } + + uint32_t num_points; + if (!in_buffer->Decode(&num_points)) { + return false; + } + + for (auto attribute_index = 0; + static_cast(attribute_index) < attribute_count; + attribute_index += 1) { + const int att_id = GetAttributeId(attribute_index); + PointAttribute *const attr = + GetDecoder()->point_cloud()->attribute(att_id); + attr->Reset(num_points); + attr->SetIdentityMapping(); + }; + + PointAttributeVectorOutputIterator out_it(atts); + + switch (compression_level) { + case 0: { + DynamicIntegerPointsKdTreeDecoder<0> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 1: { + DynamicIntegerPointsKdTreeDecoder<1> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 2: { + DynamicIntegerPointsKdTreeDecoder<2> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 3: { + DynamicIntegerPointsKdTreeDecoder<3> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 4: { + DynamicIntegerPointsKdTreeDecoder<4> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) 
{ + return false; + } + break; + } + case 5: { + DynamicIntegerPointsKdTreeDecoder<5> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + case 6: { + DynamicIntegerPointsKdTreeDecoder<6> decoder(total_dimensionality); + if (!decoder.DecodePoints(in_buffer, out_it)) { + return false; + } + break; + } + default: + return false; + } + } else { + // Invalid method. + return false; + } + return true; +#else + return false; +#endif +} + +template +bool KdTreeAttributesDecoder::TransformAttributeBackToSignedType( + PointAttribute *att, int num_processed_signed_components) { + typedef typename std::make_unsigned::type UnsignedType; + std::vector unsigned_val(att->num_components()); + std::vector signed_val(att->num_components()); + + for (AttributeValueIndex avi(0); avi < static_cast(att->size()); + ++avi) { + att->GetValue(avi, &unsigned_val[0]); + for (int c = 0; c < att->num_components(); ++c) { + // Up-cast |unsigned_val| to int32_t to ensure we don't overflow it for + // smaller data types. + signed_val[c] = static_cast( + static_cast(unsigned_val[c]) + + min_signed_values_[num_processed_signed_components + c]); + } + att->SetAttributeValue(avi, &signed_val[0]); + } + return true; +} + +bool KdTreeAttributesDecoder::TransformAttributesToOriginalFormat() { + if (quantized_portable_attributes_.empty() && min_signed_values_.empty()) { + return true; + } + int num_processed_quantized_attributes = 0; + int num_processed_signed_components = 0; + // Dequantize attributes that needed it. + for (int i = 0; i < GetNumAttributes(); ++i) { + const int att_id = GetAttributeId(i); + PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id); + if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 || + att->data_type() == DT_INT8) { + std::vector unsigned_val(att->num_components()); + std::vector signed_val(att->num_components()); + // Values are stored as unsigned in the attribute, make them signed again. + if (att->data_type() == DT_INT32) { + if (!TransformAttributeBackToSignedType( + att, num_processed_signed_components)) { + return false; + } + } else if (att->data_type() == DT_INT16) { + if (!TransformAttributeBackToSignedType( + att, num_processed_signed_components)) { + return false; + } + } else if (att->data_type() == DT_INT8) { + if (!TransformAttributeBackToSignedType( + att, num_processed_signed_components)) { + return false; + } + } + num_processed_signed_components += att->num_components(); + } else if (att->data_type() == DT_FLOAT32) { + // TODO(ostava): This code should be probably moved out to attribute + // transform and shared with the SequentialQuantizationAttributeDecoder. + + const PointAttribute *const src_att = + quantized_portable_attributes_[num_processed_quantized_attributes] + .get(); + + const AttributeQuantizationTransform &transform = + attribute_quantization_transforms_ + [num_processed_quantized_attributes]; + + num_processed_quantized_attributes++; + + if (GetDecoder()->options()->GetAttributeBool( + att->attribute_type(), "skip_attribute_transform", false)) { + // Attribute transform should not be performed. In this case, we replace + // the output geometry attribute with the portable attribute. + // TODO(ostava): We can potentially avoid this copy by introducing a new + // mechanism that would allow to use the final attributes as portable + // attributes for predictors that may need them. + att->CopyFrom(*src_att); + continue; + } + + // Convert all quantized values back to floats. 
+ const int32_t max_quantized_value = + (1u << static_cast(transform.quantization_bits())) - 1; + const int num_components = att->num_components(); + const int entry_size = sizeof(float) * num_components; + const std::unique_ptr att_val(new float[num_components]); + int quant_val_id = 0; + int out_byte_pos = 0; + Dequantizer dequantizer; + if (!dequantizer.Init(transform.range(), max_quantized_value)) { + return false; + } + const uint32_t *const portable_attribute_data = + reinterpret_cast( + src_att->GetAddress(AttributeValueIndex(0))); + for (uint32_t i = 0; i < src_att->size(); ++i) { + for (int c = 0; c < num_components; ++c) { + float value = dequantizer.DequantizeFloat( + portable_attribute_data[quant_val_id++]); + value = value + transform.min_value(c); + att_val[c] = value; + } + // Store the floating point value into the attribute buffer. + att->buffer()->Write(out_byte_pos, att_val.get(), entry_size); + out_byte_pos += entry_size; + } + } + } + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h new file mode 100644 index 000000000..87338d6b0 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_decoder.h @@ -0,0 +1,46 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_ + +#include "draco/attributes/attribute_quantization_transform.h" +#include "draco/compression/attributes/attributes_decoder.h" + +namespace draco { + +// Decodes attributes encoded with the KdTreeAttributesEncoder. +class KdTreeAttributesDecoder : public AttributesDecoder { + public: + KdTreeAttributesDecoder(); + + protected: + bool DecodePortableAttributes(DecoderBuffer *in_buffer) override; + bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) override; + bool TransformAttributesToOriginalFormat() override; + + private: + template + bool TransformAttributeBackToSignedType(PointAttribute *att, + int num_processed_signed_components); + + std::vector + attribute_quantization_transforms_; + std::vector min_signed_values_; + std::vector> quantized_portable_attributes_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.cc b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.cc new file mode 100644 index 000000000..b70deb9e0 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.cc @@ -0,0 +1,305 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/kd_tree_attributes_encoder.h" + +#include "draco/compression/attributes/kd_tree_attributes_shared.h" +#include "draco/compression/attributes/point_d_vector.h" +#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h" +#include "draco/compression/point_cloud/algorithms/float_points_tree_encoder.h" +#include "draco/compression/point_cloud/point_cloud_encoder.h" +#include "draco/core/varint_encoding.h" + +namespace draco { + +KdTreeAttributesEncoder::KdTreeAttributesEncoder() : num_components_(0) {} + +KdTreeAttributesEncoder::KdTreeAttributesEncoder(int att_id) + : AttributesEncoder(att_id), num_components_(0) {} + +bool KdTreeAttributesEncoder::TransformAttributesToPortableFormat() { + // Convert any of the input attributes into a format that can be processed by + // the kd tree encoder (quantization of floating attributes for now). + const size_t num_points = encoder()->point_cloud()->num_points(); + int num_components = 0; + for (uint32_t i = 0; i < num_attributes(); ++i) { + const int att_id = GetAttributeId(i); + const PointAttribute *const att = + encoder()->point_cloud()->attribute(att_id); + num_components += att->num_components(); + } + num_components_ = num_components; + + // Go over all attributes and quantize them if needed. + for (uint32_t i = 0; i < num_attributes(); ++i) { + const int att_id = GetAttributeId(i); + const PointAttribute *const att = + encoder()->point_cloud()->attribute(att_id); + if (att->data_type() == DT_FLOAT32) { + // Quantization path. + AttributeQuantizationTransform attribute_quantization_transform; + const int quantization_bits = encoder()->options()->GetAttributeInt( + att_id, "quantization_bits", -1); + if (quantization_bits < 1) { + return false; + } + if (encoder()->options()->IsAttributeOptionSet(att_id, + "quantization_origin") && + encoder()->options()->IsAttributeOptionSet(att_id, + "quantization_range")) { + // Quantization settings are explicitly specified in the provided + // options. + std::vector quantization_origin(att->num_components()); + encoder()->options()->GetAttributeVector(att_id, "quantization_origin", + att->num_components(), + &quantization_origin[0]); + const float range = encoder()->options()->GetAttributeFloat( + att_id, "quantization_range", 1.f); + attribute_quantization_transform.SetParameters( + quantization_bits, quantization_origin.data(), + att->num_components(), range); + } else { + // Compute quantization settings from the attribute values. + if (!attribute_quantization_transform.ComputeParameters( + *att, quantization_bits)) { + return false; + } + } + attribute_quantization_transforms_.push_back( + attribute_quantization_transform); + // Store the quantized attribute in an array that will be used when we do + // the actual encoding of the data. 
+ auto portable_att = + attribute_quantization_transform.InitTransformedAttribute(*att, + num_points); + attribute_quantization_transform.TransformAttribute(*att, {}, + portable_att.get()); + quantized_portable_attributes_.push_back(std::move(portable_att)); + } else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 || + att->data_type() == DT_INT8) { + // For signed types, find the minimum value for each component. These + // values are going to be used to transform the attribute values to + // unsigned integers that can be processed by the core kd tree algorithm. + std::vector min_value(att->num_components(), + std::numeric_limits::max()); + std::vector act_value(att->num_components()); + for (AttributeValueIndex avi(0); avi < static_cast(att->size()); + ++avi) { + att->ConvertValue(avi, &act_value[0]); + for (int c = 0; c < att->num_components(); ++c) { + if (min_value[c] > act_value[c]) { + min_value[c] = act_value[c]; + } + } + } + for (int c = 0; c < att->num_components(); ++c) { + min_signed_values_.push_back(min_value[c]); + } + } + } + return true; +} + +bool KdTreeAttributesEncoder::EncodeDataNeededByPortableTransforms( + EncoderBuffer *out_buffer) { + // Store quantization settings for all attributes that need it. + for (int i = 0; i < attribute_quantization_transforms_.size(); ++i) { + attribute_quantization_transforms_[i].EncodeParameters(out_buffer); + } + + // Encode data needed for transforming signed integers to unsigned ones. + for (int i = 0; i < min_signed_values_.size(); ++i) { + EncodeVarint(min_signed_values_[i], out_buffer); + } + return true; +} + +bool KdTreeAttributesEncoder::EncodePortableAttributes( + EncoderBuffer *out_buffer) { + // Encode the data using the kd tree encoder algorithm. The data is first + // copied to a PointDVector that provides all the API expected by the core + // encoding algorithm. + + // We limit the maximum value of compression_level to 6 as we don't currently + // have viable algorithms for higher compression levels. + uint8_t compression_level = + std::min(10 - encoder()->options()->GetSpeed(), 6); + DRACO_DCHECK_LE(compression_level, 6); + + if (compression_level == 6 && num_components_ > 15) { + // Don't use compression level for CL >= 6. Axis selection is currently + // encoded using 4 bits. + compression_level = 5; + } + + out_buffer->Encode(compression_level); + + // Init PointDVector. The number of dimensions is equal to the total number + // of dimensions across all attributes. + const int num_points = encoder()->point_cloud()->num_points(); + PointDVector point_vector(num_points, num_components_); + + int num_processed_components = 0; + int num_processed_quantized_attributes = 0; + int num_processed_signed_components = 0; + // Copy data to the point vector. + for (uint32_t i = 0; i < num_attributes(); ++i) { + const int att_id = GetAttributeId(i); + const PointAttribute *const att = + encoder()->point_cloud()->attribute(att_id); + const PointAttribute *source_att = nullptr; + if (att->data_type() == DT_UINT32 || att->data_type() == DT_UINT16 || + att->data_type() == DT_UINT8 || att->data_type() == DT_INT32 || + att->data_type() == DT_INT16 || att->data_type() == DT_INT8) { + // Use the original attribute. + source_att = att; + } else if (att->data_type() == DT_FLOAT32) { + // Use the portable (quantized) attribute instead. + source_att = + quantized_portable_attributes_[num_processed_quantized_attributes] + .get(); + num_processed_quantized_attributes++; + } else { + // Unsupported data type. 
+ return false; + } + + if (source_att == nullptr) { + return false; + } + + // Copy source_att to the vector. + if (source_att->data_type() == DT_UINT32) { + // If the data type is the same as the one used by the point vector, we + // can directly copy individual elements. + for (PointIndex pi(0); pi < num_points; ++pi) { + const AttributeValueIndex avi = source_att->mapped_index(pi); + const uint8_t *const att_value_address = source_att->GetAddress(avi); + point_vector.CopyAttribute(source_att->num_components(), + num_processed_components, pi.value(), + att_value_address); + } + } else if (source_att->data_type() == DT_INT32 || + source_att->data_type() == DT_INT16 || + source_att->data_type() == DT_INT8) { + // Signed values need to be converted to unsigned before they are stored + // in the point vector. + std::vector signed_point(source_att->num_components()); + std::vector unsigned_point(source_att->num_components()); + for (PointIndex pi(0); pi < num_points; ++pi) { + const AttributeValueIndex avi = source_att->mapped_index(pi); + source_att->ConvertValue(avi, &signed_point[0]); + for (int c = 0; c < source_att->num_components(); ++c) { + unsigned_point[c] = + signed_point[c] - + min_signed_values_[num_processed_signed_components + c]; + } + + point_vector.CopyAttribute(source_att->num_components(), + num_processed_components, pi.value(), + &unsigned_point[0]); + } + num_processed_signed_components += source_att->num_components(); + } else { + // If the data type of the attribute is different, we have to convert the + // value before we put it to the point vector. + std::vector point(source_att->num_components()); + for (PointIndex pi(0); pi < num_points; ++pi) { + const AttributeValueIndex avi = source_att->mapped_index(pi); + source_att->ConvertValue(avi, &point[0]); + point_vector.CopyAttribute(source_att->num_components(), + num_processed_components, pi.value(), + point.data()); + } + } + num_processed_components += source_att->num_components(); + } + + // Compute the maximum bit length needed for the kd tree encoding. 
+ int num_bits = 0; + const uint32_t *data = point_vector[0]; + for (int i = 0; i < num_points * num_components_; ++i) { + if (data[i] > 0) { + const int msb = MostSignificantBit(data[i]) + 1; + if (msb > num_bits) { + num_bits = msb; + } + } + } + + switch (compression_level) { + case 6: { + DynamicIntegerPointsKdTreeEncoder<6> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + case 5: { + DynamicIntegerPointsKdTreeEncoder<5> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + case 4: { + DynamicIntegerPointsKdTreeEncoder<4> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + case 3: { + DynamicIntegerPointsKdTreeEncoder<3> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + case 2: { + DynamicIntegerPointsKdTreeEncoder<2> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + case 1: { + DynamicIntegerPointsKdTreeEncoder<1> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + case 0: { + DynamicIntegerPointsKdTreeEncoder<0> points_encoder(num_components_); + if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(), + num_bits, out_buffer)) { + return false; + } + break; + } + // Compression level and/or encoding speed seem wrong. + default: + return false; + } + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.h b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.h new file mode 100644 index 000000000..80748e0bf --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_encoder.h @@ -0,0 +1,51 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_ + +#include "draco/attributes/attribute_quantization_transform.h" +#include "draco/compression/attributes/attributes_encoder.h" +#include "draco/compression/config/compression_shared.h" + +namespace draco { + +// Encodes all attributes of a given PointCloud using one of the available +// Kd-tree compression methods. +// See compression/point_cloud/point_cloud_kd_tree_encoder.h for more details. 
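Both the decoder and the encoder in this patch dispatch a runtime compression level (0 through 6) to a class template instantiated per level, which is why the switch statements above enumerate every case explicitly. A generic, self-contained sketch of the same pattern (LevelledCodec and RunForLevel are illustrative stand-ins, not Draco types):

```cpp
#include <cstdint>
#include <iostream>

// Stand-in for a per-level codec: the level is a compile-time parameter so
// each instantiation can be specialized and optimized separately.
template <int level>
struct LevelledCodec {
  bool Run(uint32_t dimensions) {
    std::cout << "codec level " << level << ", dims " << dimensions << "\n";
    return true;
  }
};

// Runtime value -> compile-time template argument via an exhaustive switch.
static bool RunForLevel(int level, uint32_t dimensions) {
  switch (level) {
    case 0: return LevelledCodec<0>().Run(dimensions);
    case 1: return LevelledCodec<1>().Run(dimensions);
    case 2: return LevelledCodec<2>().Run(dimensions);
    case 3: return LevelledCodec<3>().Run(dimensions);
    case 4: return LevelledCodec<4>().Run(dimensions);
    case 5: return LevelledCodec<5>().Run(dimensions);
    case 6: return LevelledCodec<6>().Run(dimensions);
    default: return false;  // Unsupported level, mirroring the code above.
  }
}

int main() { return RunForLevel(3, 7) ? 0 : 1; }
```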
+class KdTreeAttributesEncoder : public AttributesEncoder { + public: + KdTreeAttributesEncoder(); + explicit KdTreeAttributesEncoder(int att_id); + + uint8_t GetUniqueId() const override { return KD_TREE_ATTRIBUTE_ENCODER; } + + protected: + bool TransformAttributesToPortableFormat() override; + bool EncodePortableAttributes(EncoderBuffer *out_buffer) override; + bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) override; + + private: + std::vector + attribute_quantization_transforms_; + // Min signed values are used to transform signed integers into unsigned ones + // (by subtracting the min signed value for each component). + std::vector min_signed_values_; + std::vector> quantized_portable_attributes_; + int num_components_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_shared.h b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_shared.h new file mode 100644 index 000000000..94841a91d --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/kd_tree_attributes_shared.h @@ -0,0 +1,28 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_ + +namespace draco { + +// Defines types of kD-tree compression +enum KdTreeAttributesEncodingMethod { + kKdTreeQuantizationEncoding = 0, + kKdTreeIntegerEncoding +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_ diff --git a/contrib/draco/src/draco/compression/attributes/linear_sequencer.h b/contrib/draco/src/draco/compression/attributes/linear_sequencer.h new file mode 100644 index 000000000..7d9b52641 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/linear_sequencer.h @@ -0,0 +1,51 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_ + +#include "draco/compression/attributes/points_sequencer.h" + +namespace draco { + +// A simple sequencer that generates a linear sequence [0, num_points - 1]. +// I.e., the order of the points is preserved for the input data. 
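The min_signed_values_ member documented in KdTreeAttributesEncoder above is the whole trick for signed integer attributes: the encoder records the per-component minimum, subtracts it so the kd-tree core only sees non-negative values, and the decoder adds it back (TransformAttributeBackToSignedType in kd_tree_attributes_decoder.cc). A minimal standalone version of that round trip, with illustrative names (ShiftToUnsigned / ShiftBackToSigned) rather than Draco's:

```cpp
#include <cstdint>
#include <vector>

// Encoder side: shift one component's values so they become non-negative and
// remember the minimum as side data.
static std::vector<uint32_t> ShiftToUnsigned(const std::vector<int32_t> &values,
                                             int32_t *out_min) {
  int32_t min_value = values.empty() ? 0 : values[0];
  for (int32_t v : values) {
    if (v < min_value) min_value = v;
  }
  std::vector<uint32_t> shifted;
  shifted.reserve(values.size());
  for (int32_t v : values) {
    // Widen through int64_t so extreme int32 ranges cannot overflow.
    shifted.push_back(
        static_cast<uint32_t>(static_cast<int64_t>(v) - min_value));
  }
  *out_min = min_value;
  return shifted;
}

// Decoder side: add the stored minimum back to recover the signed values.
static std::vector<int32_t> ShiftBackToSigned(
    const std::vector<uint32_t> &values, int32_t min_value) {
  std::vector<int32_t> restored;
  restored.reserve(values.size());
  for (uint32_t v : values) {
    restored.push_back(
        static_cast<int32_t>(static_cast<int64_t>(v) + min_value));
  }
  return restored;
}
```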
+class LinearSequencer : public PointsSequencer { + public: + explicit LinearSequencer(int32_t num_points) : num_points_(num_points) {} + + bool UpdatePointToAttributeIndexMapping(PointAttribute *attribute) override { + attribute->SetIdentityMapping(); + return true; + } + + protected: + bool GenerateSequenceInternal() override { + if (num_points_ < 0) { + return false; + } + out_point_ids()->resize(num_points_); + for (int i = 0; i < num_points_; ++i) { + out_point_ids()->at(i) = PointIndex(i); + } + return true; + } + + private: + int32_t num_points_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/mesh_attribute_indices_encoding_data.h b/contrib/draco/src/draco/compression/attributes/mesh_attribute_indices_encoding_data.h new file mode 100644 index 000000000..9a358e447 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/mesh_attribute_indices_encoding_data.h @@ -0,0 +1,58 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_ + +#include + +#include + +#include "draco/attributes/geometry_indices.h" + +namespace draco { + +// Data used for encoding and decoding of mesh attributes. +struct MeshAttributeIndicesEncodingData { + MeshAttributeIndicesEncodingData() : num_values(0) {} + + void Init(int num_vertices) { + vertex_to_encoded_attribute_value_index_map.resize(num_vertices); + + // We expect to store one value for each vertex. + encoded_attribute_value_index_to_corner_map.reserve(num_vertices); + } + + // Array for storing the corner ids in the order their associated attribute + // entries were encoded/decoded. For every encoded attribute value entry we + // store exactly one corner. I.e., this is the mapping between an encoded + // attribute entry ids and corner ids. This map is needed for example by + // prediction schemes. Note that not all corners are included in this map, + // e.g., if multiple corners share the same attribute value, only one of these + // corners will be usually included. + std::vector encoded_attribute_value_index_to_corner_map; + + // Map for storing encoding order of attribute entries for each vertex. + // i.e. Mapping between vertices and their corresponding attribute entry ids + // that are going to be used by the decoder. + // -1 if an attribute entry hasn't been encoded/decoded yet. + std::vector vertex_to_encoded_attribute_value_index_map; + + // Total number of encoded/decoded attribute entries. 
+ int num_values; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_ diff --git a/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h b/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h new file mode 100644 index 000000000..8a6f25b66 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/normal_compression_utils.h @@ -0,0 +1,360 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Utilities for converting unit vectors to octahedral coordinates and back. +// For more details about octahedral coordinates, see for example Cigolle +// et al.'14 “A Survey of Efficient Representations for Independent Unit +// Vectors”. +// +// In short this is motivated by an octahedron inscribed into a sphere. The +// direction of the normal vector can be defined by a point on the octahedron. +// On the right hemisphere (x > 0) this point is projected onto the x = 0 plane, +// that is, the right side of the octahedron forms a diamond like shape. The +// left side of the octahedron is also projected onto the x = 0 plane, however, +// in this case we flap the triangles of the diamond outward. Afterwards we +// shift the resulting square such that all values are positive. +// +// Important values in this file: +// * q: number of quantization bits +// * max_quantized_value: the max value representable with q bits (odd) +// * max_value: max value of the diamond = max_quantized_value - 1 (even) +// * center_value: center of the diamond after shift +// +// Note that the parameter space is somewhat periodic, e.g. (0, 0) == +// (max_value, max_value), which is also why the diamond is one smaller than the +// maximal representable value in order to have an odd range of values. + +#ifndef DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_ + +#include + +#include +#include + +#include "draco/core/macros.h" + +namespace draco { + +class OctahedronToolBox { + public: + OctahedronToolBox() + : quantization_bits_(-1), + max_quantized_value_(-1), + max_value_(-1), + dequantization_scale_(1.f), + center_value_(-1) {} + + bool SetQuantizationBits(int32_t q) { + if (q < 2 || q > 30) { + return false; + } + quantization_bits_ = q; + max_quantized_value_ = (1 << quantization_bits_) - 1; + max_value_ = max_quantized_value_ - 1; + dequantization_scale_ = 2.f / max_value_; + center_value_ = max_value_ / 2; + return true; + } + bool IsInitialized() const { return quantization_bits_ != -1; } + + // Convert all edge points in the top left and bottom right quadrants to + // their corresponding position in the bottom left and top right quadrants. + // Convert all corner edge points to the top right corner. 
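// Illustrative aside, not part of the Draco sources: a worked example for the
// parameters above. After SetQuantizationBits(4): max_quantized_value_ =
// (1 << 4) - 1 = 15, max_value_ = 14, center_value_ = 7 and
// dequantization_scale_ = 2.f / 14. With those values the canonicalization
// below maps the corner point (0, 0) to (14, 14), and the edge point (0, 10)
// -- where t > center_value_ -- to (0, 7 - (10 - 7)) = (0, 4).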
+ inline void CanonicalizeOctahedralCoords(int32_t s, int32_t t, int32_t *out_s, + int32_t *out_t) const { + if ((s == 0 && t == 0) || (s == 0 && t == max_value_) || + (s == max_value_ && t == 0)) { + s = max_value_; + t = max_value_; + } else if (s == 0 && t > center_value_) { + t = center_value_ - (t - center_value_); + } else if (s == max_value_ && t < center_value_) { + t = center_value_ + (center_value_ - t); + } else if (t == max_value_ && s < center_value_) { + s = center_value_ + (center_value_ - s); + } else if (t == 0 && s > center_value_) { + s = center_value_ - (s - center_value_); + } + + *out_s = s; + *out_t = t; + } + + // Converts an integer vector to octahedral coordinates. + // Precondition: |int_vec| abs sum must equal center value. + inline void IntegerVectorToQuantizedOctahedralCoords(const int32_t *int_vec, + int32_t *out_s, + int32_t *out_t) const { + DRACO_DCHECK_EQ( + std::abs(int_vec[0]) + std::abs(int_vec[1]) + std::abs(int_vec[2]), + center_value_); + int32_t s, t; + if (int_vec[0] >= 0) { + // Right hemisphere. + s = (int_vec[1] + center_value_); + t = (int_vec[2] + center_value_); + } else { + // Left hemisphere. + if (int_vec[1] < 0) { + s = std::abs(int_vec[2]); + } else { + s = (max_value_ - std::abs(int_vec[2])); + } + if (int_vec[2] < 0) { + t = std::abs(int_vec[1]); + } else { + t = (max_value_ - std::abs(int_vec[1])); + } + } + CanonicalizeOctahedralCoords(s, t, out_s, out_t); + } + + template + void FloatVectorToQuantizedOctahedralCoords(const T *vector, int32_t *out_s, + int32_t *out_t) const { + const double abs_sum = std::abs(static_cast(vector[0])) + + std::abs(static_cast(vector[1])) + + std::abs(static_cast(vector[2])); + + // Adjust values such that abs sum equals 1. + double scaled_vector[3]; + if (abs_sum > 1e-6) { + // Scale needed to project the vector to the surface of an octahedron. + const double scale = 1.0 / abs_sum; + scaled_vector[0] = vector[0] * scale; + scaled_vector[1] = vector[1] * scale; + scaled_vector[2] = vector[2] * scale; + } else { + scaled_vector[0] = 1.0; + scaled_vector[1] = 0; + scaled_vector[2] = 0; + } + + // Scale vector such that the sum equals the center value. + int32_t int_vec[3]; + int_vec[0] = + static_cast(floor(scaled_vector[0] * center_value_ + 0.5)); + int_vec[1] = + static_cast(floor(scaled_vector[1] * center_value_ + 0.5)); + // Make sure the sum is exactly the center value. + int_vec[2] = center_value_ - std::abs(int_vec[0]) - std::abs(int_vec[1]); + if (int_vec[2] < 0) { + // If the sum of first two coordinates is too large, we need to decrease + // the length of one of the coordinates. + if (int_vec[1] > 0) { + int_vec[1] += int_vec[2]; + } else { + int_vec[1] -= int_vec[2]; + } + int_vec[2] = 0; + } + // Take care of the sign. 
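// Illustrative aside, not part of the Draco sources: |int_vec[2]| was derived
// from absolute values above, so its sign still has to be restored from the
// input. Example, assuming center_value_ = 7 (4 quantization bits), for the
// vector (-0.1, 0.2, -0.7): abs_sum = 1.0, int_vec[0] = floor(-0.7 + 0.5) = -1,
// int_vec[1] = floor(1.4 + 0.5) = 1, int_vec[2] = 7 - 1 - 1 = 5, and the branch
// below flips it to -5, giving (-1, 1, -5) whose absolute sum is exactly 7.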
+ if (scaled_vector[2] < 0) { + int_vec[2] *= -1; + } + + IntegerVectorToQuantizedOctahedralCoords(int_vec, out_s, out_t); + } + + // Normalize |vec| such that its abs sum is equal to the center value; + template + void CanonicalizeIntegerVector(T *vec) const { + static_assert(std::is_integral::value, "T must be an integral type."); + static_assert(std::is_signed::value, "T must be a signed type."); + const int64_t abs_sum = static_cast(std::abs(vec[0])) + + static_cast(std::abs(vec[1])) + + static_cast(std::abs(vec[2])); + + if (abs_sum == 0) { + vec[0] = center_value_; // vec[1] == v[2] == 0 + } else { + vec[0] = + (static_cast(vec[0]) * static_cast(center_value_)) / + abs_sum; + vec[1] = + (static_cast(vec[1]) * static_cast(center_value_)) / + abs_sum; + if (vec[2] >= 0) { + vec[2] = center_value_ - std::abs(vec[0]) - std::abs(vec[1]); + } else { + vec[2] = -(center_value_ - std::abs(vec[0]) - std::abs(vec[1])); + } + } + } + + inline void QuantizedOctahedralCoordsToUnitVector(int32_t in_s, int32_t in_t, + float *out_vector) const { + OctahedralCoordsToUnitVector(in_s * dequantization_scale_ - 1.f, + in_t * dequantization_scale_ - 1.f, + out_vector); + } + + // |s| and |t| are expected to be signed values. + inline bool IsInDiamond(const int32_t &s, const int32_t &t) const { + // Expect center already at origin. + DRACO_DCHECK_LE(s, center_value_); + DRACO_DCHECK_LE(t, center_value_); + DRACO_DCHECK_GE(s, -center_value_); + DRACO_DCHECK_GE(t, -center_value_); + return std::abs(s) + std::abs(t) <= center_value_; + } + + void InvertDiamond(int32_t *s, int32_t *t) const { + // Expect center already at origin. + DRACO_DCHECK_LE(*s, center_value_); + DRACO_DCHECK_LE(*t, center_value_); + DRACO_DCHECK_GE(*s, -center_value_); + DRACO_DCHECK_GE(*t, -center_value_); + int32_t sign_s = 0; + int32_t sign_t = 0; + if (*s >= 0 && *t >= 0) { + sign_s = 1; + sign_t = 1; + } else if (*s <= 0 && *t <= 0) { + sign_s = -1; + sign_t = -1; + } else { + sign_s = (*s > 0) ? 1 : -1; + sign_t = (*t > 0) ? 1 : -1; + } + + const int32_t corner_point_s = sign_s * center_value_; + const int32_t corner_point_t = sign_t * center_value_; + *s = 2 * *s - corner_point_s; + *t = 2 * *t - corner_point_t; + if (sign_s * sign_t >= 0) { + int32_t temp = *s; + *s = -*t; + *t = -temp; + } else { + std::swap(*s, *t); + } + *s = (*s + corner_point_s) / 2; + *t = (*t + corner_point_t) / 2; + } + + void InvertDirection(int32_t *s, int32_t *t) const { + // Expect center already at origin. + DRACO_DCHECK_LE(*s, center_value_); + DRACO_DCHECK_LE(*t, center_value_); + DRACO_DCHECK_GE(*s, -center_value_); + DRACO_DCHECK_GE(*t, -center_value_); + *s *= -1; + *t *= -1; + this->InvertDiamond(s, t); + } + + // For correction values. + int32_t ModMax(int32_t x) const { + if (x > this->center_value()) { + return x - this->max_quantized_value(); + } + if (x < -this->center_value()) { + return x + this->max_quantized_value(); + } + return x; + } + + // For correction values. 
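// Illustrative aside, not part of the Draco sources: with 4 quantization bits
// (max_quantized_value() == 15, center_value() == 7) the two correction
// helpers are inverses modulo 15. ModMax(12) = 12 - 15 = -3 maps a large
// positive correction into the symmetric range [-7, 7], and
// MakePositive(-3) = -3 + 15 = 12 maps it back to a non-negative symbol.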
+ int32_t MakePositive(int32_t x) const { + DRACO_DCHECK_LE(x, this->center_value() * 2); + if (x < 0) { + return x + this->max_quantized_value(); + } + return x; + } + + int32_t quantization_bits() const { return quantization_bits_; } + int32_t max_quantized_value() const { return max_quantized_value_; } + int32_t max_value() const { return max_value_; } + int32_t center_value() const { return center_value_; } + + private: + inline void OctahedralCoordsToUnitVector(float in_s_scaled, float in_t_scaled, + float *out_vector) const { + // Background about the encoding: + // A normal is encoded in a normalized space depicted below. The + // encoding correponds to an octahedron that is unwrapped to a 2D plane. + // During encoding, a normal is projected to the surface of the octahedron + // and the projection is then unwrapped to the 2D plane. Decoding is the + // reverse of this process. + // All points in the central diamond are located on triangles on the + // right "hemisphere" of the octahedron while all points outside of the + // diamond are on the left hemisphere (basically, they would have to be + // wrapped along the diagonal edges to form the octahedron). The central + // point corresponds to the right most vertex of the octahedron and all + // corners of the plane correspond to the left most vertex of the + // octahedron. + // + // t + // ^ *-----*-----* + // | | /|\ | + // | / | \ | + // | / | \ | + // | / | \ | + // *-----*---- * + // | \ | / | + // | \ | / | + // | \ | / | + // | \|/ | + // *-----*-----* --> s + + // Note that the input |in_s_scaled| and |in_t_scaled| are already scaled to + // <-1, 1> range. This way, the central point is at coordinate (0, 0). + float y = in_s_scaled; + float z = in_t_scaled; + + // Remaining coordinate can be computed by projecting the (y, z) values onto + // the surface of the octahedron. + const float x = 1.f - abs(y) - abs(z); + + // |x| is essentially a signed distance from the diagonal edges of the + // diamond shown on the figure above. It is positive for all points in the + // diamond (right hemisphere) and negative for all points outside the + // diamond (left hemisphere). For all points on the left hemisphere we need + // to update their (y, z) coordinates to account for the wrapping along + // the edges of the diamond. + float x_offset = -x; + x_offset = x_offset < 0 ? 0 : x_offset; + + // This will do nothing for the points on the right hemisphere but it will + // mirror the (y, z) location along the nearest diagonal edge of the + // diamond. + y += y < 0 ? x_offset : -x_offset; + z += z < 0 ? x_offset : -x_offset; + + // Normalize the computed vector. + const float norm_squared = x * x + y * y + z * z; + if (norm_squared < 1e-6) { + out_vector[0] = 0; + out_vector[1] = 0; + out_vector[2] = 0; + } else { + const float d = 1.0f / std::sqrt(norm_squared); + out_vector[0] = x * d; + out_vector[1] = y * d; + out_vector[2] = z * d; + } + } + + int32_t quantization_bits_; + int32_t max_quantized_value_; + int32_t max_value_; + float dequantization_scale_; + int32_t center_value_; +}; +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_ diff --git a/contrib/draco/src/draco/compression/attributes/point_d_vector.h b/contrib/draco/src/draco/compression/attributes/point_d_vector.h new file mode 100644 index 000000000..3b115d500 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/point_d_vector.h @@ -0,0 +1,279 @@ +// Copyright 2018 The Draco Authors. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
+
+#include <cstring>
+#include <memory>
+#include <vector>
+
+#include "draco/core/macros.h"
+
+namespace draco {
+
+// The main class of this file is PointDVector providing an interface similar to
+// std::vector<PointD> for arbitrary number of dimensions (without a template
+// argument). PointDVectorIterator is a random access iterator, which allows for
+// compatibility with existing algorithms. PseudoPointD provides for a view on
+// the individual items in a contiguous block of memory, which is compatible
+// with the swap function and is returned by a dereference of
+// PointDVectorIterator. Swap functions provide for compatibility/specialization
+// that allows these classes to work with currently utilized STL functions.
+
+// This class allows for swap functionality from the RandomIterator
+// It seems problematic to bring this inside PointDVector due to templating.
+template <typename internal_t>
+class PseudoPointD {
+ public:
+  PseudoPointD(internal_t *mem, internal_t dimension)
+      : mem_(mem), dimension_(dimension) {}
+
+  // Specifically copies referenced memory
+  void swap(PseudoPointD &other) noexcept {
+    for (internal_t dim = 0; dim < dimension_; dim += 1) {
+      std::swap(mem_[dim], other.mem_[dim]);
+    }
+  }
+
+  PseudoPointD(const PseudoPointD &other)
+      : mem_(other.mem_), dimension_(other.dimension_) {}
+
+  const internal_t &operator[](const size_t &n) const {
+    DRACO_DCHECK_LT(n, dimension_);
+    return mem_[n];
+  }
+  internal_t &operator[](const size_t &n) {
+    DRACO_DCHECK_LT(n, dimension_);
+    return mem_[n];
+  }
+
+  bool operator==(const PseudoPointD &other) const {
+    for (auto dim = 0; dim < dimension_; dim += 1) {
+      if (mem_[dim] != other.mem_[dim]) {
+        return false;
+      }
+    }
+    return true;
+  }
+  bool operator!=(const PseudoPointD &other) const {
+    return !this->operator==(other);
+  }
+
+ private:
+  internal_t *const mem_;
+  const internal_t dimension_;
+};
+
+// It seems problematic to bring this inside PointDVector due to templating.
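// Illustrative aside, not part of the Draco sources: a usage sketch for the
// view + swap machinery declared here (types and values are illustrative).
//   uint32_t a[3] = {1, 2, 3};
//   uint32_t b[3] = {7, 8, 9};
//   draco::PseudoPointD<uint32_t> pa(&a[0], 3), pb(&b[0], 3);
//   swap(pa, pb);  // element-wise: a is now {7, 8, 9}, b is {1, 2, 3}
// This is what lets std::partition / std::iter_swap operate on the interleaved
// storage of PointDVector through its iterator.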
+template +void swap(draco::PseudoPointD &&a, + draco::PseudoPointD &&b) noexcept { + a.swap(b); +}; +template +void swap(draco::PseudoPointD &a, + draco::PseudoPointD &b) noexcept { + a.swap(b); +}; + +template +class PointDVector { + public: + PointDVector(const uint32_t n_items, const uint32_t dimensionality) + : n_items_(n_items), + dimensionality_(dimensionality), + item_size_bytes_(dimensionality * sizeof(internal_t)), + data_(n_items * dimensionality), + data0_(data_.data()) {} + // random access iterator + class PointDVectorIterator + : public std::iterator { + friend class PointDVector; + + public: + // std::iter_swap is called inside of std::partition and needs this + // specialized support + PseudoPointD operator*() const { + return PseudoPointD(vec_->data0_ + item_ * dimensionality_, + dimensionality_); + } + const PointDVectorIterator &operator++() { + item_ += 1; + return *this; + } + const PointDVectorIterator &operator--() { + item_ -= 1; + return *this; + } + PointDVectorIterator operator++(int32_t) { + PointDVectorIterator copy(*this); + item_ += 1; + return copy; + } + PointDVectorIterator operator--(int32_t) { + PointDVectorIterator copy(*this); + item_ -= 1; + return copy; + } + PointDVectorIterator &operator=(const PointDVectorIterator &other) { + this->item_ = other.item_; + return *this; + } + + bool operator==(const PointDVectorIterator &ref) const { + return item_ == ref.item_; + } + bool operator!=(const PointDVectorIterator &ref) const { + return item_ != ref.item_; + } + bool operator<(const PointDVectorIterator &ref) const { + return item_ < ref.item_; + } + bool operator>(const PointDVectorIterator &ref) const { + return item_ > ref.item_; + } + bool operator<=(const PointDVectorIterator &ref) const { + return item_ <= ref.item_; + } + bool operator>=(const PointDVectorIterator &ref) const { + return item_ >= ref.item_; + } + + PointDVectorIterator operator+(const int32_t &add) const { + PointDVectorIterator copy(vec_, item_ + add); + return copy; + } + PointDVectorIterator &operator+=(const int32_t &add) { + item_ += add; + return *this; + } + PointDVectorIterator operator-(const int32_t &sub) const { + PointDVectorIterator copy(vec_, item_ - sub); + return copy; + } + size_t operator-(const PointDVectorIterator &sub) const { + return (item_ - sub.item_); + } + + PointDVectorIterator &operator-=(const int32_t &sub) { + item_ -= sub; + return *this; + } + + internal_t *operator[](const size_t &n) const { + return vec_->data0_ + (item_ + n) * dimensionality_; + } + + protected: + explicit PointDVectorIterator(PointDVector *vec, size_t start_item) + : item_(start_item), vec_(vec), dimensionality_(vec->dimensionality_) {} + + private: + size_t item_; // this counts the item that should be referenced. 
+ PointDVector *const vec_; // the thing that we're iterating on + const uint32_t dimensionality_; // local copy from vec_ + }; + + PointDVectorIterator begin() { return PointDVectorIterator(this, 0); } + PointDVectorIterator end() { return PointDVectorIterator(this, n_items_); } + + // operator[] allows for unprotected user-side usage of operator[] on the + // return value AS IF it were a natively indexable type like Point3* + internal_t *operator[](const uint32_t index) { + DRACO_DCHECK_LT(index, n_items_); + return data0_ + index * dimensionality_; + } + const internal_t *operator[](const uint32_t index) const { + DRACO_DCHECK_LT(index, n_items_); + return data0_ + index * dimensionality_; + } + + uint32_t size() const { return n_items_; } + size_t GetBufferSize() const { return data_.size(); } + + // copy a single contiguous 'item' from one PointDVector into this one. + void CopyItem(const PointDVector &source, const internal_t source_index, + const internal_t destination_index) { + DRACO_DCHECK(&source != this || + (&source == this && source_index != destination_index)); + DRACO_DCHECK_LT(destination_index, n_items_); + DRACO_DCHECK_LT(source_index, source.n_items_); + + // DRACO_DCHECK_EQ(source.n_items_, n_items_); // not technically necessary + DRACO_DCHECK_EQ(source.dimensionality_, dimensionality_); + + const internal_t *ref = source[source_index]; + internal_t *const dest = this->operator[](destination_index); + std::memcpy(dest, ref, item_size_bytes_); + } + + // Copy data directly off of an attribute buffer interleaved into internal + // memory. + void CopyAttribute( + // The dimensionality of the attribute being integrated + const internal_t attribute_dimensionality, + // The offset in dimensions to insert this attribute. + const internal_t offset_dimensionality, const internal_t index, + // The direct pointer to the data + const void *const attribute_item_data) { + // chunk copy + const size_t copy_size = sizeof(internal_t) * attribute_dimensionality; + + // a multiply and add can be optimized away with an iterator + std::memcpy(data0_ + index * dimensionality_ + offset_dimensionality, + attribute_item_data, copy_size); + } + // Copy data off of a contiguous buffer interleaved into internal memory + void CopyAttribute( + // The dimensionality of the attribute being integrated + const internal_t attribute_dimensionality, + // The offset in dimensions to insert this attribute. + const internal_t offset_dimensionality, + const internal_t *const attribute_mem) { + DRACO_DCHECK_LT(offset_dimensionality, + dimensionality_ - attribute_dimensionality); + // degenerate case block copy the whole buffer. + if (dimensionality_ == attribute_dimensionality) { + DRACO_DCHECK_EQ(offset_dimensionality, 0); + const size_t copy_size = + sizeof(internal_t) * attribute_dimensionality * n_items_; + std::memcpy(data0_, attribute_mem, copy_size); + } else { // chunk copy + const size_t copy_size = sizeof(internal_t) * attribute_dimensionality; + internal_t *internal_data; + const internal_t *attribute_data; + internal_t item; + for (internal_data = data0_ + offset_dimensionality, + attribute_data = attribute_mem, item = 0; + item < n_items_; internal_data += dimensionality_, + attribute_data += attribute_dimensionality, item += 1) { + std::memcpy(internal_data, attribute_data, copy_size); + } + } + } + + private: + // internal parameters. 
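// Illustrative aside, not part of the Draco sources: layout of the members
// below. data_ stores all points contiguously, so the components of item i
// live at data0_[i * dimensionality_] through
// data0_[i * dimensionality_ + dimensionality_ - 1], and CopyAttribute() above
// writes an attribute of dimension k at offset d into the sub-range
// data0_[i * dimensionality_ + d] through data0_[i * dimensionality_ + d + k - 1].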
+ const uint32_t n_items_; + const uint32_t dimensionality_; // The dimension of the points in the buffer + const uint32_t item_size_bytes_; + std::vector data_; // contiguously stored data. Never resized. + internal_t *const data0_; // raw pointer to base data. +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_ diff --git a/contrib/draco/src/draco/compression/attributes/point_d_vector_test.cc b/contrib/draco/src/draco/compression/attributes/point_d_vector_test.cc new file mode 100644 index 000000000..59f28f80b --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/point_d_vector_test.cc @@ -0,0 +1,360 @@ +// Copyright 2018 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/point_d_vector.h" + +#include "draco/compression/point_cloud/algorithms/point_cloud_types.h" +#include "draco/core/draco_test_base.h" + +namespace draco { + +class PointDVectorTest : public ::testing::Test { + protected: + template + void TestIntegrity() {} + template + void TestSize() { + for (uint32_t n_items = 0; n_items <= 10; ++n_items) { + for (uint32_t dimensionality = 1; dimensionality <= 10; + ++dimensionality) { + draco::PointDVector var(n_items, dimensionality); + ASSERT_EQ(n_items, var.size()); + ASSERT_EQ(n_items * dimensionality, var.GetBufferSize()); + } + } + } + template + void TestContentsContiguous() { + for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) { + for (uint32_t dimensionality = 1; dimensionality < 10; + dimensionality += 2) { + for (uint32_t att_dimensionality = 1; + att_dimensionality <= dimensionality; att_dimensionality += 2) { + for (uint32_t offset_dimensionality = 0; + offset_dimensionality < dimensionality - att_dimensionality; + ++offset_dimensionality) { + PointDVector var(n_items, dimensionality); + + std::vector att(n_items * att_dimensionality); + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + att[val * att_dimensionality + att_dim] = val; + } + } + const PT *const attribute_data = att.data(); + + var.CopyAttribute(att_dimensionality, offset_dimensionality, + attribute_data); + + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + ASSERT_EQ(var[val][offset_dimensionality + att_dim], val); + } + } + } + } + } + } + } + template + void TestContentsDiscrete() { + for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) { + for (uint32_t dimensionality = 1; dimensionality < 10; + dimensionality += 2) { + for (uint32_t att_dimensionality = 1; + att_dimensionality <= dimensionality; att_dimensionality += 2) { + for (uint32_t offset_dimensionality = 0; + offset_dimensionality < dimensionality - att_dimensionality; + ++offset_dimensionality) { + PointDVector var(n_items, dimensionality); + + std::vector att(n_items * att_dimensionality); + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < 
att_dimensionality; att_dim += 1) { + att[val * att_dimensionality + att_dim] = val; + } + } + const PT *const attribute_data = att.data(); + + for (PT item = 0; item < n_items; item += 1) { + var.CopyAttribute(att_dimensionality, offset_dimensionality, item, + attribute_data + item * att_dimensionality); + } + + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + ASSERT_EQ(var[val][offset_dimensionality + att_dim], val); + } + } + } + } + } + } + } + + template + void TestContentsCopy() { + for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) { + for (uint32_t dimensionality = 1; dimensionality < 10; + dimensionality += 2) { + for (uint32_t att_dimensionality = 1; + att_dimensionality <= dimensionality; att_dimensionality += 2) { + for (uint32_t offset_dimensionality = 0; + offset_dimensionality < dimensionality - att_dimensionality; + ++offset_dimensionality) { + PointDVector var(n_items, dimensionality); + PointDVector dest(n_items, dimensionality); + + std::vector att(n_items * att_dimensionality); + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + att[val * att_dimensionality + att_dim] = val; + } + } + const PT *const attribute_data = att.data(); + + var.CopyAttribute(att_dimensionality, offset_dimensionality, + attribute_data); + + for (PT item = 0; item < n_items; item += 1) { + dest.CopyItem(var, item, item); + } + + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + ASSERT_EQ(var[val][offset_dimensionality + att_dim], val); + ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val); + } + } + } + } + } + } + } + template + void TestIterator() { + for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) { + for (uint32_t dimensionality = 1; dimensionality < 10; + dimensionality += 2) { + for (uint32_t att_dimensionality = 1; + att_dimensionality <= dimensionality; att_dimensionality += 2) { + for (uint32_t offset_dimensionality = 0; + offset_dimensionality < dimensionality - att_dimensionality; + ++offset_dimensionality) { + PointDVector var(n_items, dimensionality); + PointDVector dest(n_items, dimensionality); + + std::vector att(n_items * att_dimensionality); + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + att[val * att_dimensionality + att_dim] = val; + } + } + const PT *const attribute_data = att.data(); + + var.CopyAttribute(att_dimensionality, offset_dimensionality, + attribute_data); + + for (PT item = 0; item < n_items; item += 1) { + dest.CopyItem(var, item, item); + } + + auto V0 = var.begin(); + auto VE = var.end(); + auto D0 = dest.begin(); + auto DE = dest.end(); + + while (V0 != VE && D0 != DE) { + ASSERT_EQ(*D0, *V0); // compare PseudoPointD + // verify elemental values + for (auto index = 0; index < dimensionality; index += 1) { + ASSERT_EQ((*D0)[index], (*V0)[index]); + } + ++V0; + ++D0; + } + + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + ASSERT_EQ(var[val][offset_dimensionality + att_dim], val); + ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val); + } + } + } + } + } + } + } + template + void TestPoint3Iterator() { + for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) { + const uint32_t dimensionality = 3; + // for (uint32_t dimensionality = 1; dimensionality < 10; + // dimensionality += 2) { + const uint32_t 
att_dimensionality = 3; + // for (uint32_t att_dimensionality = 1; + // att_dimensionality <= dimensionality; att_dimensionality += 2) { + for (uint32_t offset_dimensionality = 0; + offset_dimensionality < dimensionality - att_dimensionality; + ++offset_dimensionality) { + PointDVector var(n_items, dimensionality); + PointDVector dest(n_items, dimensionality); + + std::vector att(n_items * att_dimensionality); + std::vector att3(n_items); + for (PT val = 0; val < n_items; val += 1) { + att3[val][0] = val; + att3[val][1] = val; + att3[val][2] = val; + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + att[val * att_dimensionality + att_dim] = val; + } + } + const PT *const attribute_data = att.data(); + + var.CopyAttribute(att_dimensionality, offset_dimensionality, + attribute_data); + + for (PT item = 0; item < n_items; item += 1) { + dest.CopyItem(var, item, item); + } + + auto aV0 = att3.begin(); + auto aVE = att3.end(); + auto V0 = var.begin(); + auto VE = var.end(); + auto D0 = dest.begin(); + auto DE = dest.end(); + + while (aV0 != aVE && V0 != VE && D0 != DE) { + ASSERT_EQ(*D0, *V0); // compare PseudoPointD + // verify elemental values + for (auto index = 0; index < dimensionality; index += 1) { + ASSERT_EQ((*D0)[index], (*V0)[index]); + ASSERT_EQ((*D0)[index], (*aV0)[index]); + ASSERT_EQ((*aV0)[index], (*V0)[index]); + } + ++aV0; + ++V0; + ++D0; + } + + for (PT val = 0; val < n_items; val += 1) { + for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) { + ASSERT_EQ(var[val][offset_dimensionality + att_dim], val); + ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val); + } + } + } + } + } + + void TestPseudoPointDSwap() { + draco::Point3ui val = {0, 1, 2}; + draco::Point3ui dest = {10, 11, 12}; + draco::PseudoPointD val_src1(&val[0], 3); + draco::PseudoPointD dest_src1(&dest[0], 3); + + ASSERT_EQ(val_src1[0], 0); + ASSERT_EQ(val_src1[1], 1); + ASSERT_EQ(val_src1[2], 2); + ASSERT_EQ(dest_src1[0], 10); + ASSERT_EQ(dest_src1[1], 11); + ASSERT_EQ(dest_src1[2], 12); + + ASSERT_NE(val_src1, dest_src1); + + swap(val_src1, dest_src1); + + ASSERT_EQ(dest_src1[0], 0); + ASSERT_EQ(dest_src1[1], 1); + ASSERT_EQ(dest_src1[2], 2); + ASSERT_EQ(val_src1[0], 10); + ASSERT_EQ(val_src1[1], 11); + ASSERT_EQ(val_src1[2], 12); + + ASSERT_NE(val_src1, dest_src1); + } + void TestPseudoPointDEquality() { + draco::Point3ui val = {0, 1, 2}; + draco::Point3ui dest = {0, 1, 2}; + draco::PseudoPointD val_src1(&val[0], 3); + draco::PseudoPointD val_src2(&val[0], 3); + draco::PseudoPointD dest_src1(&dest[0], 3); + draco::PseudoPointD dest_src2(&dest[0], 3); + + ASSERT_EQ(val_src1, val_src1); + ASSERT_EQ(val_src1, val_src2); + ASSERT_EQ(dest_src1, val_src1); + ASSERT_EQ(dest_src1, val_src2); + ASSERT_EQ(val_src2, val_src1); + ASSERT_EQ(val_src2, val_src2); + ASSERT_EQ(dest_src2, val_src1); + ASSERT_EQ(dest_src2, val_src2); + + for (auto i = 0; i < 3; i++) { + ASSERT_EQ(val_src1[i], val_src1[i]); + ASSERT_EQ(val_src1[i], val_src2[i]); + ASSERT_EQ(dest_src1[i], val_src1[i]); + ASSERT_EQ(dest_src1[i], val_src2[i]); + ASSERT_EQ(val_src2[i], val_src1[i]); + ASSERT_EQ(val_src2[i], val_src2[i]); + ASSERT_EQ(dest_src2[i], val_src1[i]); + ASSERT_EQ(dest_src2[i], val_src2[i]); + } + } + void TestPseudoPointDInequality() { + draco::Point3ui val = {0, 1, 2}; + draco::Point3ui dest = {1, 2, 3}; + draco::PseudoPointD val_src1(&val[0], 3); + draco::PseudoPointD val_src2(&val[0], 3); + draco::PseudoPointD dest_src1(&dest[0], 3); + draco::PseudoPointD dest_src2(&dest[0], 3); + + ASSERT_EQ(val_src1, 
val_src1); + ASSERT_EQ(val_src1, val_src2); + ASSERT_NE(dest_src1, val_src1); + ASSERT_NE(dest_src1, val_src2); + ASSERT_EQ(val_src2, val_src1); + ASSERT_EQ(val_src2, val_src2); + ASSERT_NE(dest_src2, val_src1); + ASSERT_NE(dest_src2, val_src2); + + for (auto i = 0; i < 3; i++) { + ASSERT_EQ(val_src1[i], val_src1[i]); + ASSERT_EQ(val_src1[i], val_src2[i]); + ASSERT_NE(dest_src1[i], val_src1[i]); + ASSERT_NE(dest_src1[i], val_src2[i]); + ASSERT_EQ(val_src2[i], val_src1[i]); + ASSERT_EQ(val_src2[i], val_src2[i]); + ASSERT_NE(dest_src2[i], val_src1[i]); + ASSERT_NE(dest_src2[i], val_src2[i]); + } + } +}; + +TEST_F(PointDVectorTest, VectorTest) { + TestSize(); + TestContentsDiscrete(); + TestContentsContiguous(); + TestContentsCopy(); + TestIterator(); + TestPoint3Iterator(); +} +TEST_F(PointDVectorTest, PseudoPointDTest) { + TestPseudoPointDSwap(); + TestPseudoPointDEquality(); + TestPseudoPointDInequality(); +} +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/points_sequencer.h b/contrib/draco/src/draco/compression/attributes/points_sequencer.h new file mode 100644 index 000000000..2f4f7e16d --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/points_sequencer.h @@ -0,0 +1,63 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_ + +#include + +#include "draco/attributes/point_attribute.h" + +namespace draco { + +// Class for generating a sequence of point ids that can be used to encode +// or decode attribute values in a specific order. +// See sequential_attribute_encoders/decoders_controller.h for more details. +class PointsSequencer { + public: + PointsSequencer() : out_point_ids_(nullptr) {} + virtual ~PointsSequencer() = default; + + // Fills the |out_point_ids| with the generated sequence of point ids. + bool GenerateSequence(std::vector *out_point_ids) { + out_point_ids_ = out_point_ids; + return GenerateSequenceInternal(); + } + + // Appends a point to the sequence. + void AddPointId(PointIndex point_id) { out_point_ids_->push_back(point_id); } + + // Sets the correct mapping between point ids and value ids. I.e., the inverse + // of the |out_point_ids|. In general, |out_point_ids_| does not contain + // sufficient information to compute the inverse map, because not all point + // ids are necessarily contained within the map. + // Must be implemented for sequencers that are used by attribute decoders. + virtual bool UpdatePointToAttributeIndexMapping(PointAttribute * /* attr */) { + return false; + } + + protected: + // Method that needs to be implemented by the derived classes. The + // implementation is responsible for filling |out_point_ids_| with the valid + // sequence of point ids. 
+  virtual bool GenerateSequenceInternal() = 0;
+  std::vector<PointIndex> *out_point_ids() const { return out_point_ids_; }
+
+ private:
+  std::vector<PointIndex> *out_point_ids_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h
new file mode 100644
index 000000000..36c124baa
--- /dev/null
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h
@@ -0,0 +1,231 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
+
+#include <algorithm>
+#include <cmath>
+
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
+#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
+#include "draco/compression/bit_coders/rans_bit_decoder.h"
+#include "draco/core/varint_decoding.h"
+#include "draco/draco_features.h"
+
+namespace draco {
+
+// Decoder for predictions encoded with the constrained multi-parallelogram
+// encoder. See the corresponding encoder for more details about the prediction
+// method.
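// Illustrative aside, not part of the Draco sources: a background sketch of
// the prediction the decoder below inverts. A single parallelogram predicts
// the new vertex from an adjacent, already decoded triangle as A + B - C
// (A and B on the shared edge, C opposite it); the constrained
// multi-parallelogram scheme averages the predictions of the parallelograms
// whose edges were not marked as creases. Names below are illustrative only.
#include <cstdint>
#include <vector>

namespace doc_example {

// One parallelogram: component-wise A + B - C.
inline void ParallelogramPrediction(const int32_t *a, const int32_t *b,
                                    const int32_t *c, int num_components,
                                    int32_t *out_pred) {
  for (int i = 0; i < num_components; ++i) {
    out_pred[i] = a[i] + b[i] - c[i];
  }
}

// Average of the selected parallelogram predictions; falls back to the
// previously decoded value when none was selected, as the code below does.
inline std::vector<int32_t> AveragePrediction(
    const std::vector<std::vector<int32_t>> &selected_preds,
    const std::vector<int32_t> &previous_value) {
  if (selected_preds.empty()) {
    return previous_value;
  }
  std::vector<int32_t> avg(previous_value.size(), 0);
  for (const auto &pred : selected_preds) {
    for (size_t c = 0; c < avg.size(); ++c) {
      avg[c] += pred[c];
    }
  }
  for (size_t c = 0; c < avg.size(); ++c) {
    avg[c] /= static_cast<int32_t>(selected_preds.size());
  }
  return avg;
}

}  // namespace doc_example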
+template +class MeshPredictionSchemeConstrainedMultiParallelogramDecoder + : public MeshPredictionSchemeDecoder { + public: + using CorrType = + typename PredictionSchemeDecoder::CorrType; + using CornerTable = typename MeshDataT::CornerTable; + + explicit MeshPredictionSchemeConstrainedMultiParallelogramDecoder( + const PointAttribute *attribute) + : MeshPredictionSchemeDecoder( + attribute), + selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {} + MeshPredictionSchemeConstrainedMultiParallelogramDecoder( + const PointAttribute *attribute, const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeDecoder( + attribute, transform, mesh_data), + selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {} + + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + + bool DecodePredictionData(DecoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM; + } + + bool IsInitialized() const override { + return this->mesh_data().IsInitialized(); + } + + private: + typedef constrained_multi_parallelogram::Mode Mode; + static constexpr int kMaxNumParallelograms = + constrained_multi_parallelogram::kMaxNumParallelograms; + // Crease edges are used to store whether any given edge should be used for + // parallelogram prediction or not. New values are added in the order in which + // the edges are processed. For better compression, the flags are stored in + // in separate contexts based on the number of available parallelograms at a + // given vertex. + std::vector is_crease_edge_[kMaxNumParallelograms]; + Mode selected_mode_; +}; + +template +bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder< + DataTypeT, TransformT, MeshDataT>:: + ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int /* size */, int num_components, + const PointIndex * /* entry_to_point_id_map */) { + this->transform().Init(num_components); + + // Predicted values for all simple parallelograms encountered at any given + // vertex. + std::vector pred_vals[kMaxNumParallelograms]; + for (int i = 0; i < kMaxNumParallelograms; ++i) { + pred_vals[i].resize(num_components, 0); + } + this->transform().ComputeOriginalValue(pred_vals[0].data(), in_corr, + out_data); + + const CornerTable *const table = this->mesh_data().corner_table(); + const std::vector *const vertex_to_data_map = + this->mesh_data().vertex_to_data_map(); + + // Current position in the |is_crease_edge_| array for each context. + std::vector is_crease_edge_pos(kMaxNumParallelograms, 0); + + // Used to store predicted value for multi-parallelogram prediction. + std::vector multi_pred_vals(num_components); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + for (int p = 1; p < corner_map_size; ++p) { + const CornerIndex start_corner_id = + this->mesh_data().data_to_corner_map()->at(p); + + CornerIndex corner_id(start_corner_id); + int num_parallelograms = 0; + bool first_pass = true; + while (corner_id != kInvalidCornerIndex) { + if (ComputeParallelogramPrediction( + p, corner_id, table, *vertex_to_data_map, out_data, + num_components, &(pred_vals[num_parallelograms][0]))) { + // Parallelogram prediction applied and stored in + // |pred_vals[num_parallelograms]| + ++num_parallelograms; + // Stop processing when we reach the maximum number of allowed + // parallelograms. 
+ if (num_parallelograms == kMaxNumParallelograms) { + break; + } + } + + // Proceed to the next corner attached to the vertex. First swing left + // and if we reach a boundary, swing right from the start corner. + if (first_pass) { + corner_id = table->SwingLeft(corner_id); + } else { + corner_id = table->SwingRight(corner_id); + } + if (corner_id == start_corner_id) { + break; + } + if (corner_id == kInvalidCornerIndex && first_pass) { + first_pass = false; + corner_id = table->SwingRight(start_corner_id); + } + } + + // Check which of the available parallelograms are actually used and compute + // the final predicted value. + int num_used_parallelograms = 0; + if (num_parallelograms > 0) { + for (int i = 0; i < num_components; ++i) { + multi_pred_vals[i] = 0; + } + // Check which parallelograms are actually used. + for (int i = 0; i < num_parallelograms; ++i) { + const int context = num_parallelograms - 1; + const int pos = is_crease_edge_pos[context]++; + if (is_crease_edge_[context].size() <= pos) { + return false; + } + const bool is_crease = is_crease_edge_[context][pos]; + if (!is_crease) { + ++num_used_parallelograms; + for (int j = 0; j < num_components; ++j) { + multi_pred_vals[j] += pred_vals[i][j]; + } + } + } + } + const int dst_offset = p * num_components; + if (num_used_parallelograms == 0) { + // No parallelogram was valid. + // We use the last decoded point as a reference. + const int src_offset = (p - 1) * num_components; + this->transform().ComputeOriginalValue( + out_data + src_offset, in_corr + dst_offset, out_data + dst_offset); + } else { + // Compute the correction from the predicted value. + for (int c = 0; c < num_components; ++c) { + multi_pred_vals[c] /= num_used_parallelograms; + } + this->transform().ComputeOriginalValue( + multi_pred_vals.data(), in_corr + dst_offset, out_data + dst_offset); + } + } + return true; +} + +template +bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder< + DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer + *buffer) { +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) { + // Decode prediction mode. + uint8_t mode; + if (!buffer->Decode(&mode)) { + return false; + } + + if (mode != Mode::OPTIMAL_MULTI_PARALLELOGRAM) { + // Unsupported mode. + return false; + } + } +#endif + + // Encode selected edges using separate rans bit coder for each context. 
+ for (int i = 0; i < kMaxNumParallelograms; ++i) { + uint32_t num_flags; + if (!DecodeVarint(&num_flags, buffer)) { + return false; + } + if (num_flags > 0) { + is_crease_edge_[i].resize(num_flags); + RAnsBitDecoder decoder; + if (!decoder.StartDecoding(buffer)) { + return false; + } + for (uint32_t j = 0; j < num_flags; ++j) { + is_crease_edge_[i][j] = decoder.DecodeNextBit(); + } + decoder.EndDecoding(); + } + } + return MeshPredictionSchemeDecoder::DecodePredictionData(buffer); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h new file mode 100644 index 000000000..77df8ee24 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h @@ -0,0 +1,414 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_ + +#include +#include + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" +#include "draco/compression/bit_coders/rans_bit_encoder.h" +#include "draco/compression/entropy/shannon_entropy.h" +#include "draco/core/varint_encoding.h" + +namespace draco { + +// Compared to standard multi-parallelogram, constrained multi-parallelogram can +// explicitly select which of the available parallelograms are going to be used +// for the prediction by marking crease edges between two triangles. This +// requires storing extra data, but it allows the predictor to avoid using +// parallelograms that would lead to poor predictions. For improved efficiency, +// our current implementation limits the maximum number of used parallelograms +// to four, which covers >95% of the cases (on average, there are only two +// parallelograms available for any given vertex). +// All bits of the explicitly chosen configuration are stored together in a +// single context chosen by the total number of parallelograms available to +// choose from. 
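// Illustrative aside, not part of the Draco sources: the encoder below charges
// the per-edge "use this parallelogram?" flags at their binary Shannon
// entropy, i.e. roughly ceil(n * H(p)) bits for n flags of which a fraction p
// is set (the real code uses Draco's ComputeBinaryShannonEntropy helper). A
// standalone sketch of that estimate, with illustrative names:
#include <cmath>
#include <cstdint>

namespace doc_example {

// H(p) = -p*log2(p) - (1-p)*log2(1-p), with H(0) = H(1) = 0.
inline double BinaryEntropy(double p) {
  if (p <= 0.0 || p >= 1.0) {
    return 0.0;
  }
  return -p * std::log2(p) - (1.0 - p) * std::log2(1.0 - p);
}

// Approximate cost in bits of storing |total| binary flags of which |num_set|
// are set, assuming an entropy coder close to the ideal rate.
inline int64_t EstimatedFlagBits(int64_t num_set, int64_t total) {
  if (total <= 0) {
    return 0;
  }
  const double p = static_cast<double>(num_set) / static_cast<double>(total);
  return static_cast<int64_t>(
      std::ceil(static_cast<double>(total) * BinaryEntropy(p)));
}

}  // namespace doc_example
// For example, 100 flags of which 10 are set cost about ceil(100 * 0.469) = 47
// bits instead of 100 raw bits.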
+template +class MeshPredictionSchemeConstrainedMultiParallelogramEncoder + : public MeshPredictionSchemeEncoder { + public: + using CorrType = + typename PredictionSchemeEncoder::CorrType; + using CornerTable = typename MeshDataT::CornerTable; + + explicit MeshPredictionSchemeConstrainedMultiParallelogramEncoder( + const PointAttribute *attribute) + : MeshPredictionSchemeEncoder( + attribute), + selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {} + MeshPredictionSchemeConstrainedMultiParallelogramEncoder( + const PointAttribute *attribute, const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeEncoder( + attribute, transform, mesh_data), + selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + + bool EncodePredictionData(EncoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM; + } + + bool IsInitialized() const override { + return this->mesh_data().IsInitialized(); + } + + private: + // Function used to compute number of bits needed to store overhead of the + // predictor. In this case, we consider overhead to be all bits that mark + // whether a parallelogram should be used for prediction or not. The input + // to this method is the total number of parallelograms that were evaluated so + // far(total_parallelogram), and the number of parallelograms we decided to + // use for prediction (total_used_parallelograms). + // Returns number of bits required to store the overhead. + int64_t ComputeOverheadBits(int64_t total_used_parallelograms, + int64_t total_parallelogram) const { + // For now we assume RAns coding for the bits where the total required size + // is directly correlated to the binary entropy of the input stream. + // TODO(ostava): This should be generalized in case we use other binary + // coding scheme. + const double entropy = ComputeBinaryShannonEntropy( + static_cast(total_parallelogram), + static_cast(total_used_parallelograms)); + + // Round up to the nearest full bit. + return static_cast( + ceil(static_cast(total_parallelogram) * entropy)); + } + + // Struct that contains data used for measuring the error of each available + // parallelogram configuration. + struct Error { + Error() : num_bits(0), residual_error(0) {} + + // Primary metric: number of bits required to store the data as a result of + // the selected prediction configuration. + int num_bits; + // Secondary metric: absolute difference of residuals for the given + // configuration. + int residual_error; + + bool operator<(const Error &e) const { + if (num_bits < e.num_bits) { + return true; + } + if (num_bits > e.num_bits) { + return false; + } + return residual_error < e.residual_error; + } + }; + + // Computes error for predicting |predicted_val| instead of |actual_val|. + // Error is computed as the number of bits needed to encode the difference + // between the values. + Error ComputeError(const DataTypeT *predicted_val, + const DataTypeT *actual_val, int *out_residuals, + int num_components) { + Error error; + + for (int i = 0; i < num_components; ++i) { + const int dif = (predicted_val[i] - actual_val[i]); + error.residual_error += std::abs(dif); + out_residuals[i] = dif; + // Entropy needs unsigned symbols, so convert the signed difference to an + // unsigned symbol. 
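// Illustrative aside, not part of the Draco sources: a common signed-to-unsigned
// mapping of this kind is the "zigzag" scheme 0, -1, 1, -2, 2, ... ->
// 0, 1, 2, 3, 4, ...; the exact mapping here is whatever Draco's
// ConvertSignedIntToSymbol() helper defines.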
+ entropy_symbols_[i] = ConvertSignedIntToSymbol(dif); + } + + // Generate entropy data for case that this configuration was used. + // Note that the entropy stream is NOT updated in this case. + const auto entropy_data = + entropy_tracker_.Peek(entropy_symbols_.data(), num_components); + + error.num_bits = entropy_tracker_.GetNumberOfDataBits(entropy_data) + + entropy_tracker_.GetNumberOfRAnsTableBits(entropy_data); + return error; + } + + typedef constrained_multi_parallelogram::Mode Mode; + static constexpr int kMaxNumParallelograms = + constrained_multi_parallelogram::kMaxNumParallelograms; + // Crease edges are used to store whether any given edge should be used for + // parallelogram prediction or not. New values are added in the order in which + // the edges are processed. For better compression, the flags are stored in + // in separate contexts based on the number of available parallelograms at a + // given vertex. + // TODO(draco-eng) reconsider std::vector (performance/space). + std::vector is_crease_edge_[kMaxNumParallelograms]; + Mode selected_mode_; + + ShannonEntropyTracker entropy_tracker_; + + // Temporary storage for symbols that are fed into the |entropy_stream|. + // Always contains only |num_components| entries. + std::vector entropy_symbols_; +}; + +template +bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder< + DataTypeT, TransformT, MeshDataT>:: + ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr, + int size, int num_components, + const PointIndex * /* entry_to_point_id_map */) { + this->transform().Init(in_data, size, num_components); + const CornerTable *const table = this->mesh_data().corner_table(); + const std::vector *const vertex_to_data_map = + this->mesh_data().vertex_to_data_map(); + + // Predicted values for all simple parallelograms encountered at any given + // vertex. + std::vector pred_vals[kMaxNumParallelograms]; + for (int i = 0; i < kMaxNumParallelograms; ++i) { + pred_vals[i].resize(num_components); + } + // Used to store predicted value for various multi-parallelogram predictions + // (combinations of simple parallelogram predictions). + std::vector multi_pred_vals(num_components); + entropy_symbols_.resize(num_components); + + // Struct for holding data about prediction configuration for different sets + // of used parallelograms. + struct PredictionConfiguration { + PredictionConfiguration() + : error(), configuration(0), num_used_parallelograms(0) {} + Error error; + uint8_t configuration; // Bitfield, 1 use parallelogram, 0 don't use it. + int num_used_parallelograms; + std::vector predicted_value; + std::vector residuals; + }; + + // Bit-field used for computing permutations of excluded edges + // (parallelograms). + bool exluded_parallelograms[kMaxNumParallelograms]; + + // Data about the number of used parallelogram and total number of available + // parallelogram for each context. Used to compute overhead needed for storing + // the parallelogram choices made by the encoder. + int64_t total_used_parallelograms[kMaxNumParallelograms] = {0}; + int64_t total_parallelograms[kMaxNumParallelograms] = {0}; + + std::vector current_residuals(num_components); + + // We start processing the vertices from the end because this prediction uses + // data from previous entries that could be overwritten when an entry is + // processed. 
+ for (int p = + static_cast(this->mesh_data().data_to_corner_map()->size()) - 1; + p > 0; --p) { + const CornerIndex start_corner_id = + this->mesh_data().data_to_corner_map()->at(p); + + // Go over all corners attached to the vertex and compute the predicted + // value from the parallelograms defined by their opposite faces. + CornerIndex corner_id(start_corner_id); + int num_parallelograms = 0; + bool first_pass = true; + while (corner_id != kInvalidCornerIndex) { + if (ComputeParallelogramPrediction( + p, corner_id, table, *vertex_to_data_map, in_data, num_components, + &(pred_vals[num_parallelograms][0]))) { + // Parallelogram prediction applied and stored in + // |pred_vals[num_parallelograms]| + ++num_parallelograms; + // Stop processing when we reach the maximum number of allowed + // parallelograms. + if (num_parallelograms == kMaxNumParallelograms) { + break; + } + } + + // Proceed to the next corner attached to the vertex. First swing left + // and if we reach a boundary, swing right from the start corner. + if (first_pass) { + corner_id = table->SwingLeft(corner_id); + } else { + corner_id = table->SwingRight(corner_id); + } + if (corner_id == start_corner_id) { + break; + } + if (corner_id == kInvalidCornerIndex && first_pass) { + first_pass = false; + corner_id = table->SwingRight(start_corner_id); + } + } + + // Offset to the target (destination) vertex. + const int dst_offset = p * num_components; + Error error; + + // Compute all prediction errors for all possible configurations of + // available parallelograms. + + // Variable for holding the best configuration that has been found so far. + PredictionConfiguration best_prediction; + + // Compute delta coding error (configuration when no parallelogram is + // selected). + const int src_offset = (p - 1) * num_components; + error = ComputeError(in_data + src_offset, in_data + dst_offset, + ¤t_residuals[0], num_components); + + if (num_parallelograms > 0) { + total_parallelograms[num_parallelograms - 1] += num_parallelograms; + const int64_t new_overhead_bits = + ComputeOverheadBits(total_used_parallelograms[num_parallelograms - 1], + total_parallelograms[num_parallelograms - 1]); + error.num_bits += new_overhead_bits; + } + + best_prediction.error = error; + best_prediction.configuration = 0; + best_prediction.num_used_parallelograms = 0; + best_prediction.predicted_value.assign( + in_data + src_offset, in_data + src_offset + num_components); + best_prediction.residuals.assign(current_residuals.begin(), + current_residuals.end()); + + // Compute prediction error for different cases of used parallelograms. + for (int num_used_parallelograms = 1; + num_used_parallelograms <= num_parallelograms; + ++num_used_parallelograms) { + // Mark all parallelograms as excluded. + std::fill(exluded_parallelograms, + exluded_parallelograms + num_parallelograms, true); + // TODO(draco-eng) maybe this should be another std::fill. + // Mark the first |num_used_parallelograms| as not excluded. + for (int j = 0; j < num_used_parallelograms; ++j) { + exluded_parallelograms[j] = false; + } + // Permute over the excluded edges and compute error for each + // configuration (permutation of excluded parallelograms). + do { + // Reset the multi-parallelogram predicted values. 
+ for (int j = 0; j < num_components; ++j) { + multi_pred_vals[j] = 0; + } + uint8_t configuration = 0; + for (int j = 0; j < num_parallelograms; ++j) { + if (exluded_parallelograms[j]) { + continue; + } + for (int c = 0; c < num_components; ++c) { + multi_pred_vals[c] += pred_vals[j][c]; + } + // Set jth bit of the configuration. + configuration |= (1 << j); + } + + for (int j = 0; j < num_components; ++j) { + multi_pred_vals[j] /= num_used_parallelograms; + } + error = ComputeError(multi_pred_vals.data(), in_data + dst_offset, + ¤t_residuals[0], num_components); + if (num_parallelograms > 0) { + const int64_t new_overhead_bits = ComputeOverheadBits( + total_used_parallelograms[num_parallelograms - 1] + + num_used_parallelograms, + total_parallelograms[num_parallelograms - 1]); + + // Add overhead bits to the total error. + error.num_bits += new_overhead_bits; + } + if (error < best_prediction.error) { + best_prediction.error = error; + best_prediction.configuration = configuration; + best_prediction.num_used_parallelograms = num_used_parallelograms; + best_prediction.predicted_value.assign(multi_pred_vals.begin(), + multi_pred_vals.end()); + best_prediction.residuals.assign(current_residuals.begin(), + current_residuals.end()); + } + } while (std::next_permutation( + exluded_parallelograms, exluded_parallelograms + num_parallelograms)); + } + if (num_parallelograms > 0) { + total_used_parallelograms[num_parallelograms - 1] += + best_prediction.num_used_parallelograms; + } + + // Update the entropy stream by adding selected residuals as symbols to the + // stream. + for (int i = 0; i < num_components; ++i) { + entropy_symbols_[i] = + ConvertSignedIntToSymbol(best_prediction.residuals[i]); + } + entropy_tracker_.Push(entropy_symbols_.data(), num_components); + + for (int i = 0; i < num_parallelograms; ++i) { + if ((best_prediction.configuration & (1 << i)) == 0) { + // Parallelogram not used, mark the edge as crease. + is_crease_edge_[num_parallelograms - 1].push_back(true); + } else { + // Parallelogram used. Add it to the predicted value and mark the + // edge as not a crease. + is_crease_edge_[num_parallelograms - 1].push_back(false); + } + } + this->transform().ComputeCorrection(in_data + dst_offset, + best_prediction.predicted_value.data(), + out_corr + dst_offset); + } + // First element is always fixed because it cannot be predicted. + for (int i = 0; i < num_components; ++i) { + pred_vals[0][i] = static_cast(0); + } + this->transform().ComputeCorrection(in_data, pred_vals[0].data(), out_corr); + return true; +} + +template +bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder< + DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer + *buffer) { + // Encode selected edges using separate rans bit coder for each context. + for (int i = 0; i < kMaxNumParallelograms; ++i) { + // |i| is the context based on the number of available parallelograms, which + // is always equal to |i + 1|. + const int num_used_parallelograms = i + 1; + EncodeVarint(is_crease_edge_[i].size(), buffer); + if (is_crease_edge_[i].size()) { + RAnsBitEncoder encoder; + encoder.StartEncoding(); + // Encode the crease edge flags in the reverse vertex order that is needed + // be the decoder. Note that for the currently supported mode, each vertex + // has exactly |num_used_parallelograms| edges that need to be encoded. + for (int j = static_cast(is_crease_edge_[i].size()) - + num_used_parallelograms; + j >= 0; j -= num_used_parallelograms) { + // Go over all edges of the current vertex. 
+ for (int k = 0; k < num_used_parallelograms; ++k) { + encoder.EncodeBit(is_crease_edge_[i][j + k]); + } + } + encoder.EndEncoding(buffer); + } + } + return MeshPredictionSchemeEncoder::EncodePredictionData(buffer); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h new file mode 100644 index 000000000..c7a4e351a --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h @@ -0,0 +1,34 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_ + +namespace draco { + +// Data shared between constrained multi-parallelogram encoder and decoder. +namespace constrained_multi_parallelogram { + +enum Mode { + // Selects the optimal multi-parallelogram from up to 4 available + // parallelograms. + OPTIMAL_MULTI_PARALLELOGRAM = 0, +}; + +static constexpr int kMaxNumParallelograms = 4; + +} // namespace constrained_multi_parallelogram +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h new file mode 100644 index 000000000..2960a5e71 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h @@ -0,0 +1,72 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
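The constrained scheme above prices its per-vertex parallelogram flags at roughly their binary Shannon entropy (ComputeOverheadBits) before deciding whether enabling another parallelogram pays for itself. A minimal standalone sketch of that arithmetic, assuming only that the rANS coder gets close to the entropy bound; BinaryShannonEntropy and ComputeOverheadBitsSketch are illustrative helpers, not Draco's ShannonEntropy utilities:

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>

// Entropy (bits per flag) of a binary stream in which `used` of `total`
// flags are set. Degenerate all-zero/all-one streams cost ~0 bits per flag.
static double BinaryShannonEntropy(int64_t total, int64_t used) {
  if (total <= 0 || used <= 0 || used >= total) {
    return 0.0;
  }
  const double p = static_cast<double>(used) / static_cast<double>(total);
  return -(p * std::log2(p) + (1.0 - p) * std::log2(1.0 - p));
}

// Rounded-up bit count needed to signal which parallelograms were used,
// mirroring the idea behind ComputeOverheadBits.
static int64_t ComputeOverheadBitsSketch(int64_t total_used, int64_t total) {
  const double entropy = BinaryShannonEntropy(total, total_used);
  return static_cast<int64_t>(std::ceil(static_cast<double>(total) * entropy));
}

int main() {
  // 300 of 1000 parallelograms used -> ~0.88 bits per flag -> 882 bits.
  std::cout << ComputeOverheadBitsSketch(300, 1000) << " bits\n";
  return 0;
}
```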
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_ + +#include "draco/mesh/corner_table.h" +#include "draco/mesh/mesh.h" + +namespace draco { + +// Class stores data about the connectivity data of the mesh and information +// about how the connectivity was encoded/decoded. +template +class MeshPredictionSchemeData { + public: + typedef CornerTableT CornerTable; + MeshPredictionSchemeData() + : mesh_(nullptr), + corner_table_(nullptr), + vertex_to_data_map_(nullptr), + data_to_corner_map_(nullptr) {} + + void Set(const Mesh *mesh, const CornerTable *table, + const std::vector *data_to_corner_map, + const std::vector *vertex_to_data_map) { + mesh_ = mesh; + corner_table_ = table; + data_to_corner_map_ = data_to_corner_map; + vertex_to_data_map_ = vertex_to_data_map; + } + + const Mesh *mesh() const { return mesh_; } + const CornerTable *corner_table() const { return corner_table_; } + const std::vector *vertex_to_data_map() const { + return vertex_to_data_map_; + } + const std::vector *data_to_corner_map() const { + return data_to_corner_map_; + } + bool IsInitialized() const { + return mesh_ != nullptr && corner_table_ != nullptr && + vertex_to_data_map_ != nullptr && data_to_corner_map_ != nullptr; + } + + private: + const Mesh *mesh_; + const CornerTable *corner_table_; + + // Mapping between vertices and their encoding order. I.e. when an attribute + // entry on a given vertex was encoded. + const std::vector *vertex_to_data_map_; + + // Array that stores which corner was processed when a given attribute entry + // was encoded or decoded. + const std::vector *data_to_corner_map_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h new file mode 100644 index 000000000..6694a981c --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h @@ -0,0 +1,46 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h" + +namespace draco { + +// Base class for all mesh prediction scheme decoders that use the mesh +// connectivity data. |MeshDataT| can be any class that provides the same +// interface as the PredictionSchemeMeshData class. 
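MeshPredictionSchemeData above is only a bundle of pointers to two traversal maps. The toy program below shows the invariant those maps are expected to satisfy, with plain int vectors standing in for Draco's typed indices and a hypothetical corner_to_vertex table standing in for CornerTable::Vertex():

```cpp
#include <cassert>
#include <vector>

int main() {
  // Stand-in for CornerTable::Vertex(corner).
  const std::vector<int> corner_to_vertex = {2, 0, 1, 2, 1, 3};

  // data_to_corner_map[i]: the corner processed when the i-th attribute
  // entry was encoded (encoding order -> corner).
  const std::vector<int> data_to_corner_map = {1, 2, 0, 5};

  // vertex_to_data_map[v]: encoding order of the entry attached to vertex v.
  const std::vector<int> vertex_to_data_map = {0, 1, 2, 3};

  // The maps are inverse views of one traversal: data -> corner -> vertex
  // must map back to the same data entry.
  for (int i = 0; i < static_cast<int>(data_to_corner_map.size()); ++i) {
    const int corner = data_to_corner_map[i];
    const int vertex = corner_to_vertex[corner];
    assert(vertex_to_data_map[vertex] == i);
  }
  return 0;
}
```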
+template +class MeshPredictionSchemeDecoder + : public PredictionSchemeDecoder { + public: + typedef MeshDataT MeshData; + MeshPredictionSchemeDecoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : PredictionSchemeDecoder(attribute, transform), + mesh_data_(mesh_data) {} + + protected: + const MeshData &mesh_data() const { return mesh_data_; } + + private: + MeshData mesh_data_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h new file mode 100644 index 000000000..ab3c81a39 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h @@ -0,0 +1,46 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h" + +namespace draco { + +// Base class for all mesh prediction scheme encoders that use the mesh +// connectivity data. |MeshDataT| can be any class that provides the same +// interface as the PredictionSchemeMeshData class. +template +class MeshPredictionSchemeEncoder + : public PredictionSchemeEncoder { + public: + typedef MeshDataT MeshData; + MeshPredictionSchemeEncoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : PredictionSchemeEncoder(attribute, transform), + mesh_data_(mesh_data) {} + + protected: + const MeshData &mesh_data() const { return mesh_data_; } + + private: + MeshData mesh_data_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h new file mode 100644 index 000000000..da1387a30 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h @@ -0,0 +1,172 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h" +#include "draco/compression/bit_coders/rans_bit_decoder.h" +#include "draco/draco_features.h" + +namespace draco { + +// See MeshPredictionSchemeGeometricNormalEncoder for documentation. +template +class MeshPredictionSchemeGeometricNormalDecoder + : public MeshPredictionSchemeDecoder { + public: + using CorrType = typename MeshPredictionSchemeDecoder::CorrType; + MeshPredictionSchemeGeometricNormalDecoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeDecoder( + attribute, transform, mesh_data), + predictor_(mesh_data) {} + + private: + MeshPredictionSchemeGeometricNormalDecoder() {} + + public: + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + + bool DecodePredictionData(DecoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_GEOMETRIC_NORMAL; + } + + bool IsInitialized() const override { + if (!predictor_.IsInitialized()) { + return false; + } + if (!this->mesh_data().IsInitialized()) { + return false; + } + if (!octahedron_tool_box_.IsInitialized()) { + return false; + } + return true; + } + + int GetNumParentAttributes() const override { return 1; } + + GeometryAttribute::Type GetParentAttributeType(int i) const override { + DRACO_DCHECK_EQ(i, 0); + (void)i; + return GeometryAttribute::POSITION; + } + + bool SetParentAttribute(const PointAttribute *att) override { + if (att->attribute_type() != GeometryAttribute::POSITION) { + return false; // Invalid attribute type. + } + if (att->num_components() != 3) { + return false; // Currently works only for 3 component positions. + } + predictor_.SetPositionAttribute(*att); + return true; + } + void SetQuantizationBits(int q) { + octahedron_tool_box_.SetQuantizationBits(q); + } + + private: + MeshPredictionSchemeGeometricNormalPredictorArea + predictor_; + OctahedronToolBox octahedron_tool_box_; + RAnsBitDecoder flip_normal_bit_decoder_; +}; + +template +bool MeshPredictionSchemeGeometricNormalDecoder< + DataTypeT, TransformT, + MeshDataT>::ComputeOriginalValues(const CorrType *in_corr, + DataTypeT *out_data, int /* size */, + int num_components, + const PointIndex *entry_to_point_id_map) { + this->SetQuantizationBits(this->transform().quantization_bits()); + predictor_.SetEntryToPointIdMap(entry_to_point_id_map); + DRACO_DCHECK(this->IsInitialized()); + + // Expecting in_data in octahedral coordinates, i.e., portable attribute. 
+ DRACO_DCHECK_EQ(num_components, 2); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + + VectorD pred_normal_3d; + int32_t pred_normal_oct[2]; + + for (int data_id = 0; data_id < corner_map_size; ++data_id) { + const CornerIndex corner_id = + this->mesh_data().data_to_corner_map()->at(data_id); + predictor_.ComputePredictedValue(corner_id, pred_normal_3d.data()); + + // Compute predicted octahedral coordinates. + octahedron_tool_box_.CanonicalizeIntegerVector(pred_normal_3d.data()); + DRACO_DCHECK_EQ(pred_normal_3d.AbsSum(), + octahedron_tool_box_.center_value()); + if (flip_normal_bit_decoder_.DecodeNextBit()) { + pred_normal_3d = -pred_normal_3d; + } + octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords( + pred_normal_3d.data(), pred_normal_oct, pred_normal_oct + 1); + + const int data_offset = data_id * 2; + this->transform().ComputeOriginalValue( + pred_normal_oct, in_corr + data_offset, out_data + data_offset); + } + flip_normal_bit_decoder_.EndDecoding(); + return true; +} + +template +bool MeshPredictionSchemeGeometricNormalDecoder< + DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer + *buffer) { + // Get data needed for transform + if (!this->transform().DecodeTransformData(buffer)) { + return false; + } + +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) { + uint8_t prediction_mode; + if (!buffer->Decode(&prediction_mode)) { + return false; + } + + if (!predictor_.SetNormalPredictionMode( + NormalPredictionMode(prediction_mode))) { + return false; + } + } +#endif + + // Init normal flips. + if (!flip_normal_bit_decoder_.StartDecoding(buffer)) { + return false; + } + + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h new file mode 100644 index 000000000..cf146f83a --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h @@ -0,0 +1,180 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
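The decoder just above works on quantized octahedral coordinates produced by OctahedronToolBox. For orientation, here is a generic float-valued octahedral encode/decode pair: project the normal onto the octahedron |x|+|y|+|z| = 1 and fold the lower hemisphere over the diagonals. This is a rough sketch of the mapping only, not Draco's integer-quantized implementation:

```cpp
#include <array>
#include <cmath>
#include <cstdio>

// Maps a (roughly unit) normal to two octahedral coordinates in [-1, 1].
static std::array<float, 2> OctahedralEncode(float x, float y, float z) {
  const float inv_l1 = 1.0f / (std::fabs(x) + std::fabs(y) + std::fabs(z));
  float u = x * inv_l1;
  float v = y * inv_l1;
  if (z < 0.0f) {
    // Fold the lower hemisphere across the diagonals.
    const float old_u = u;
    u = (1.0f - std::fabs(v)) * (old_u >= 0.0f ? 1.0f : -1.0f);
    v = (1.0f - std::fabs(old_u)) * (v >= 0.0f ? 1.0f : -1.0f);
  }
  return {u, v};
}

// Inverse mapping back to a unit normal.
static std::array<float, 3> OctahedralDecode(float u, float v) {
  float x = u;
  float y = v;
  const float z = 1.0f - std::fabs(u) - std::fabs(v);
  if (z < 0.0f) {
    const float old_x = x;
    x = (1.0f - std::fabs(y)) * (old_x >= 0.0f ? 1.0f : -1.0f);
    y = (1.0f - std::fabs(old_x)) * (y >= 0.0f ? 1.0f : -1.0f);
  }
  const float len = std::sqrt(x * x + y * y + z * z);
  return {x / len, y / len, z / len};
}

int main() {
  const auto oct = OctahedralEncode(0.0f, 0.6f, -0.8f);
  const auto n = OctahedralDecode(oct[0], oct[1]);
  // Round-trips back to (0, 0.6, -0.8).
  std::printf("(%f, %f) -> (%f, %f, %f)\n", oct[0], oct[1], n[0], n[1], n[2]);
  return 0;
}
```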
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h" +#include "draco/compression/bit_coders/rans_bit_encoder.h" +#include "draco/compression/config/compression_shared.h" + +namespace draco { + +// Prediction scheme for normals based on the underlying geometry. +// At a smooth vertices normals are computed by weighting the normals of +// adjacent faces with the area of these faces. At seams, the same approach +// applies for seam corners. +template +class MeshPredictionSchemeGeometricNormalEncoder + : public MeshPredictionSchemeEncoder { + public: + using CorrType = typename MeshPredictionSchemeEncoder::CorrType; + MeshPredictionSchemeGeometricNormalEncoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeEncoder( + attribute, transform, mesh_data), + predictor_(mesh_data) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + + bool EncodePredictionData(EncoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_GEOMETRIC_NORMAL; + } + + bool IsInitialized() const override { + if (!predictor_.IsInitialized()) { + return false; + } + if (!this->mesh_data().IsInitialized()) { + return false; + } + return true; + } + + int GetNumParentAttributes() const override { return 1; } + + GeometryAttribute::Type GetParentAttributeType(int i) const override { + DRACO_DCHECK_EQ(i, 0); + (void)i; + return GeometryAttribute::POSITION; + } + + bool SetParentAttribute(const PointAttribute *att) override { + if (att->attribute_type() != GeometryAttribute::POSITION) { + return false; // Invalid attribute type. + } + if (att->num_components() != 3) { + return false; // Currently works only for 3 component positions. + } + predictor_.SetPositionAttribute(*att); + return true; + } + + private: + void SetQuantizationBits(int q) { + DRACO_DCHECK_GE(q, 2); + DRACO_DCHECK_LE(q, 30); + octahedron_tool_box_.SetQuantizationBits(q); + } + MeshPredictionSchemeGeometricNormalPredictorArea + predictor_; + + OctahedronToolBox octahedron_tool_box_; + RAnsBitEncoder flip_normal_bit_encoder_; +}; + +template +bool MeshPredictionSchemeGeometricNormalEncoder:: + ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr, + int size, int num_components, + const PointIndex *entry_to_point_id_map) { + this->SetQuantizationBits(this->transform().quantization_bits()); + predictor_.SetEntryToPointIdMap(entry_to_point_id_map); + DRACO_DCHECK(this->IsInitialized()); + // Expecting in_data in octahedral coordinates, i.e., portable attribute. 
+ DRACO_DCHECK_EQ(num_components, 2); + + flip_normal_bit_encoder_.StartEncoding(); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + + VectorD pred_normal_3d; + VectorD pos_pred_normal_oct; + VectorD neg_pred_normal_oct; + VectorD pos_correction; + VectorD neg_correction; + for (int data_id = 0; data_id < corner_map_size; ++data_id) { + const CornerIndex corner_id = + this->mesh_data().data_to_corner_map()->at(data_id); + predictor_.ComputePredictedValue(corner_id, pred_normal_3d.data()); + + // Compute predicted octahedral coordinates. + octahedron_tool_box_.CanonicalizeIntegerVector(pred_normal_3d.data()); + DRACO_DCHECK_EQ(pred_normal_3d.AbsSum(), + octahedron_tool_box_.center_value()); + + // Compute octahedral coordinates for both possible directions. + octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords( + pred_normal_3d.data(), pos_pred_normal_oct.data(), + pos_pred_normal_oct.data() + 1); + pred_normal_3d = -pred_normal_3d; + octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords( + pred_normal_3d.data(), neg_pred_normal_oct.data(), + neg_pred_normal_oct.data() + 1); + + // Choose the one with the best correction value. + const int data_offset = data_id * 2; + this->transform().ComputeCorrection(in_data + data_offset, + pos_pred_normal_oct.data(), + pos_correction.data()); + this->transform().ComputeCorrection(in_data + data_offset, + neg_pred_normal_oct.data(), + neg_correction.data()); + pos_correction[0] = octahedron_tool_box_.ModMax(pos_correction[0]); + pos_correction[1] = octahedron_tool_box_.ModMax(pos_correction[1]); + neg_correction[0] = octahedron_tool_box_.ModMax(neg_correction[0]); + neg_correction[1] = octahedron_tool_box_.ModMax(neg_correction[1]); + if (pos_correction.AbsSum() < neg_correction.AbsSum()) { + flip_normal_bit_encoder_.EncodeBit(false); + (out_corr + data_offset)[0] = + octahedron_tool_box_.MakePositive(pos_correction[0]); + (out_corr + data_offset)[1] = + octahedron_tool_box_.MakePositive(pos_correction[1]); + } else { + flip_normal_bit_encoder_.EncodeBit(true); + (out_corr + data_offset)[0] = + octahedron_tool_box_.MakePositive(neg_correction[0]); + (out_corr + data_offset)[1] = + octahedron_tool_box_.MakePositive(neg_correction[1]); + } + } + return true; +} + +template +bool MeshPredictionSchemeGeometricNormalEncoder< + DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer + *buffer) { + if (!this->transform().EncodeTransformData(buffer)) { + return false; + } + + // Encode normal flips. + flip_normal_bit_encoder_.EndEncoding(buffer); + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h new file mode 100644 index 000000000..775eded6b --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h @@ -0,0 +1,117 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
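The normal encoder above tries both the predicted direction and its negation, keeps whichever needs the smaller correction, and records the choice with a single flip bit. A simplified version of that selection on plain integers; the ModMax/MakePositive wrapping applied by the real transform is deliberately left out:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct Choice {
  bool flipped;     // Value of the flip bit that would be encoded.
  int32_t corr[2];  // Correction stored alongside the bit.
};

// Picks the orientation (prediction or its negation) with the smaller
// absolute correction, mirroring the pos/neg comparison in the encoder.
static Choice ChooseOrientation(const int32_t actual[2],
                                const int32_t pos_pred[2],
                                const int32_t neg_pred[2]) {
  const int64_t pos_cost = std::llabs(actual[0] - pos_pred[0]) +
                           std::llabs(actual[1] - pos_pred[1]);
  const int64_t neg_cost = std::llabs(actual[0] - neg_pred[0]) +
                           std::llabs(actual[1] - neg_pred[1]);
  Choice c;
  c.flipped = neg_cost < pos_cost;
  const int32_t *pred = c.flipped ? neg_pred : pos_pred;
  c.corr[0] = actual[0] - pred[0];
  c.corr[1] = actual[1] - pred[1];
  return c;
}

int main() {
  const int32_t actual[2] = {120, 40};
  const int32_t pos_pred[2] = {10, 200};  // Poor fit.
  const int32_t neg_pred[2] = {118, 45};  // Good fit -> flip bit set.
  const Choice c = ChooseOrientation(actual, pos_pred, neg_pred);
  std::printf("flipped=%d corr=(%d, %d)\n", c.flipped, c.corr[0], c.corr[1]);
  return 0;
}
```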
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_AREA_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_AREA_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h" + +namespace draco { + +// This predictor estimates the normal via the surrounding triangles of the +// given corner. Triangles are weighted according to their area. +template +class MeshPredictionSchemeGeometricNormalPredictorArea + : public MeshPredictionSchemeGeometricNormalPredictorBase< + DataTypeT, TransformT, MeshDataT> { + typedef MeshPredictionSchemeGeometricNormalPredictorBase< + DataTypeT, TransformT, MeshDataT> + Base; + + public: + explicit MeshPredictionSchemeGeometricNormalPredictorArea(const MeshDataT &md) + : Base(md) { + this->SetNormalPredictionMode(TRIANGLE_AREA); + }; + virtual ~MeshPredictionSchemeGeometricNormalPredictorArea() {} + + // Computes predicted octahedral coordinates on a given corner. + void ComputePredictedValue(CornerIndex corner_id, + DataTypeT *prediction) override { + DRACO_DCHECK(this->IsInitialized()); + typedef typename MeshDataT::CornerTable CornerTable; + const CornerTable *const corner_table = this->mesh_data_.corner_table(); + // Going to compute the predicted normal from the surrounding triangles + // according to the connectivity of the given corner table. + VertexCornersIterator cit(corner_table, corner_id); + // Position of central vertex does not change in loop. + const VectorD pos_cent = this->GetPositionForCorner(corner_id); + // Computing normals for triangles and adding them up. + + VectorD normal; + CornerIndex c_next, c_prev; + while (!cit.End()) { + // Getting corners. + if (this->normal_prediction_mode_ == ONE_TRIANGLE) { + c_next = corner_table->Next(corner_id); + c_prev = corner_table->Previous(corner_id); + } else { + c_next = corner_table->Next(cit.Corner()); + c_prev = corner_table->Previous(cit.Corner()); + } + const VectorD pos_next = this->GetPositionForCorner(c_next); + const VectorD pos_prev = this->GetPositionForCorner(c_prev); + + // Computing delta vectors to next and prev. + const VectorD delta_next = pos_next - pos_cent; + const VectorD delta_prev = pos_prev - pos_cent; + + // Computing cross product. + const VectorD cross = CrossProduct(delta_next, delta_prev); + + // Prevent signed integer overflows by doing math as unsigned. + auto normal_data = reinterpret_cast(normal.data()); + auto cross_data = reinterpret_cast(cross.data()); + normal_data[0] = normal_data[0] + cross_data[0]; + normal_data[1] = normal_data[1] + cross_data[1]; + normal_data[2] = normal_data[2] + cross_data[2]; + + cit.Next(); + } + + // Convert to int32_t, make sure entries are not too large. 
+ constexpr int64_t upper_bound = 1 << 29; + if (this->normal_prediction_mode_ == ONE_TRIANGLE) { + const int32_t abs_sum = static_cast(normal.AbsSum()); + if (abs_sum > upper_bound) { + const int64_t quotient = abs_sum / upper_bound; + normal = normal / quotient; + } + } else { + const int64_t abs_sum = normal.AbsSum(); + if (abs_sum > upper_bound) { + const int64_t quotient = abs_sum / upper_bound; + normal = normal / quotient; + } + } + DRACO_DCHECK_LE(normal.AbsSum(), upper_bound); + prediction[0] = static_cast(normal[0]); + prediction[1] = static_cast(normal[1]); + prediction[2] = static_cast(normal[2]); + } + bool SetNormalPredictionMode(NormalPredictionMode mode) override { + if (mode == ONE_TRIANGLE) { + this->normal_prediction_mode_ = mode; + return true; + } else if (mode == TRIANGLE_AREA) { + this->normal_prediction_mode_ = mode; + return true; + } + return false; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_AREA_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h new file mode 100644 index 000000000..a554dda96 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h @@ -0,0 +1,96 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_BASE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_BASE_H_ + +#include + +#include "draco/attributes/point_attribute.h" +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/core/math_utils.h" +#include "draco/core/vector_d.h" +#include "draco/mesh/corner_table.h" +#include "draco/mesh/corner_table_iterators.h" + +namespace draco { + +// Base class for geometric normal predictors using position attribute. 
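The TRIANGLE_AREA mode above simply accumulates raw cross products around the vertex; because a cross product's magnitude equals twice the triangle's area, larger triangles automatically contribute more. A small double-precision sketch of that accumulation (Draco performs the same sums in unsigned 64-bit integer arithmetic to sidestep signed overflow):

```cpp
#include <cstdio>
#include <vector>

struct Vec3 {
  double x, y, z;
};

static Vec3 Sub(const Vec3 &a, const Vec3 &b) {
  return {a.x - b.x, a.y - b.y, a.z - b.z};
}

static Vec3 Cross(const Vec3 &a, const Vec3 &b) {
  return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x};
}

// Sums the (unnormalized) triangle normals of a fan around `center`.
// Edge order is chosen so a counter-clockwise fan in the xy-plane yields +z.
static Vec3 AreaWeightedNormal(const Vec3 &center,
                               const std::vector<Vec3> &ring) {
  Vec3 n = {0.0, 0.0, 0.0};
  for (size_t i = 0; i + 1 < ring.size(); ++i) {
    const Vec3 c = Cross(Sub(ring[i], center), Sub(ring[i + 1], center));
    n.x += c.x;
    n.y += c.y;
    n.z += c.z;
  }
  return n;
}

int main() {
  const Vec3 center = {0.0, 0.0, 0.0};
  const std::vector<Vec3> ring = {{1, 0, 0}, {0, 2, 0}, {-1, 0, 0}};
  const Vec3 n = AreaWeightedNormal(center, ring);
  // Prints (0, 0, 4): +z direction, magnitude twice the total fan area of 2.
  std::printf("n = (%f, %f, %f)\n", n.x, n.y, n.z);
  return 0;
}
```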
+template +class MeshPredictionSchemeGeometricNormalPredictorBase { + protected: + explicit MeshPredictionSchemeGeometricNormalPredictorBase(const MeshDataT &md) + : pos_attribute_(nullptr), + entry_to_point_id_map_(nullptr), + mesh_data_(md) {} + virtual ~MeshPredictionSchemeGeometricNormalPredictorBase() {} + + public: + void SetPositionAttribute(const PointAttribute &position_attribute) { + pos_attribute_ = &position_attribute; + } + void SetEntryToPointIdMap(const PointIndex *map) { + entry_to_point_id_map_ = map; + } + bool IsInitialized() const { + if (pos_attribute_ == nullptr) { + return false; + } + if (entry_to_point_id_map_ == nullptr) { + return false; + } + return true; + } + + virtual bool SetNormalPredictionMode(NormalPredictionMode mode) = 0; + virtual NormalPredictionMode GetNormalPredictionMode() const { + return normal_prediction_mode_; + } + + protected: + VectorD GetPositionForDataId(int data_id) const { + DRACO_DCHECK(this->IsInitialized()); + const auto point_id = entry_to_point_id_map_[data_id]; + const auto pos_val_id = pos_attribute_->mapped_index(point_id); + VectorD pos; + pos_attribute_->ConvertValue(pos_val_id, &pos[0]); + return pos; + } + VectorD GetPositionForCorner(CornerIndex ci) const { + DRACO_DCHECK(this->IsInitialized()); + const auto corner_table = mesh_data_.corner_table(); + const auto vert_id = corner_table->Vertex(ci).value(); + const auto data_id = mesh_data_.vertex_to_data_map()->at(vert_id); + return GetPositionForDataId(data_id); + } + VectorD GetOctahedralCoordForDataId(int data_id, + const DataTypeT *data) const { + DRACO_DCHECK(this->IsInitialized()); + const int data_offset = data_id * 2; + return VectorD(data[data_offset], data[data_offset + 1]); + } + // Computes predicted octahedral coordinates on a given corner. + virtual void ComputePredictedValue(CornerIndex corner_id, + DataTypeT *prediction) = 0; + + const PointAttribute *pos_attribute_; + const PointIndex *entry_to_point_id_map_; + MeshDataT mesh_data_; + NormalPredictionMode normal_prediction_mode_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_BASE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h new file mode 100644 index 000000000..fc82e0a8f --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h @@ -0,0 +1,126 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" +#include "draco/draco_features.h" + +namespace draco { + +// Decoder for predictions encoded by multi-parallelogram encoding scheme. +// See the corresponding encoder for method description. +template +class MeshPredictionSchemeMultiParallelogramDecoder + : public MeshPredictionSchemeDecoder { + public: + using CorrType = + typename PredictionSchemeDecoder::CorrType; + using CornerTable = typename MeshDataT::CornerTable; + + explicit MeshPredictionSchemeMultiParallelogramDecoder( + const PointAttribute *attribute) + : MeshPredictionSchemeDecoder( + attribute) {} + MeshPredictionSchemeMultiParallelogramDecoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeDecoder( + attribute, transform, mesh_data) {} + + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_MULTI_PARALLELOGRAM; + } + + bool IsInitialized() const override { + return this->mesh_data().IsInitialized(); + } +}; + +template +bool MeshPredictionSchemeMultiParallelogramDecoder:: + ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int /* size */, int num_components, + const PointIndex * /* entry_to_point_id_map */) { + this->transform().Init(num_components); + + // For storage of prediction values (already initialized to zero). + std::unique_ptr pred_vals(new DataTypeT[num_components]()); + std::unique_ptr parallelogram_pred_vals( + new DataTypeT[num_components]()); + + this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data); + + const CornerTable *const table = this->mesh_data().corner_table(); + const std::vector *const vertex_to_data_map = + this->mesh_data().vertex_to_data_map(); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + for (int p = 1; p < corner_map_size; ++p) { + const CornerIndex start_corner_id = + this->mesh_data().data_to_corner_map()->at(p); + + CornerIndex corner_id(start_corner_id); + int num_parallelograms = 0; + for (int i = 0; i < num_components; ++i) { + pred_vals[i] = static_cast(0); + } + while (corner_id != kInvalidCornerIndex) { + if (ComputeParallelogramPrediction( + p, corner_id, table, *vertex_to_data_map, out_data, + num_components, parallelogram_pred_vals.get())) { + for (int c = 0; c < num_components; ++c) { + pred_vals[c] += parallelogram_pred_vals[c]; + } + ++num_parallelograms; + } + + // Proceed to the next corner attached to the vertex. + corner_id = table->SwingRight(corner_id); + if (corner_id == start_corner_id) { + corner_id = kInvalidCornerIndex; + } + } + + const int dst_offset = p * num_components; + if (num_parallelograms == 0) { + // No parallelogram was valid. + // We use the last decoded point as a reference. 
+ const int src_offset = (p - 1) * num_components; + this->transform().ComputeOriginalValue( + out_data + src_offset, in_corr + dst_offset, out_data + dst_offset); + } else { + // Compute the correction from the predicted value. + for (int c = 0; c < num_components; ++c) { + pred_vals[c] /= num_parallelograms; + } + this->transform().ComputeOriginalValue( + pred_vals.get(), in_corr + dst_offset, out_data + dst_offset); + } + } + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_DECODER_H_ +#endif diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h new file mode 100644 index 000000000..301b357d4 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h @@ -0,0 +1,133 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" + +namespace draco { + +// Multi parallelogram prediction predicts attribute values using information +// from all opposite faces to the predicted vertex, compared to the standard +// prediction scheme, where only one opposite face is used (see +// prediction_scheme_parallelogram.h). This approach is generally slower than +// the standard parallelogram prediction, but it usually results in better +// prediction (5 - 20% based on the quantization level. Better gains can be +// achieved when more aggressive quantization is used). 
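As the comment above says, the multi-parallelogram scheme averages every available parallelogram prediction and falls back to delta coding from the previous entry when no opposite face is usable. A simplified sketch of that combination step, with plain vectors in place of Draco's attribute buffers and corner-table traversal:

```cpp
#include <cstdio>
#include <vector>

// Averages the per-component predictions of all valid parallelograms; with
// none available, reuses the previously processed entry (delta coding).
static std::vector<int> CombinePredictions(
    const std::vector<std::vector<int>> &parallelogram_preds,
    const std::vector<int> &previous_entry) {
  if (parallelogram_preds.empty()) {
    return previous_entry;  // Delta-coding fallback.
  }
  const size_t num_components = previous_entry.size();
  std::vector<int> pred(num_components, 0);
  for (const auto &p : parallelogram_preds) {
    for (size_t c = 0; c < num_components; ++c) {
      pred[c] += p[c];
    }
  }
  for (size_t c = 0; c < num_components; ++c) {
    pred[c] /= static_cast<int>(parallelogram_preds.size());
  }
  return pred;
}

int main() {
  const std::vector<std::vector<int>> preds = {{10, 22, -4}, {14, 20, 0}};
  const std::vector<int> prev = {9, 21, -2};
  const std::vector<int> combined = CombinePredictions(preds, prev);
  std::printf("(%d, %d, %d)\n", combined[0], combined[1], combined[2]);  // (12, 21, -2)
  return 0;
}
```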
+template +class MeshPredictionSchemeMultiParallelogramEncoder + : public MeshPredictionSchemeEncoder { + public: + using CorrType = + typename PredictionSchemeEncoder::CorrType; + using CornerTable = typename MeshDataT::CornerTable; + + explicit MeshPredictionSchemeMultiParallelogramEncoder( + const PointAttribute *attribute) + : MeshPredictionSchemeEncoder( + attribute) {} + MeshPredictionSchemeMultiParallelogramEncoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeEncoder( + attribute, transform, mesh_data) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_MULTI_PARALLELOGRAM; + } + + bool IsInitialized() const override { + return this->mesh_data().IsInitialized(); + } +}; + +template +bool MeshPredictionSchemeMultiParallelogramEncoder:: + ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr, + int size, int num_components, + const PointIndex * /* entry_to_point_id_map */) { + this->transform().Init(in_data, size, num_components); + const CornerTable *const table = this->mesh_data().corner_table(); + const std::vector *const vertex_to_data_map = + this->mesh_data().vertex_to_data_map(); + + // For storage of prediction values (already initialized to zero). + std::unique_ptr pred_vals(new DataTypeT[num_components]()); + std::unique_ptr parallelogram_pred_vals( + new DataTypeT[num_components]()); + + // We start processing from the end because this prediction uses data from + // previous entries that could be overwritten when an entry is processed. + for (int p = + static_cast(this->mesh_data().data_to_corner_map()->size() - 1); + p > 0; --p) { + const CornerIndex start_corner_id = + this->mesh_data().data_to_corner_map()->at(p); + + // Go over all corners attached to the vertex and compute the predicted + // value from the parallelograms defined by their opposite faces. + CornerIndex corner_id(start_corner_id); + int num_parallelograms = 0; + for (int i = 0; i < num_components; ++i) { + pred_vals[i] = static_cast(0); + } + while (corner_id != kInvalidCornerIndex) { + if (ComputeParallelogramPrediction( + p, corner_id, table, *vertex_to_data_map, in_data, num_components, + parallelogram_pred_vals.get())) { + for (int c = 0; c < num_components; ++c) { + pred_vals[c] += parallelogram_pred_vals[c]; + } + ++num_parallelograms; + } + + // Proceed to the next corner attached to the vertex. + corner_id = table->SwingRight(corner_id); + if (corner_id == start_corner_id) { + corner_id = kInvalidCornerIndex; + } + } + const int dst_offset = p * num_components; + if (num_parallelograms == 0) { + // No parallelogram was valid. + // We use the last encoded point as a reference. + const int src_offset = (p - 1) * num_components; + this->transform().ComputeCorrection( + in_data + dst_offset, in_data + src_offset, out_corr + dst_offset); + } else { + // Compute the correction from the predicted value. + for (int c = 0; c < num_components; ++c) { + pred_vals[c] /= num_parallelograms; + } + this->transform().ComputeCorrection(in_data + dst_offset, pred_vals.get(), + out_corr + dst_offset); + } + } + // First element is always fixed because it cannot be predicted. 
+ for (int i = 0; i < num_components; ++i) { + pred_vals[i] = static_cast(0); + } + this->transform().ComputeCorrection(in_data, pred_vals.get(), out_corr); + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h new file mode 100644 index 000000000..4d47ddf30 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h @@ -0,0 +1,98 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" + +namespace draco { + +// Decoder for attribute values encoded with the standard parallelogram +// prediction. See the description of the corresponding encoder for more +// details. +template +class MeshPredictionSchemeParallelogramDecoder + : public MeshPredictionSchemeDecoder { + public: + using CorrType = + typename PredictionSchemeDecoder::CorrType; + using CornerTable = typename MeshDataT::CornerTable; + explicit MeshPredictionSchemeParallelogramDecoder( + const PointAttribute *attribute) + : MeshPredictionSchemeDecoder( + attribute) {} + MeshPredictionSchemeParallelogramDecoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeDecoder( + attribute, transform, mesh_data) {} + + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_PARALLELOGRAM; + } + + bool IsInitialized() const override { + return this->mesh_data().IsInitialized(); + } +}; + +template +bool MeshPredictionSchemeParallelogramDecoder:: + ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int /* size */, int num_components, + const PointIndex * /* entry_to_point_id_map */) { + this->transform().Init(num_components); + + const CornerTable *const table = this->mesh_data().corner_table(); + const std::vector *const vertex_to_data_map = + this->mesh_data().vertex_to_data_map(); + + // For storage of prediction values (already initialized to zero). + std::unique_ptr pred_vals(new DataTypeT[num_components]()); + + // Restore the first value. 
+ this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + for (int p = 1; p < corner_map_size; ++p) { + const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); + const int dst_offset = p * num_components; + if (!ComputeParallelogramPrediction(p, corner_id, table, + *vertex_to_data_map, out_data, + num_components, pred_vals.get())) { + // Parallelogram could not be computed, Possible because some of the + // vertices are not valid (not encoded yet). + // We use the last encoded point as a reference (delta coding). + const int src_offset = (p - 1) * num_components; + this->transform().ComputeOriginalValue( + out_data + src_offset, in_corr + dst_offset, out_data + dst_offset); + } else { + // Apply the parallelogram prediction. + this->transform().ComputeOriginalValue( + pred_vals.get(), in_corr + dst_offset, out_data + dst_offset); + } + } + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h new file mode 100644 index 000000000..f00801932 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h @@ -0,0 +1,111 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h" + +namespace draco { + +// Parallelogram prediction predicts an attribute value V from three vertices +// on the opposite face to the predicted vertex. 
The values on the three +// vertices are used to construct a parallelogram V' = O - A - B, where O is the +// value on the opposite vertex, and A, B are values on the shared vertices: +// V +// / \ +// / \ +// / \ +// A-------B +// \ / +// \ / +// \ / +// O +// +template +class MeshPredictionSchemeParallelogramEncoder + : public MeshPredictionSchemeEncoder { + public: + using CorrType = + typename PredictionSchemeEncoder::CorrType; + using CornerTable = typename MeshDataT::CornerTable; + explicit MeshPredictionSchemeParallelogramEncoder( + const PointAttribute *attribute) + : MeshPredictionSchemeEncoder( + attribute) {} + MeshPredictionSchemeParallelogramEncoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeEncoder( + attribute, transform, mesh_data) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_PARALLELOGRAM; + } + + bool IsInitialized() const override { + return this->mesh_data().IsInitialized(); + } +}; + +template +bool MeshPredictionSchemeParallelogramEncoder:: + ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr, + int size, int num_components, + const PointIndex * /* entry_to_point_id_map */) { + this->transform().Init(in_data, size, num_components); + // For storage of prediction values (already initialized to zero). + std::unique_ptr pred_vals(new DataTypeT[num_components]()); + + // We start processing from the end because this prediction uses data from + // previous entries that could be overwritten when an entry is processed. + const CornerTable *const table = this->mesh_data().corner_table(); + const std::vector *const vertex_to_data_map = + this->mesh_data().vertex_to_data_map(); + for (int p = + static_cast(this->mesh_data().data_to_corner_map()->size() - 1); + p > 0; --p) { + const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); + const int dst_offset = p * num_components; + if (!ComputeParallelogramPrediction(p, corner_id, table, + *vertex_to_data_map, in_data, + num_components, pred_vals.get())) { + // Parallelogram could not be computed, Possible because some of the + // vertices are not valid (not encoded yet). + // We use the last encoded point as a reference (delta coding). + const int src_offset = (p - 1) * num_components; + this->transform().ComputeCorrection( + in_data + dst_offset, in_data + src_offset, out_corr + dst_offset); + } else { + // Apply the parallelogram prediction. + this->transform().ComputeCorrection(in_data + dst_offset, pred_vals.get(), + out_corr + dst_offset); + } + } + // First element is always fixed because it cannot be predicted. 
+ for (int i = 0; i < num_components; ++i) { + pred_vals[i] = static_cast(0); + } + this->transform().ComputeCorrection(in_data, pred_vals.get(), out_corr); + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h new file mode 100644 index 000000000..fd10fb524 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h @@ -0,0 +1,78 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Shared functionality for different parallelogram prediction schemes. + +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_ + +#include "draco/mesh/corner_table.h" +#include "draco/mesh/mesh.h" + +namespace draco { + +// TODO(draco-eng) consolidate Vertex/next/previous queries to one call +// (performance). +template +inline void GetParallelogramEntries( + const CornerIndex ci, const CornerTableT *table, + const std::vector &vertex_to_data_map, int *opp_entry, + int *next_entry, int *prev_entry) { + // One vertex of the input |table| correspond to exactly one attribute value + // entry. The |table| can be either CornerTable for per-vertex attributes, + // or MeshAttributeCornerTable for attributes with interior seams. + *opp_entry = vertex_to_data_map[table->Vertex(ci).value()]; + *next_entry = vertex_to_data_map[table->Vertex(table->Next(ci)).value()]; + *prev_entry = vertex_to_data_map[table->Vertex(table->Previous(ci)).value()]; +} + +// Computes parallelogram prediction for a given corner and data entry id. +// The prediction is stored in |out_prediction|. +// Function returns false when the prediction couldn't be computed, e.g. because +// not all entry points were available. +template +inline bool ComputeParallelogramPrediction( + int data_entry_id, const CornerIndex ci, const CornerTableT *table, + const std::vector &vertex_to_data_map, const DataTypeT *in_data, + int num_components, DataTypeT *out_prediction) { + const CornerIndex oci = table->Opposite(ci); + if (oci == kInvalidCornerIndex) { + return false; + } + int vert_opp, vert_next, vert_prev; + GetParallelogramEntries(oci, table, vertex_to_data_map, + &vert_opp, &vert_next, &vert_prev); + if (vert_opp < data_entry_id && vert_next < data_entry_id && + vert_prev < data_entry_id) { + // Apply the parallelogram prediction. 
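The loop applied at this point predicts each component as next plus prev minus the opposite corner's value. As a point of reference, here is a minimal standalone sketch of the same arithmetic on plain arrays; the function name, the flat entry layout, and the index-based availability check are illustrative stand-ins for Draco's corner table and vertex-to-data map, not its API.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative stand-in for the parallelogram rule: entries are laid out as
// |num_components| values per attribute entry, and an entry may only be used
// for prediction if its id is smaller than the entry being predicted
// (i.e. it has already been encoded/decoded).
bool PredictParallelogram(int entry_id, int opp, int next, int prev,
                          const std::vector<std::int64_t> &data,
                          int num_components,
                          std::vector<std::int64_t> *out_prediction) {
  if (opp >= entry_id || next >= entry_id || prev >= entry_id) {
    return false;  // Not all neighbors are available yet.
  }
  out_prediction->resize(num_components);
  for (int c = 0; c < num_components; ++c) {
    // Complete the parallelogram spanned by next/prev/opp: V' = A + B - O.
    (*out_prediction)[c] = data[next * num_components + c] +
                           data[prev * num_components + c] -
                           data[opp * num_components + c];
  }
  return true;
}

int main() {
  // Three already-processed entries (ids 0..2), two components each.
  const std::vector<std::int64_t> data = {0, 0,   // entry 0 (opposite corner)
                                          4, 1,   // entry 1 (next)
                                          1, 3};  // entry 2 (prev)
  std::vector<std::int64_t> pred;
  if (PredictParallelogram(/*entry_id=*/3, /*opp=*/0, /*next=*/1, /*prev=*/2,
                           data, /*num_components=*/2, &pred)) {
    std::cout << "prediction: " << pred[0] << ", " << pred[1] << "\n";  // 5, 4
  }
  return 0;
}
```

For well-behaved meshes the completed parallelogram lands close to the true value, so the corrections that eventually get entropy coded stay small.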
+ const int v_opp_off = vert_opp * num_components; + const int v_next_off = vert_next * num_components; + const int v_prev_off = vert_prev * num_components; + for (int c = 0; c < num_components; ++c) { + const int64_t in_data_next_off = in_data[v_next_off + c]; + const int64_t in_data_prev_off = in_data[v_prev_off + c]; + const int64_t in_data_opp_off = in_data[v_opp_off + c]; + const int64_t result = + (in_data_next_off + in_data_prev_off) - in_data_opp_off; + + out_prediction[c] = static_cast(result); + } + return true; + } + return false; // Not all data is available for prediction +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h new file mode 100644 index 000000000..02cf7e60f --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h @@ -0,0 +1,344 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_ + +#include + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" +#include "draco/compression/bit_coders/rans_bit_decoder.h" +#include "draco/core/varint_decoding.h" +#include "draco/core/vector_d.h" +#include "draco/draco_features.h" +#include "draco/mesh/corner_table.h" + +namespace draco { + +// Decoder for predictions of UV coordinates encoded by our specialized texture +// coordinate predictor. See the corresponding encoder for more details. Note +// that this predictor is not portable and should not be used anymore. See +// MeshPredictionSchemeTexCoordsPortableEncoder/Decoder for a portable version +// of this prediction scheme. 
+template +class MeshPredictionSchemeTexCoordsDecoder + : public MeshPredictionSchemeDecoder { + public: + using CorrType = typename MeshPredictionSchemeDecoder::CorrType; + MeshPredictionSchemeTexCoordsDecoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data, int version) + : MeshPredictionSchemeDecoder( + attribute, transform, mesh_data), + pos_attribute_(nullptr), + entry_to_point_id_map_(nullptr), + num_components_(0), + version_(version) {} + + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + + bool DecodePredictionData(DecoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_TEX_COORDS_DEPRECATED; + } + + bool IsInitialized() const override { + if (pos_attribute_ == nullptr) { + return false; + } + if (!this->mesh_data().IsInitialized()) { + return false; + } + return true; + } + + int GetNumParentAttributes() const override { return 1; } + + GeometryAttribute::Type GetParentAttributeType(int i) const override { + DRACO_DCHECK_EQ(i, 0); + (void)i; + return GeometryAttribute::POSITION; + } + + bool SetParentAttribute(const PointAttribute *att) override { + if (att == nullptr) { + return false; + } + if (att->attribute_type() != GeometryAttribute::POSITION) { + return false; // Invalid attribute type. + } + if (att->num_components() != 3) { + return false; // Currently works only for 3 component positions. + } + pos_attribute_ = att; + return true; + } + + protected: + Vector3f GetPositionForEntryId(int entry_id) const { + const PointIndex point_id = entry_to_point_id_map_[entry_id]; + Vector3f pos; + pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id), + &pos[0]); + return pos; + } + + Vector2f GetTexCoordForEntryId(int entry_id, const DataTypeT *data) const { + const int data_offset = entry_id * num_components_; + return Vector2f(static_cast(data[data_offset]), + static_cast(data[data_offset + 1])); + } + + void ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data, + int data_id); + + private: + const PointAttribute *pos_attribute_; + const PointIndex *entry_to_point_id_map_; + std::unique_ptr predicted_value_; + int num_components_; + // Encoded / decoded array of UV flips. + std::vector orientations_; + int version_; +}; + +template +bool MeshPredictionSchemeTexCoordsDecoder:: + ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int /* size */, int num_components, + const PointIndex *entry_to_point_id_map) { + num_components_ = num_components; + entry_to_point_id_map_ = entry_to_point_id_map; + predicted_value_ = + std::unique_ptr(new DataTypeT[num_components]); + this->transform().Init(num_components); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + for (int p = 0; p < corner_map_size; ++p) { + const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); + ComputePredictedValue(corner_id, out_data, p); + + const int dst_offset = p * num_components; + this->transform().ComputeOriginalValue( + predicted_value_.get(), in_corr + dst_offset, out_data + dst_offset); + } + return true; +} + +template +bool MeshPredictionSchemeTexCoordsDecoder:: + DecodePredictionData(DecoderBuffer *buffer) { + // Decode the delta coded orientations. 
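The orientation count read at the start of DecodePredictionData is stored either as a raw uint32 or, for newer bitstreams, as a varint. For reference, this is a standalone sketch of the usual base-128 varint technique (seven payload bits per byte, high bit flags continuation); it illustrates the idea only and is not claimed to match the exact byte layout of Draco's EncodeVarint/DecodeVarint.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Append |value| as a base-128 varint: 7 payload bits per byte, the high bit
// marks "more bytes follow".
void AppendVarint(uint32_t value, std::vector<uint8_t> *out) {
  while (value >= 0x80) {
    out->push_back(static_cast<uint8_t>(value) | 0x80);
    value >>= 7;
  }
  out->push_back(static_cast<uint8_t>(value));
}

// Read one varint starting at |*pos|; returns false on truncated or malformed
// input.
bool ReadVarint(const std::vector<uint8_t> &in, size_t *pos, uint32_t *value) {
  uint32_t result = 0;
  for (int shift = 0; shift < 35; shift += 7) {
    if (*pos >= in.size()) {
      return false;  // Truncated input.
    }
    const uint8_t byte = in[(*pos)++];
    result |= static_cast<uint32_t>(byte & 0x7F) << shift;
    if (!(byte & 0x80)) {
      *value = result;
      return true;
    }
  }
  return false;  // Too many continuation bytes for a 32-bit value.
}

int main() {
  std::vector<uint8_t> buf;
  AppendVarint(300, &buf);  // Encoded as 0xAC 0x02.
  AppendVarint(5, &buf);    // Encoded as 0x05.
  size_t pos = 0;
  uint32_t a = 0, b = 0;
  assert(ReadVarint(buf, &pos, &a) && a == 300);
  assert(ReadVarint(buf, &pos, &b) && b == 5);
  return 0;
}
```

Small counts, which are the common case, then cost a single byte.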
+ uint32_t num_orientations = 0; + if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) { + if (!buffer->Decode(&num_orientations)) { + return false; + } + } else { + if (!DecodeVarint(&num_orientations, buffer)) { + return false; + } + } + if (num_orientations == 0) { + return false; + } + orientations_.resize(num_orientations); + bool last_orientation = true; + RAnsBitDecoder decoder; + if (!decoder.StartDecoding(buffer)) { + return false; + } + for (uint32_t i = 0; i < num_orientations; ++i) { + if (!decoder.DecodeNextBit()) { + last_orientation = !last_orientation; + } + orientations_[i] = last_orientation; + } + decoder.EndDecoding(); + return MeshPredictionSchemeDecoder::DecodePredictionData(buffer); +} + +template +void MeshPredictionSchemeTexCoordsDecoder:: + ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data, + int data_id) { + // Compute the predicted UV coordinate from the positions on all corners + // of the processed triangle. For the best prediction, the UV coordinates + // on the next/previous corners need to be already encoded/decoded. + const CornerIndex next_corner_id = + this->mesh_data().corner_table()->Next(corner_id); + const CornerIndex prev_corner_id = + this->mesh_data().corner_table()->Previous(corner_id); + // Get the encoded data ids from the next and previous corners. + // The data id is the encoding order of the UV coordinates. + int next_data_id, prev_data_id; + + int next_vert_id, prev_vert_id; + next_vert_id = + this->mesh_data().corner_table()->Vertex(next_corner_id).value(); + prev_vert_id = + this->mesh_data().corner_table()->Vertex(prev_corner_id).value(); + + next_data_id = this->mesh_data().vertex_to_data_map()->at(next_vert_id); + prev_data_id = this->mesh_data().vertex_to_data_map()->at(prev_vert_id); + + if (prev_data_id < data_id && next_data_id < data_id) { + // Both other corners have available UV coordinates for prediction. + const Vector2f n_uv = GetTexCoordForEntryId(next_data_id, data); + const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data); + if (p_uv == n_uv) { + // We cannot do a reliable prediction on degenerated UV triangles. + predicted_value_[0] = static_cast(p_uv[0]); + predicted_value_[1] = static_cast(p_uv[1]); + return; + } + + // Get positions at all corners. + const Vector3f tip_pos = GetPositionForEntryId(data_id); + const Vector3f next_pos = GetPositionForEntryId(next_data_id); + const Vector3f prev_pos = GetPositionForEntryId(prev_data_id); + // Use the positions of the above triangle to predict the texture coordinate + // on the tip corner C. + // Convert the triangle into a new coordinate system defined by orthogonal + // bases vectors S, T, where S is vector prev_pos - next_pos and T is an + // perpendicular vector to S in the same plane as vector the + // tip_pos - next_pos. + // The transformed triangle in the new coordinate system is then going to + // be represented as: + // + // 1 ^ + // | + // | + // | C + // | / \ + // | / \ + // |/ \ + // N--------------P + // 0 1 + // + // Where next_pos point (N) is at position (0, 0), prev_pos point (P) is + // at (1, 0). Our goal is to compute the position of the tip_pos point (C) + // in this new coordinate space (s, t). + // + const Vector3f pn = prev_pos - next_pos; + const Vector3f cn = tip_pos - next_pos; + const float pn_norm2_squared = pn.SquaredNorm(); + // Coordinate s of the tip corner C is simply the dot product of the + // normalized vectors |pn| and |cn| (normalized by the length of |pn|). 
+ // Since both of these vectors are normalized, we don't need to perform the + // normalization explicitly and instead we can just use the squared norm + // of |pn| as a denominator of the resulting dot product of non normalized + // vectors. + float s, t; + // |pn_norm2_squared| can be exactly 0 when the next_pos and prev_pos are + // the same positions (e.g. because they were quantized to the same + // location). + if (version_ < DRACO_BITSTREAM_VERSION(1, 2) || pn_norm2_squared > 0) { + s = pn.Dot(cn) / pn_norm2_squared; + // To get the coordinate t, we can use formula: + // t = |C-N - (P-N) * s| / |P-N| + // Do not use std::sqrt to avoid changes in the bitstream. + t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared); + } else { + s = 0; + t = 0; + } + + // Now we need to transform the point (s, t) to the texture coordinate space + // UV. We know the UV coordinates on points N and P (N_UV and P_UV). Lets + // denote P_UV - N_UV = PN_UV. PN_UV is then 2 dimensional vector that can + // be used to define transformation from the normalized coordinate system + // to the texture coordinate system using a 3x3 affine matrix M: + // + // M = | PN_UV[0] -PN_UV[1] N_UV[0] | + // | PN_UV[1] PN_UV[0] N_UV[1] | + // | 0 0 1 | + // + // The predicted point C_UV in the texture space is then equal to + // C_UV = M * (s, t, 1). Because the triangle in UV space may be flipped + // around the PN_UV axis, we also need to consider point C_UV' = M * (s, -t) + // as the prediction. + const Vector2f pn_uv = p_uv - n_uv; + const float pnus = pn_uv[0] * s + n_uv[0]; + const float pnut = pn_uv[0] * t; + const float pnvs = pn_uv[1] * s + n_uv[1]; + const float pnvt = pn_uv[1] * t; + Vector2f predicted_uv; + + // When decoding the data, we already know which orientation to use. + const bool orientation = orientations_.back(); + orientations_.pop_back(); + if (orientation) + predicted_uv = Vector2f(pnus - pnvt, pnvs + pnut); + else + predicted_uv = Vector2f(pnus + pnvt, pnvs - pnut); + + if (std::is_integral::value) { + // Round the predicted value for integer types. + if (std::isnan(predicted_uv[0])) { + predicted_value_[0] = INT_MIN; + } else { + predicted_value_[0] = static_cast(floor(predicted_uv[0] + 0.5)); + } + if (std::isnan(predicted_uv[1])) { + predicted_value_[1] = INT_MIN; + } else { + predicted_value_[1] = static_cast(floor(predicted_uv[1] + 0.5)); + } + } else { + predicted_value_[0] = static_cast(predicted_uv[0]); + predicted_value_[1] = static_cast(predicted_uv[1]); + } + return; + } + // Else we don't have available textures on both corners. For such case we + // can't use positions for predicting the uv value and we resort to delta + // coding. + int data_offset = 0; + if (prev_data_id < data_id) { + // Use the value on the previous corner as the prediction. + data_offset = prev_data_id * num_components_; + } + if (next_data_id < data_id) { + // Use the value on the next corner as the prediction. + data_offset = next_data_id * num_components_; + } else { + // None of the other corners have a valid value. Use the last encoded value + // as the prediction if possible. + if (data_id > 0) { + data_offset = (data_id - 1) * num_components_; + } else { + // We are encoding the first value. Predict 0. 
+ for (int i = 0; i < num_components_; ++i) { + predicted_value_[i] = 0; + } + return; + } + } + for (int i = 0; i < num_components_; ++i) { + predicted_value_[i] = data[data_offset + i]; + } +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_ +#endif diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h new file mode 100644 index 000000000..813b72ae3 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h @@ -0,0 +1,318 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_ + +#include + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" +#include "draco/compression/bit_coders/rans_bit_encoder.h" +#include "draco/core/varint_encoding.h" +#include "draco/core/vector_d.h" +#include "draco/mesh/corner_table.h" + +namespace draco { + +// Prediction scheme designed for predicting texture coordinates from known +// spatial position of vertices. For good parametrization, the ratios between +// triangle edge lengths should be about the same in both the spatial and UV +// coordinate spaces, which makes the positions a good predictor for the UV +// coordinates. 
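The geometric construction summarized above is easy to sanity-check by hand. The sketch below, written with plain std::array math and illustrative helper names, re-derives the two candidate UV predictions (the two possible "orientations") from three positions and the two already known UV corners; the example triangle is chosen so that one candidate reproduces the tip corner's true UV exactly.

```cpp
#include <array>
#include <cmath>
#include <cstdio>

using Vec2 = std::array<float, 2>;
using Vec3 = std::array<float, 3>;

static Vec3 Sub(const Vec3 &a, const Vec3 &b) {
  return {a[0] - b[0], a[1] - b[1], a[2] - b[2]};
}
static float Dot(const Vec3 &a, const Vec3 &b) {
  return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}

// Returns the two candidate UV predictions for the tip corner C, given the
// positions of N, P, C and the already known UVs of N and P.
static void PredictUv(const Vec3 &next_pos, const Vec3 &prev_pos,
                      const Vec3 &tip_pos, const Vec2 &n_uv, const Vec2 &p_uv,
                      Vec2 *uv_a, Vec2 *uv_b) {
  const Vec3 pn = Sub(prev_pos, next_pos);
  const Vec3 cn = Sub(tip_pos, next_pos);
  const float pn_norm2 = Dot(pn, pn);
  float s = 0.f, t = 0.f;
  if (pn_norm2 > 0.f) {
    s = Dot(pn, cn) / pn_norm2;
    // t = |CN - s * PN| / |PN|, computed via squared norms.
    const Vec3 r = {cn[0] - s * pn[0], cn[1] - s * pn[1], cn[2] - s * pn[2]};
    t = std::sqrt(Dot(r, r) / pn_norm2);
  }
  const Vec2 pn_uv = {p_uv[0] - n_uv[0], p_uv[1] - n_uv[1]};
  const float us = pn_uv[0] * s + n_uv[0];
  const float ut = pn_uv[0] * t;
  const float vs = pn_uv[1] * s + n_uv[1];
  const float vt = pn_uv[1] * t;
  *uv_a = {us - vt, vs + ut};  // One orientation of the UV triangle.
  *uv_b = {us + vt, vs - ut};  // The mirrored orientation.
}

int main() {
  // A right triangle in the XY plane with an axis-aligned parametrization:
  // one of the two candidates matches the true UV (0.5, 0.5) of the tip.
  Vec2 a, b;
  PredictUv(/*next_pos=*/{0, 0, 0}, /*prev_pos=*/{2, 0, 0},
            /*tip_pos=*/{1, 1, 0}, /*n_uv=*/{0, 0}, /*p_uv=*/{1, 0}, &a, &b);
  std::printf("candidate A: (%g, %g)  candidate B: (%g, %g)\n", a[0], a[1],
              b[0], b[1]);
  return 0;
}
```

The encoder evaluates both candidates, keeps the one closer to the actual UV, and records that choice as a single orientation bit so the decoder can make the same selection.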
+template +class MeshPredictionSchemeTexCoordsEncoder + : public MeshPredictionSchemeEncoder { + public: + using CorrType = typename MeshPredictionSchemeEncoder::CorrType; + MeshPredictionSchemeTexCoordsEncoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeEncoder( + attribute, transform, mesh_data), + pos_attribute_(nullptr), + entry_to_point_id_map_(nullptr), + num_components_(0) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + + bool EncodePredictionData(EncoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_TEX_COORDS_DEPRECATED; + } + + bool IsInitialized() const override { + if (pos_attribute_ == nullptr) { + return false; + } + if (!this->mesh_data().IsInitialized()) { + return false; + } + return true; + } + + int GetNumParentAttributes() const override { return 1; } + + GeometryAttribute::Type GetParentAttributeType(int i) const override { + DRACO_DCHECK_EQ(i, 0); + (void)i; + return GeometryAttribute::POSITION; + } + + bool SetParentAttribute(const PointAttribute *att) override { + if (att->attribute_type() != GeometryAttribute::POSITION) { + return false; // Invalid attribute type. + } + if (att->num_components() != 3) { + return false; // Currently works only for 3 component positions. + } + pos_attribute_ = att; + return true; + } + + protected: + Vector3f GetPositionForEntryId(int entry_id) const { + const PointIndex point_id = entry_to_point_id_map_[entry_id]; + Vector3f pos; + pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id), + &pos[0]); + return pos; + } + + Vector2f GetTexCoordForEntryId(int entry_id, const DataTypeT *data) const { + const int data_offset = entry_id * num_components_; + return Vector2f(static_cast(data[data_offset]), + static_cast(data[data_offset + 1])); + } + + void ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data, + int data_id); + + private: + const PointAttribute *pos_attribute_; + const PointIndex *entry_to_point_id_map_; + std::unique_ptr predicted_value_; + int num_components_; + // Encoded / decoded array of UV flips. + std::vector orientations_; +}; + +template +bool MeshPredictionSchemeTexCoordsEncoder:: + ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr, + int size, int num_components, + const PointIndex *entry_to_point_id_map) { + num_components_ = num_components; + entry_to_point_id_map_ = entry_to_point_id_map; + predicted_value_ = + std::unique_ptr(new DataTypeT[num_components]); + this->transform().Init(in_data, size, num_components); + // We start processing from the end because this prediction uses data from + // previous entries that could be overwritten when an entry is processed. + for (int p = + static_cast(this->mesh_data().data_to_corner_map()->size()) - 1; + p >= 0; --p) { + const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); + ComputePredictedValue(corner_id, in_data, p); + + const int dst_offset = p * num_components; + this->transform().ComputeCorrection( + in_data + dst_offset, predicted_value_.get(), out_corr + dst_offset); + } + return true; +} + +template +bool MeshPredictionSchemeTexCoordsEncoder:: + EncodePredictionData(EncoderBuffer *buffer) { + // Encode the delta-coded orientations using arithmetic coding. 
+ const uint32_t num_orientations = static_cast(orientations_.size()); + EncodeVarint(num_orientations, buffer); + bool last_orientation = true; + RAnsBitEncoder encoder; + encoder.StartEncoding(); + for (bool orientation : orientations_) { + encoder.EncodeBit(orientation == last_orientation); + last_orientation = orientation; + } + encoder.EndEncoding(buffer); + return MeshPredictionSchemeEncoder::EncodePredictionData(buffer); +} + +template +void MeshPredictionSchemeTexCoordsEncoder:: + ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data, + int data_id) { + // Compute the predicted UV coordinate from the positions on all corners + // of the processed triangle. For the best prediction, the UV coordinates + // on the next/previous corners need to be already encoded/decoded. + const CornerIndex next_corner_id = + this->mesh_data().corner_table()->Next(corner_id); + const CornerIndex prev_corner_id = + this->mesh_data().corner_table()->Previous(corner_id); + // Get the encoded data ids from the next and previous corners. + // The data id is the encoding order of the UV coordinates. + int next_data_id, prev_data_id; + + int next_vert_id, prev_vert_id; + next_vert_id = + this->mesh_data().corner_table()->Vertex(next_corner_id).value(); + prev_vert_id = + this->mesh_data().corner_table()->Vertex(prev_corner_id).value(); + + next_data_id = this->mesh_data().vertex_to_data_map()->at(next_vert_id); + prev_data_id = this->mesh_data().vertex_to_data_map()->at(prev_vert_id); + + if (prev_data_id < data_id && next_data_id < data_id) { + // Both other corners have available UV coordinates for prediction. + const Vector2f n_uv = GetTexCoordForEntryId(next_data_id, data); + const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data); + if (p_uv == n_uv) { + // We cannot do a reliable prediction on degenerated UV triangles. + predicted_value_[0] = static_cast(p_uv[0]); + predicted_value_[1] = static_cast(p_uv[1]); + return; + } + + // Get positions at all corners. + const Vector3f tip_pos = GetPositionForEntryId(data_id); + const Vector3f next_pos = GetPositionForEntryId(next_data_id); + const Vector3f prev_pos = GetPositionForEntryId(prev_data_id); + // Use the positions of the above triangle to predict the texture coordinate + // on the tip corner C. + // Convert the triangle into a new coordinate system defined by orthogonal + // bases vectors S, T, where S is vector prev_pos - next_pos and T is an + // perpendicular vector to S in the same plane as vector the + // tip_pos - next_pos. + // The transformed triangle in the new coordinate system is then going to + // be represented as: + // + // 1 ^ + // | + // | + // | C + // | / \ + // | / \ + // |/ \ + // N--------------P + // 0 1 + // + // Where next_pos point (N) is at position (0, 0), prev_pos point (P) is + // at (1, 0). Our goal is to compute the position of the tip_pos point (C) + // in this new coordinate space (s, t). + // + const Vector3f pn = prev_pos - next_pos; + const Vector3f cn = tip_pos - next_pos; + const float pn_norm2_squared = pn.SquaredNorm(); + // Coordinate s of the tip corner C is simply the dot product of the + // normalized vectors |pn| and |cn| (normalized by the length of |pn|). + // Since both of these vectors are normalized, we don't need to perform the + // normalization explicitly and instead we can just use the squared norm + // of |pn| as a denominator of the resulting dot product of non normalized + // vectors. 
+ float s, t; + // |pn_norm2_squared| can be exactly 0 when the next_pos and prev_pos are + // the same positions (e.g. because they were quantized to the same + // location). + if (pn_norm2_squared > 0) { + s = pn.Dot(cn) / pn_norm2_squared; + // To get the coordinate t, we can use formula: + // t = |C-N - (P-N) * s| / |P-N| + // Do not use std::sqrt to avoid changes in the bitstream. + t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared); + } else { + s = 0; + t = 0; + } + + // Now we need to transform the point (s, t) to the texture coordinate space + // UV. We know the UV coordinates on points N and P (N_UV and P_UV). Lets + // denote P_UV - N_UV = PN_UV. PN_UV is then 2 dimensional vector that can + // be used to define transformation from the normalized coordinate system + // to the texture coordinate system using a 3x3 affine matrix M: + // + // M = | PN_UV[0] -PN_UV[1] N_UV[0] | + // | PN_UV[1] PN_UV[0] N_UV[1] | + // | 0 0 1 | + // + // The predicted point C_UV in the texture space is then equal to + // C_UV = M * (s, t, 1). Because the triangle in UV space may be flipped + // around the PN_UV axis, we also need to consider point C_UV' = M * (s, -t) + // as the prediction. + const Vector2f pn_uv = p_uv - n_uv; + const float pnus = pn_uv[0] * s + n_uv[0]; + const float pnut = pn_uv[0] * t; + const float pnvs = pn_uv[1] * s + n_uv[1]; + const float pnvt = pn_uv[1] * t; + Vector2f predicted_uv; + + // When encoding compute both possible vectors and determine which one + // results in a better prediction. + const Vector2f predicted_uv_0(pnus - pnvt, pnvs + pnut); + const Vector2f predicted_uv_1(pnus + pnvt, pnvs - pnut); + const Vector2f c_uv = GetTexCoordForEntryId(data_id, data); + if ((c_uv - predicted_uv_0).SquaredNorm() < + (c_uv - predicted_uv_1).SquaredNorm()) { + predicted_uv = predicted_uv_0; + orientations_.push_back(true); + } else { + predicted_uv = predicted_uv_1; + orientations_.push_back(false); + } + if (std::is_integral::value) { + // Round the predicted value for integer types. + predicted_value_[0] = static_cast(floor(predicted_uv[0] + 0.5)); + predicted_value_[1] = static_cast(floor(predicted_uv[1] + 0.5)); + } else { + predicted_value_[0] = static_cast(predicted_uv[0]); + predicted_value_[1] = static_cast(predicted_uv[1]); + } + return; + } + // Else we don't have available textures on both corners. For such case we + // can't use positions for predicting the uv value and we resort to delta + // coding. + int data_offset = 0; + if (prev_data_id < data_id) { + // Use the value on the previous corner as the prediction. + data_offset = prev_data_id * num_components_; + } + if (next_data_id < data_id) { + // Use the value on the next corner as the prediction. + data_offset = next_data_id * num_components_; + } else { + // None of the other corners have a valid value. Use the last encoded value + // as the prediction if possible. + if (data_id > 0) { + data_offset = (data_id - 1) * num_components_; + } else { + // We are encoding the first value. Predict 0. 
+ for (int i = 0; i < num_components_; ++i) { + predicted_value_[i] = 0; + } + return; + } + } + for (int i = 0; i < num_components_; ++i) { + predicted_value_[i] = data[data_offset + i]; + } +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h new file mode 100644 index 000000000..83d496639 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h @@ -0,0 +1,143 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h" +#include "draco/compression/bit_coders/rans_bit_decoder.h" + +namespace draco { + +// Decoder for predictions of UV coordinates encoded by our specialized and +// portable texture coordinate predictor. See the corresponding encoder for more +// details. +template +class MeshPredictionSchemeTexCoordsPortableDecoder + : public MeshPredictionSchemeDecoder { + public: + using CorrType = typename MeshPredictionSchemeDecoder::CorrType; + MeshPredictionSchemeTexCoordsPortableDecoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeDecoder( + attribute, transform, mesh_data), + predictor_(mesh_data) {} + + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + + bool DecodePredictionData(DecoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_TEX_COORDS_PORTABLE; + } + + bool IsInitialized() const override { + if (!predictor_.IsInitialized()) { + return false; + } + if (!this->mesh_data().IsInitialized()) { + return false; + } + return true; + } + + int GetNumParentAttributes() const override { return 1; } + + GeometryAttribute::Type GetParentAttributeType(int i) const override { + DRACO_DCHECK_EQ(i, 0); + (void)i; + return GeometryAttribute::POSITION; + } + + bool SetParentAttribute(const PointAttribute *att) override { + if (!att || att->attribute_type() != GeometryAttribute::POSITION) { + return false; // Invalid attribute type. + } + if (att->num_components() != 3) { + return false; // Currently works only for 3 component positions. 
+ } + predictor_.SetPositionAttribute(*att); + return true; + } + + private: + MeshPredictionSchemeTexCoordsPortablePredictor + predictor_; +}; + +template +bool MeshPredictionSchemeTexCoordsPortableDecoder< + DataTypeT, TransformT, + MeshDataT>::ComputeOriginalValues(const CorrType *in_corr, + DataTypeT *out_data, int /* size */, + int num_components, + const PointIndex *entry_to_point_id_map) { + if (num_components != MeshPredictionSchemeTexCoordsPortablePredictor< + DataTypeT, MeshDataT>::kNumComponents) { + return false; + } + predictor_.SetEntryToPointIdMap(entry_to_point_id_map); + this->transform().Init(num_components); + + const int corner_map_size = + static_cast(this->mesh_data().data_to_corner_map()->size()); + for (int p = 0; p < corner_map_size; ++p) { + const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); + if (!predictor_.template ComputePredictedValue(corner_id, out_data, + p)) { + return false; + } + + const int dst_offset = p * num_components; + this->transform().ComputeOriginalValue(predictor_.predicted_value(), + in_corr + dst_offset, + out_data + dst_offset); + } + return true; +} + +template +bool MeshPredictionSchemeTexCoordsPortableDecoder< + DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer + *buffer) { + // Decode the delta coded orientations. + int32_t num_orientations = 0; + if (!buffer->Decode(&num_orientations) || num_orientations < 0) { + return false; + } + predictor_.ResizeOrientations(num_orientations); + bool last_orientation = true; + RAnsBitDecoder decoder; + if (!decoder.StartDecoding(buffer)) { + return false; + } + for (int i = 0; i < num_orientations; ++i) { + if (!decoder.DecodeNextBit()) { + last_orientation = !last_orientation; + } + predictor_.set_orientation(i, last_orientation); + } + decoder.EndDecoding(); + return MeshPredictionSchemeDecoder::DecodePredictionData(buffer); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h new file mode 100644 index 000000000..741ec66dc --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h @@ -0,0 +1,133 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
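Both the deprecated and the portable texture-coordinate schemes serialize the per-corner orientation choices the same way: each bit records whether the orientation equals the previous one, and the resulting bit sequence is entropy coded with the rANS bit coder. Below is a minimal round trip of just that delta representation, with a plain std::vector<bool> standing in for the entropy coder; the names are illustrative.

```cpp
#include <cassert>
#include <vector>

// Delta-encode orientations: bit i is true when orientation i equals the
// previous orientation (with an implicit initial value of 'true').
std::vector<bool> EncodeOrientationBits(const std::vector<bool> &orientations) {
  std::vector<bool> bits;
  bool last = true;
  for (bool o : orientations) {
    bits.push_back(o == last);
    last = o;
  }
  return bits;
}

// Inverse transform: flip the running value whenever a bit is false.
std::vector<bool> DecodeOrientationBits(const std::vector<bool> &bits) {
  std::vector<bool> orientations;
  bool last = true;
  for (bool b : bits) {
    if (!b) {
      last = !last;
    }
    orientations.push_back(last);
  }
  return orientations;
}

int main() {
  const std::vector<bool> orientations = {true, true, false, false, true};
  const std::vector<bool> bits = EncodeOrientationBits(orientations);
  assert(DecodeOrientationBits(bits) == orientations);
  return 0;
}
```

Runs of identical orientations turn into runs of set bits, which is exactly the kind of skewed distribution the rANS bit coder compresses well.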
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h" +#include "draco/compression/bit_coders/rans_bit_encoder.h" + +namespace draco { + +// Prediction scheme designed for predicting texture coordinates from known +// spatial position of vertices. For isometric parametrizations, the ratios +// between triangle edge lengths should be about the same in both the spatial +// and UV coordinate spaces, which makes the positions a good predictor for the +// UV coordinates. Note that this may not be the optimal approach for other +// parametrizations such as projective ones. +template +class MeshPredictionSchemeTexCoordsPortableEncoder + : public MeshPredictionSchemeEncoder { + public: + using CorrType = typename MeshPredictionSchemeEncoder::CorrType; + MeshPredictionSchemeTexCoordsPortableEncoder(const PointAttribute *attribute, + const TransformT &transform, + const MeshDataT &mesh_data) + : MeshPredictionSchemeEncoder( + attribute, transform, mesh_data), + predictor_(mesh_data) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + + bool EncodePredictionData(EncoderBuffer *buffer) override; + + PredictionSchemeMethod GetPredictionMethod() const override { + return MESH_PREDICTION_TEX_COORDS_PORTABLE; + } + + bool IsInitialized() const override { + if (!predictor_.IsInitialized()) { + return false; + } + if (!this->mesh_data().IsInitialized()) { + return false; + } + return true; + } + + int GetNumParentAttributes() const override { return 1; } + + GeometryAttribute::Type GetParentAttributeType(int i) const override { + DRACO_DCHECK_EQ(i, 0); + (void)i; + return GeometryAttribute::POSITION; + } + + bool SetParentAttribute(const PointAttribute *att) override { + if (att->attribute_type() != GeometryAttribute::POSITION) { + return false; // Invalid attribute type. + } + if (att->num_components() != 3) { + return false; // Currently works only for 3 component positions. + } + predictor_.SetPositionAttribute(*att); + return true; + } + + private: + MeshPredictionSchemeTexCoordsPortablePredictor + predictor_; +}; + +template +bool MeshPredictionSchemeTexCoordsPortableEncoder:: + ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr, + int size, int num_components, + const PointIndex *entry_to_point_id_map) { + predictor_.SetEntryToPointIdMap(entry_to_point_id_map); + this->transform().Init(in_data, size, num_components); + // We start processing from the end because this prediction uses data from + // previous entries that could be overwritten when an entry is processed. 
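The ordering noted in that comment is the classic in-place coding argument: an entry has to keep its original value until every later entry that predicts from it has been processed, so correction passes run backwards while reconstruction passes run forwards. Plain delta coding shows the same effect in a few lines; this is a standalone sketch, not Draco's API.

```cpp
#include <cassert>
#include <vector>

// Backward pass: when entry i is turned into a delta, entry i - 1 (its
// predictor) still holds its original value.
void DeltaEncodeInPlace(std::vector<int> *values) {
  for (int i = static_cast<int>(values->size()) - 1; i > 0; --i) {
    (*values)[i] -= (*values)[i - 1];
  }
}

// Forward pass: the predictor has already been reconstructed by the time it
// is needed.
void DeltaDecodeInPlace(std::vector<int> *values) {
  for (size_t i = 1; i < values->size(); ++i) {
    (*values)[i] += (*values)[i - 1];
  }
}

int main() {
  std::vector<int> v = {5, 7, 8, 12};
  const std::vector<int> original = v;
  DeltaEncodeInPlace(&v);  // v becomes {5, 2, 1, 4}.
  DeltaDecodeInPlace(&v);
  assert(v == original);
  return 0;
}
```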
+ for (int p = + static_cast(this->mesh_data().data_to_corner_map()->size() - 1); + p >= 0; --p) { + const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); + predictor_.template ComputePredictedValue(corner_id, in_data, p); + + const int dst_offset = p * num_components; + this->transform().ComputeCorrection(in_data + dst_offset, + predictor_.predicted_value(), + out_corr + dst_offset); + } + return true; +} + +template +bool MeshPredictionSchemeTexCoordsPortableEncoder< + DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer + *buffer) { + // Encode the delta-coded orientations using arithmetic coding. + const int32_t num_orientations = predictor_.num_orientations(); + buffer->Encode(num_orientations); + bool last_orientation = true; + RAnsBitEncoder encoder; + encoder.StartEncoding(); + for (int i = 0; i < num_orientations; ++i) { + const bool orientation = predictor_.orientation(i); + encoder.EncodeBit(orientation == last_orientation); + last_orientation = orientation; + } + encoder.EndEncoding(buffer); + return MeshPredictionSchemeEncoder::EncodePredictionData(buffer); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h new file mode 100644 index 000000000..f05e5ddd7 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h @@ -0,0 +1,263 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_ + +#include + +#include "draco/attributes/point_attribute.h" +#include "draco/core/math_utils.h" +#include "draco/core/vector_d.h" +#include "draco/mesh/corner_table.h" + +namespace draco { + +// Predictor functionality used for portable UV prediction by both encoder and +// decoder. 
+template +class MeshPredictionSchemeTexCoordsPortablePredictor { + public: + static constexpr int kNumComponents = 2; + + explicit MeshPredictionSchemeTexCoordsPortablePredictor(const MeshDataT &md) + : pos_attribute_(nullptr), + entry_to_point_id_map_(nullptr), + mesh_data_(md) {} + void SetPositionAttribute(const PointAttribute &position_attribute) { + pos_attribute_ = &position_attribute; + } + void SetEntryToPointIdMap(const PointIndex *map) { + entry_to_point_id_map_ = map; + } + bool IsInitialized() const { return pos_attribute_ != nullptr; } + + VectorD GetPositionForEntryId(int entry_id) const { + const PointIndex point_id = entry_to_point_id_map_[entry_id]; + VectorD pos; + pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id), + &pos[0]); + return pos; + } + + VectorD GetTexCoordForEntryId(int entry_id, + const DataTypeT *data) const { + const int data_offset = entry_id * kNumComponents; + return VectorD(data[data_offset], data[data_offset + 1]); + } + + // Computes predicted UV coordinates on a given corner. The coordinates are + // stored in |predicted_value_| member. + template + bool ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data, + int data_id); + + const DataTypeT *predicted_value() const { return predicted_value_; } + bool orientation(int i) const { return orientations_[i]; } + void set_orientation(int i, bool v) { orientations_[i] = v; } + size_t num_orientations() const { return orientations_.size(); } + void ResizeOrientations(int num_orientations) { + orientations_.resize(num_orientations); + } + + private: + const PointAttribute *pos_attribute_; + const PointIndex *entry_to_point_id_map_; + DataTypeT predicted_value_[kNumComponents]; + // Encoded / decoded array of UV flips. + // TODO(ostava): We should remove this and replace this with in-place encoding + // and decoding to avoid unnecessary copy. + std::vector orientations_; + MeshDataT mesh_data_; +}; + +template +template +bool MeshPredictionSchemeTexCoordsPortablePredictor< + DataTypeT, MeshDataT>::ComputePredictedValue(CornerIndex corner_id, + const DataTypeT *data, + int data_id) { + // Compute the predicted UV coordinate from the positions on all corners + // of the processed triangle. For the best prediction, the UV coordinates + // on the next/previous corners need to be already encoded/decoded. + const CornerIndex next_corner_id = mesh_data_.corner_table()->Next(corner_id); + const CornerIndex prev_corner_id = + mesh_data_.corner_table()->Previous(corner_id); + // Get the encoded data ids from the next and previous corners. + // The data id is the encoding order of the UV coordinates. + int next_data_id, prev_data_id; + + int next_vert_id, prev_vert_id; + next_vert_id = mesh_data_.corner_table()->Vertex(next_corner_id).value(); + prev_vert_id = mesh_data_.corner_table()->Vertex(prev_corner_id).value(); + + next_data_id = mesh_data_.vertex_to_data_map()->at(next_vert_id); + prev_data_id = mesh_data_.vertex_to_data_map()->at(prev_vert_id); + + if (prev_data_id < data_id && next_data_id < data_id) { + // Both other corners have available UV coordinates for prediction. + const VectorD n_uv = GetTexCoordForEntryId(next_data_id, data); + const VectorD p_uv = GetTexCoordForEntryId(prev_data_id, data); + if (p_uv == n_uv) { + // We cannot do a reliable prediction on degenerated UV triangles. + predicted_value_[0] = p_uv[0]; + predicted_value_[1] = p_uv[1]; + return true; + } + + // Get positions at all corners. 
+ const VectorD tip_pos = GetPositionForEntryId(data_id); + const VectorD next_pos = GetPositionForEntryId(next_data_id); + const VectorD prev_pos = GetPositionForEntryId(prev_data_id); + // We use the positions of the above triangle to predict the texture + // coordinate on the tip corner C. + // To convert the triangle into the UV coordinate system we first compute + // position X on the vector |prev_pos - next_pos| that is the projection of + // point C onto vector |prev_pos - next_pos|: + // + // C + // /. \ + // / . \ + // / . \ + // N---X----------P + // + // Where next_pos is point (N), prev_pos is point (P) and tip_pos is the + // position of predicted coordinate (C). + // + const VectorD pn = prev_pos - next_pos; + const uint64_t pn_norm2_squared = pn.SquaredNorm(); + if (pn_norm2_squared != 0) { + // Compute the projection of C onto PN by computing dot product of CN with + // PN and normalizing it by length of PN. This gives us a factor |s| where + // |s = PN.Dot(CN) / PN.SquaredNorm2()|. This factor can be used to + // compute X in UV space |X_UV| as |X_UV = N_UV + s * PN_UV|. + const VectorD cn = tip_pos - next_pos; + const int64_t cn_dot_pn = pn.Dot(cn); + + const VectorD pn_uv = p_uv - n_uv; + // Because we perform all computations with integers, we don't explicitly + // compute the normalized factor |s|, but rather we perform all operations + // over UV vectors in a non-normalized coordinate system scaled with a + // scaling factor |pn_norm2_squared|: + // + // x_uv = X_UV * PN.Norm2Squared() + // + const VectorD x_uv = + n_uv * pn_norm2_squared + (cn_dot_pn * pn_uv); + + const int64_t pn_absmax_element = + std::max(std::max(std::abs(pn[0]), std::abs(pn[1])), std::abs(pn[2])); + if (cn_dot_pn > std::numeric_limits::max() / pn_absmax_element) { + // return false if squared length calculation would overflow. + return false; + } + + // Compute squared length of vector CX in position coordinate system: + const VectorD x_pos = + next_pos + (cn_dot_pn * pn) / pn_norm2_squared; + const uint64_t cx_norm2_squared = (tip_pos - x_pos).SquaredNorm(); + + // Compute vector CX_UV in the uv space by rotating vector PN_UV by 90 + // degrees and scaling it with factor CX.Norm2() / PN.Norm2(): + // + // CX_UV = (CX.Norm2() / PN.Norm2()) * Rot(PN_UV) + // + // To preserve precision, we perform all operations in scaled space as + // explained above, so we want the final vector to be: + // + // cx_uv = CX_UV * PN.Norm2Squared() + // + // We can then rewrite the formula as: + // + // cx_uv = CX.Norm2() * PN.Norm2() * Rot(PN_UV) + // + VectorD cx_uv(pn_uv[1], -pn_uv[0]); // Rotated PN_UV. + // Compute CX.Norm2() * PN.Norm2() + const uint64_t norm_squared = + IntSqrt(cx_norm2_squared * pn_norm2_squared); + // Final cx_uv in the scaled coordinate space. + cx_uv = cx_uv * norm_squared; + + // Predicted uv coordinate is then computed by either adding or + // subtracting CX_UV to/from X_UV. + VectorD predicted_uv; + if (is_encoder_t) { + // When encoding, compute both possible vectors and determine which one + // results in a better prediction. + // Both vectors need to be transformed back from the scaled space to + // the real UV coordinate space. 
+ const VectorD predicted_uv_0((x_uv + cx_uv) / + pn_norm2_squared); + const VectorD predicted_uv_1((x_uv - cx_uv) / + pn_norm2_squared); + const VectorD c_uv = GetTexCoordForEntryId(data_id, data); + if ((c_uv - predicted_uv_0).SquaredNorm() < + (c_uv - predicted_uv_1).SquaredNorm()) { + predicted_uv = predicted_uv_0; + orientations_.push_back(true); + } else { + predicted_uv = predicted_uv_1; + orientations_.push_back(false); + } + } else { + // When decoding the data, we already know which orientation to use. + if (orientations_.empty()) { + return false; + } + const bool orientation = orientations_.back(); + orientations_.pop_back(); + if (orientation) { + predicted_uv = (x_uv + cx_uv) / pn_norm2_squared; + } else { + predicted_uv = (x_uv - cx_uv) / pn_norm2_squared; + } + } + predicted_value_[0] = static_cast(predicted_uv[0]); + predicted_value_[1] = static_cast(predicted_uv[1]); + return true; + } + } + // Else we don't have available textures on both corners or the position data + // is invalid. For such cases we can't use positions for predicting the uv + // value and we resort to delta coding. + int data_offset = 0; + if (prev_data_id < data_id) { + // Use the value on the previous corner as the prediction. + data_offset = prev_data_id * kNumComponents; + } + if (next_data_id < data_id) { + // Use the value on the next corner as the prediction. + data_offset = next_data_id * kNumComponents; + } else { + // None of the other corners have a valid value. Use the last encoded value + // as the prediction if possible. + if (data_id > 0) { + data_offset = (data_id - 1) * kNumComponents; + } else { + // We are encoding the first value. Predict 0. + for (int i = 0; i < kNumComponents; ++i) { + predicted_value_[i] = 0; + } + return true; + } + } + for (int i = 0; i < kNumComponents; ++i) { + predicted_value_[i] = data[data_offset + i]; + } + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h new file mode 100644 index 000000000..064e1b44f --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h @@ -0,0 +1,90 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
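The integer predictor above avoids floating point by keeping every intermediate quantity in a coordinate space scaled by |PN|^2 and dividing only at the very end. The standalone sketch below walks through one concrete instance of that scaling trick; the struct names are illustrative, and std::sqrt stands in for Draco's IntSqrt (the example values make the square root exact, so the substitution does not change the result).

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

struct V3 { std::int64_t x, y, z; };
struct V2 { std::int64_t u, v; };

static V3 Sub(V3 a, V3 b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static std::int64_t Dot(V3 a, V3 b) {
  return a.x * b.x + a.y * b.y + a.z * b.z;
}

int main() {
  const V3 next_pos{0, 0, 0}, prev_pos{2, 0, 0}, tip_pos{1, 1, 0};
  const V2 n_uv{0, 0}, p_uv{2, 0};

  const V3 pn = Sub(prev_pos, next_pos);
  const V3 cn = Sub(tip_pos, next_pos);
  const std::int64_t pn2 = Dot(pn, pn);        // |PN|^2 = 4
  const std::int64_t cn_dot_pn = Dot(cn, pn);  // projection numerator = 2

  const V2 pn_uv{p_uv.u - n_uv.u, p_uv.v - n_uv.v};
  // X_UV in the space scaled by |PN|^2.
  const V2 x_uv{n_uv.u * pn2 + cn_dot_pn * pn_uv.u,
                n_uv.v * pn2 + cn_dot_pn * pn_uv.v};
  // Foot of the perpendicular from C onto PN, then |CX|^2.
  const V3 x_pos{next_pos.x + cn_dot_pn * pn.x / pn2,
                 next_pos.y + cn_dot_pn * pn.y / pn2,
                 next_pos.z + cn_dot_pn * pn.z / pn2};
  const V3 cx = Sub(tip_pos, x_pos);
  const std::int64_t cx2 = Dot(cx, cx);
  // Rotate PN_UV by 90 degrees and scale by sqrt(|CX|^2 * |PN|^2).
  const std::int64_t scale = static_cast<std::int64_t>(std::llround(
      std::sqrt(static_cast<double>(cx2) * static_cast<double>(pn2))));
  const V2 cx_uv{pn_uv.v * scale, -pn_uv.u * scale};

  // The two orientation candidates, recovered by the final division.
  const V2 pred_plus{(x_uv.u + cx_uv.u) / pn2, (x_uv.v + cx_uv.v) / pn2};
  const V2 pred_minus{(x_uv.u - cx_uv.u) / pn2, (x_uv.v - cx_uv.v) / pn2};
  std::printf("candidates: (%lld, %lld) and (%lld, %lld)\n",
              static_cast<long long>(pred_plus.u),
              static_cast<long long>(pred_plus.v),
              static_cast<long long>(pred_minus.u),
              static_cast<long long>(pred_minus.v));
  // With this axis-aligned example the true UV of the tip is (1, 1), which
  // matches one of the two candidates; the stored orientation bit records
  // which one the encoder picked.
  return 0;
}
```

Staying in integer arithmetic is what makes this variant reproducible across platforms, unlike the deprecated floating-point predictor shown earlier.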
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_H_ + +#include + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h" + +// Prediction schemes can be used during encoding and decoding of vertex +// attributes to predict attribute values based on the previously +// encoded/decoded data. The differences between the original and predicted +// attribute values are used to compute correction values that can be usually +// encoded with fewer bits compared to the original data. +namespace draco { + +// Abstract base class for typed prediction schemes. It provides basic access +// to the encoded attribute and to the supplied prediction transform. +template > +class PredictionSchemeDecoder : public PredictionSchemeTypedDecoderInterface< + DataTypeT, typename TransformT::CorrType> { + public: + typedef DataTypeT DataType; + typedef TransformT Transform; + // Correction type needs to be defined in the prediction transform class. + typedef typename Transform::CorrType CorrType; + explicit PredictionSchemeDecoder(const PointAttribute *attribute) + : PredictionSchemeDecoder(attribute, Transform()) {} + PredictionSchemeDecoder(const PointAttribute *attribute, + const Transform &transform) + : attribute_(attribute), transform_(transform) {} + + bool DecodePredictionData(DecoderBuffer *buffer) override { + if (!transform_.DecodeTransformData(buffer)) { + return false; + } + return true; + } + + const PointAttribute *GetAttribute() const override { return attribute(); } + + // Returns the number of parent attributes that are needed for the prediction. + int GetNumParentAttributes() const override { return 0; } + + // Returns the type of each of the parent attribute. + GeometryAttribute::Type GetParentAttributeType(int /* i */) const override { + return GeometryAttribute::INVALID; + } + + // Sets the required parent attribute. + bool SetParentAttribute(const PointAttribute * /* att */) override { + return false; + } + + bool AreCorrectionsPositive() override { + return transform_.AreCorrectionsPositive(); + } + + PredictionSchemeTransformType GetTransformType() const override { + return transform_.GetType(); + } + + protected: + inline const PointAttribute *attribute() const { return attribute_; } + inline const Transform &transform() const { return transform_; } + inline Transform &transform() { return transform_; } + + private: + const PointAttribute *attribute_; + Transform transform_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h new file mode 100644 index 000000000..cf2a6ba6b --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h @@ -0,0 +1,194 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Functions for creating prediction schemes for decoders using the provided +// prediction method id. + +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_FACTORY_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_FACTORY_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h" +#include "draco/draco_features.h" +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h" +#endif +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h" +#include "draco/compression/mesh/mesh_decoder.h" + +namespace draco { + +// Factory class for creating mesh prediction schemes. The factory implements +// operator() that is used to create an appropriate mesh prediction scheme in +// CreateMeshPredictionScheme() function in prediction_scheme_factory.h +template +struct MeshPredictionSchemeDecoderFactory { + // Operator () specialized for the wrap transform. Wrap transform can be used + // for all mesh prediction schemes. The specialization is done in compile time + // to prevent instantiations of unneeded combinations of prediction schemes + + // prediction transforms. 
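The compile-time dispatch described in that comment reduces to a small self-contained pattern: a functor struct keyed on a non-type (enum) template parameter, with specializations that only construct the combinations that are actually supported. The sketch below is illustrative only and is independent of Draco's real scheme and transform types.

```cpp
#include <memory>

enum TransformKind { KIND_WRAP, KIND_NORMAL_OCTAHEDRON };

struct SchemeBase { virtual ~SchemeBase() = default; };
struct WrapScheme : SchemeBase {};
struct NormalScheme : SchemeBase {};

// Primary template: the permissive combination, every scheme is allowed.
template <TransformKind Kind>
struct Dispatch {
  std::unique_ptr<SchemeBase> operator()(bool want_normals) const {
    if (want_normals) {
      return std::unique_ptr<SchemeBase>(new NormalScheme());
    }
    return std::unique_ptr<SchemeBase>(new WrapScheme());
  }
};

// Specialization: only the normal scheme is ever instantiated for this kind.
template <>
struct Dispatch<KIND_NORMAL_OCTAHEDRON> {
  std::unique_ptr<SchemeBase> operator()(bool want_normals) const {
    if (want_normals) {
      return std::unique_ptr<SchemeBase>(new NormalScheme());
    }
    return nullptr;  // Unsupported combination.
  }
};

int main() {
  auto a = Dispatch<KIND_WRAP>()(false);               // wrap scheme
  auto b = Dispatch<KIND_NORMAL_OCTAHEDRON>()(true);   // normal scheme
  auto c = Dispatch<KIND_NORMAL_OCTAHEDRON>()(false);  // nullptr
  return (a && b && !c) ? 0 : 1;
}
```

Because the unsupported combinations are never named, their templates are never instantiated, which is the point of the specialization noted in the comment above.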
+ template + struct DispatchFunctor { + std::unique_ptr> operator()( + PredictionSchemeMethod method, const PointAttribute *attribute, + const TransformT &transform, const MeshDataT &mesh_data, + uint16_t bitstream_version) { + if (method == MESH_PREDICTION_PARALLELOGRAM) { + return std::unique_ptr>( + new MeshPredictionSchemeParallelogramDecoder( + attribute, transform, mesh_data)); + } +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + else if (method == MESH_PREDICTION_MULTI_PARALLELOGRAM) { + return std::unique_ptr>( + new MeshPredictionSchemeMultiParallelogramDecoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } +#endif + else if (method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM) { + return std::unique_ptr>( + new MeshPredictionSchemeConstrainedMultiParallelogramDecoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + else if (method == MESH_PREDICTION_TEX_COORDS_DEPRECATED) { + return std::unique_ptr>( + new MeshPredictionSchemeTexCoordsDecoder( + attribute, transform, mesh_data, bitstream_version)); + } +#endif + else if (method == MESH_PREDICTION_TEX_COORDS_PORTABLE) { + return std::unique_ptr>( + new MeshPredictionSchemeTexCoordsPortableDecoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + else if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) { + return std::unique_ptr>( + new MeshPredictionSchemeGeometricNormalDecoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } +#endif + return nullptr; + } + }; + +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + // Operator () specialized for normal octahedron transforms. These transforms + // are currently used only by the geometric normal prediction scheme (the + // transform is also used by delta coding, but delta predictor is not + // constructed in this function). + template + struct DispatchFunctor { + std::unique_ptr> operator()( + PredictionSchemeMethod method, const PointAttribute *attribute, + const TransformT &transform, const MeshDataT &mesh_data, + uint16_t bitstream_version) { + if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) { + return std::unique_ptr>( + new MeshPredictionSchemeGeometricNormalDecoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } + return nullptr; + } + }; + template + struct DispatchFunctor { + std::unique_ptr> operator()( + PredictionSchemeMethod method, const PointAttribute *attribute, + const TransformT &transform, const MeshDataT &mesh_data, + uint16_t bitstream_version) { + if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) { + return std::unique_ptr>( + new MeshPredictionSchemeGeometricNormalDecoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } + return nullptr; + } + }; +#endif + + template + std::unique_ptr> operator()( + PredictionSchemeMethod method, const PointAttribute *attribute, + const TransformT &transform, const MeshDataT &mesh_data, + uint16_t bitstream_version) { + return DispatchFunctor()( + method, attribute, transform, mesh_data, bitstream_version); + } +}; + +// Creates a prediction scheme for a given decoder and given prediction method. +// The prediction schemes are automatically initialized with decoder specific +// data if needed. 
+template +std::unique_ptr> +CreatePredictionSchemeForDecoder(PredictionSchemeMethod method, int att_id, + const PointCloudDecoder *decoder, + const TransformT &transform) { + if (method == PREDICTION_NONE) { + return nullptr; + } + const PointAttribute *const att = decoder->point_cloud()->attribute(att_id); + if (decoder->GetGeometryType() == TRIANGULAR_MESH) { + // Cast the decoder to mesh decoder. This is not necessarily safe if there + // is some other decoder decides to use TRIANGULAR_MESH as the return type, + // but unfortunately there is not nice work around for this without using + // RTTI (double dispatch and similar concepts will not work because of the + // template nature of the prediction schemes). + const MeshDecoder *const mesh_decoder = + static_cast(decoder); + + auto ret = CreateMeshPredictionScheme< + MeshDecoder, PredictionSchemeDecoder, + MeshPredictionSchemeDecoderFactory>( + mesh_decoder, method, att_id, transform, decoder->bitstream_version()); + if (ret) { + return ret; + } + // Otherwise try to create another prediction scheme. + } + // Create delta decoder. + return std::unique_ptr>( + new PredictionSchemeDeltaDecoder(att, transform)); +} + +// Create a prediction scheme using a default transform constructor. +template +std::unique_ptr> +CreatePredictionSchemeForDecoder(PredictionSchemeMethod method, int att_id, + const PointCloudDecoder *decoder) { + return CreatePredictionSchemeForDecoder( + method, att_id, decoder, TransformT()); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_FACTORY_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h new file mode 100644 index 000000000..6f19f7fdb --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h @@ -0,0 +1,53 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_INTERFACE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_INTERFACE_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h" +#include "draco/core/decoder_buffer.h" + +// Prediction schemes can be used during encoding and decoding of attributes +// to predict attribute values based on the previously encoded/decoded data. +// See prediction_scheme.h for more details. +namespace draco { + +// Abstract interface for all prediction schemes used during attribute encoding. +class PredictionSchemeDecoderInterface : public PredictionSchemeInterface { + public: + // Method that can be used to decode any prediction scheme specific data + // from the input buffer. 
+ virtual bool DecodePredictionData(DecoderBuffer *buffer) = 0; +}; + +// A specialized version of the prediction scheme interface for specific +// input and output data types. +// |entry_to_point_id_map| is the mapping between value entries to point ids +// of the associated point cloud, where one entry is defined as |num_components| +// values of the |in_data|. +// DataTypeT is the data type of input and predicted values. +// CorrTypeT is the data type used for storing corrected values. +template +class PredictionSchemeTypedDecoderInterface + : public PredictionSchemeDecoderInterface { + public: + // Reverts changes made by the prediction scheme during encoding. + virtual bool ComputeOriginalValues( + const CorrTypeT *in_corr, DataTypeT *out_data, int size, + int num_components, const PointIndex *entry_to_point_id_map) = 0; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_INTERFACE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h new file mode 100644 index 000000000..47c1532ad --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h @@ -0,0 +1,65 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODING_TRANSFORM_H_ + +#include "draco/compression/config/compression_shared.h" +#include "draco/core/decoder_buffer.h" + +namespace draco { + +// PredictionSchemeDecodingTransform is used to transform predicted values and +// correction values into the final original attribute values. +// DataTypeT is the data type of predicted values. +// CorrTypeT is the data type used for storing corrected values. It allows +// transforms to store corrections into a different type or format compared to +// the predicted data. +template +class PredictionSchemeDecodingTransform { + public: + typedef CorrTypeT CorrType; + PredictionSchemeDecodingTransform() : num_components_(0) {} + + void Init(int num_components) { num_components_ = num_components; } + + // Computes the original value from the input predicted value and the decoded + // corrections. The default implementation is equal to std:plus. + inline void ComputeOriginalValue(const DataTypeT *predicted_vals, + const CorrTypeT *corr_vals, + DataTypeT *out_original_vals) const { + static_assert(std::is_same::value, + "For the default prediction transform, correction and input " + "data must be of the same type."); + for (int i = 0; i < num_components_; ++i) { + out_original_vals[i] = predicted_vals[i] + corr_vals[i]; + } + } + + // Decodes any transform specific data. Called before Init() method. 
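A minimal usage sketch of the default decoding transform defined above; it assumes the Draco sources added by this diff are on the include path and a C++11 (or newer) compiler.

```cpp
// Minimal usage sketch of the default (delta) decoding transform.
#include <cassert>
#include <cstdint>

#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h"

int main() {
  draco::PredictionSchemeDecodingTransform<int32_t, int32_t> transform;
  transform.Init(/*num_components=*/3);

  const int32_t predicted[3] = {10, 20, 30};
  const int32_t corrections[3] = {1, -2, 0};
  int32_t original[3];
  // The default transform is componentwise addition: original = predicted + correction.
  transform.ComputeOriginalValue(predicted, corrections, original);

  assert(original[0] == 11 && original[1] == 18 && original[2] == 30);
  return 0;
}
```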
+ bool DecodeTransformData(DecoderBuffer * /* buffer */) { return true; } + + // Should return true if all corrected values are guaranteed to be positive. + bool AreCorrectionsPositive() const { return false; } + + protected: + int num_components() const { return num_components_; } + + private: + int num_components_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h new file mode 100644 index 000000000..ae72c7120 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h @@ -0,0 +1,65 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h" + +namespace draco { + +// Decoder for values encoded with delta coding. See the corresponding encoder +// for more details. +template +class PredictionSchemeDeltaDecoder + : public PredictionSchemeDecoder { + public: + using CorrType = + typename PredictionSchemeDecoder::CorrType; + // Initialized the prediction scheme. + explicit PredictionSchemeDeltaDecoder(const PointAttribute *attribute) + : PredictionSchemeDecoder(attribute) {} + PredictionSchemeDeltaDecoder(const PointAttribute *attribute, + const TransformT &transform) + : PredictionSchemeDecoder(attribute, transform) {} + + bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data, + int size, int num_components, + const PointIndex *entry_to_point_id_map) override; + PredictionSchemeMethod GetPredictionMethod() const override { + return PREDICTION_DIFFERENCE; + } + bool IsInitialized() const override { return true; } +}; + +template +bool PredictionSchemeDeltaDecoder::ComputeOriginalValues( + const CorrType *in_corr, DataTypeT *out_data, int size, int num_components, + const PointIndex *) { + this->transform().Init(num_components); + // Decode the original value for the first element. + std::unique_ptr zero_vals(new DataTypeT[num_components]()); + this->transform().ComputeOriginalValue(zero_vals.get(), in_corr, out_data); + + // Decode data from the front using D(i) = D(i) + D(i - 1). 
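The recurrence D(i) = D(i) + D(i - 1) from the comment above can be traced with a small, dependency-free sketch; the values are illustrative.

```cpp
// Standalone sketch of the delta decoding recurrence, applied per component.
#include <cstdio>
#include <vector>

int main() {
  const int num_components = 2;
  // Corrections as produced by the delta encoder (the first entry is the raw value).
  const std::vector<int> corr = {10, 20, 1, -2, 3, 4};
  std::vector<int> out(corr.size());
  // First element: the predicted value is zero, so the output equals the correction.
  for (int c = 0; c < num_components; ++c) out[c] = corr[c];
  // Remaining elements: add the previously decoded entry.
  for (size_t i = num_components; i < corr.size(); i += num_components) {
    for (int c = 0; c < num_components; ++c) {
      out[i + c] = out[i - num_components + c] + corr[i + c];
    }
  }
  for (int v : out) std::printf("%d ", v);  // 10 20 11 18 14 22
  std::printf("\n");
  return 0;
}
```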
+ for (int i = num_components; i < size; i += num_components) { + this->transform().ComputeOriginalValue(out_data + i - num_components, + in_corr + i, out_data + i); + } + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h new file mode 100644 index 000000000..324afafa6 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h @@ -0,0 +1,69 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h" + +namespace draco { + +// Basic prediction scheme based on computing backward differences between +// stored attribute values (also known as delta-coding). Usually works better +// than the reference point prediction scheme, because nearby values are often +// encoded next to each other. +template +class PredictionSchemeDeltaEncoder + : public PredictionSchemeEncoder { + public: + using CorrType = + typename PredictionSchemeEncoder::CorrType; + // Initialized the prediction scheme. + explicit PredictionSchemeDeltaEncoder(const PointAttribute *attribute) + : PredictionSchemeEncoder(attribute) {} + PredictionSchemeDeltaEncoder(const PointAttribute *attribute, + const TransformT &transform) + : PredictionSchemeEncoder(attribute, transform) {} + + bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrType *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) override; + PredictionSchemeMethod GetPredictionMethod() const override { + return PREDICTION_DIFFERENCE; + } + bool IsInitialized() const override { return true; } +}; + +template +bool PredictionSchemeDeltaEncoder< + DataTypeT, TransformT>::ComputeCorrectionValues(const DataTypeT *in_data, + CorrType *out_corr, + int size, + int num_components, + const PointIndex *) { + this->transform().Init(in_data, size, num_components); + // Encode data from the back using D(i) = D(i) - D(i - 1). + for (int i = size - num_components; i > 0; i -= num_components) { + this->transform().ComputeCorrection( + in_data + i, in_data + i - num_components, out_corr + i); + } + // Encode correction for the first element. 
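Mirroring the decoder sketch earlier, the backward differences computed by this encoder can also be traced standalone; the output below reproduces the corrections used in that sketch.

```cpp
// Standalone sketch of delta encoding: corrections are backward differences,
// computed from the back so the buffer could be overwritten in place.
#include <cstdio>
#include <vector>

int main() {
  const int num_components = 2;
  const std::vector<int> data = {10, 20, 11, 18, 14, 22};
  std::vector<int> corr(data.size());
  for (int i = static_cast<int>(data.size()) - num_components; i > 0;
       i -= num_components) {
    for (int c = 0; c < num_components; ++c) {
      corr[i + c] = data[i + c] - data[i - num_components + c];
    }
  }
  // The first element has no predecessor; its "correction" is the value itself.
  for (int c = 0; c < num_components; ++c) corr[c] = data[c];
  for (int v : corr) std::printf("%d ", v);  // 10 20 1 -2 3 4
  std::printf("\n");
  return 0;
}
```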
+ std::unique_ptr zero_vals(new DataTypeT[num_components]()); + this->transform().ComputeCorrection(in_data, zero_vals.get(), out_corr); + return true; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h new file mode 100644 index 000000000..2a211a9fc --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h @@ -0,0 +1,90 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_H_ + +#include + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h" + +// Prediction schemes can be used during encoding and decoding of vertex +// attributes to predict attribute values based on the previously +// encoded/decoded data. The differences between the original and predicted +// attribute values are used to compute correction values that can be usually +// encoded with fewer bits compared to the original data. +namespace draco { + +// Abstract base class for typed prediction schemes. It provides basic access +// to the encoded attribute and to the supplied prediction transform. +template > +class PredictionSchemeEncoder : public PredictionSchemeTypedEncoderInterface< + DataTypeT, typename TransformT::CorrType> { + public: + typedef DataTypeT DataType; + typedef TransformT Transform; + // Correction type needs to be defined in the prediction transform class. + typedef typename Transform::CorrType CorrType; + explicit PredictionSchemeEncoder(const PointAttribute *attribute) + : PredictionSchemeEncoder(attribute, Transform()) {} + PredictionSchemeEncoder(const PointAttribute *attribute, + const Transform &transform) + : attribute_(attribute), transform_(transform) {} + + bool EncodePredictionData(EncoderBuffer *buffer) override { + if (!transform_.EncodeTransformData(buffer)) { + return false; + } + return true; + } + + const PointAttribute *GetAttribute() const override { return attribute(); } + + // Returns the number of parent attributes that are needed for the prediction. + int GetNumParentAttributes() const override { return 0; } + + // Returns the type of each of the parent attribute. + GeometryAttribute::Type GetParentAttributeType(int /* i */) const override { + return GeometryAttribute::INVALID; + } + + // Sets the required parent attribute. 
+ bool SetParentAttribute(const PointAttribute * /* att */) override { + return false; + } + + bool AreCorrectionsPositive() override { + return transform_.AreCorrectionsPositive(); + } + + PredictionSchemeTransformType GetTransformType() const override { + return transform_.GetType(); + } + + protected: + inline const PointAttribute *attribute() const { return attribute_; } + inline const Transform &transform() const { return transform_; } + inline Transform &transform() { return transform_; } + + private: + const PointAttribute *attribute_; + Transform transform_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc new file mode 100644 index 000000000..f410a6cd2 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc @@ -0,0 +1,85 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h" + +namespace draco { + +PredictionSchemeMethod SelectPredictionMethod( + int att_id, const PointCloudEncoder *encoder) { + if (encoder->options()->GetSpeed() >= 10) { + // Selected fastest, though still doing some compression. + return PREDICTION_DIFFERENCE; + } + if (encoder->GetGeometryType() == TRIANGULAR_MESH) { + // Use speed setting to select the best encoding method. + const PointAttribute *const att = encoder->point_cloud()->attribute(att_id); + if (att->attribute_type() == GeometryAttribute::TEX_COORD) { + if (encoder->options()->GetSpeed() < 4) { + // Use texture coordinate prediction for speeds 0, 1, 2, 3. + return MESH_PREDICTION_TEX_COORDS_PORTABLE; + } + } + if (att->attribute_type() == GeometryAttribute::NORMAL) { +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + if (encoder->options()->GetSpeed() < 4) { + // Use geometric normal prediction for speeds 0, 1, 2, 3. + // For this prediction, the position attribute needs to be either + // integer or quantized as well. + const int pos_att_id = encoder->point_cloud()->GetNamedAttributeId( + GeometryAttribute::POSITION); + const PointAttribute *const pos_att = + encoder->point_cloud()->GetNamedAttribute( + GeometryAttribute::POSITION); + if (pos_att && (IsDataTypeIntegral(pos_att->data_type()) || + encoder->options()->GetAttributeInt( + pos_att_id, "quantization_bits", -1) > 0)) { + return MESH_PREDICTION_GEOMETRIC_NORMAL; + } + } +#endif + return PREDICTION_DIFFERENCE; // default + } + // Handle other attribute types. 
+ if (encoder->options()->GetSpeed() >= 8) { + return PREDICTION_DIFFERENCE; + } + if (encoder->options()->GetSpeed() >= 2 || + encoder->point_cloud()->num_points() < 40) { + // Parallelogram prediction is used for speeds 2 - 7 or when the overhead + // of using constrained multi-parallelogram would be too high. + return MESH_PREDICTION_PARALLELOGRAM; + } + // Multi-parallelogram is used for speeds 0, 1. + return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM; + } + // Default option is delta coding. + return PREDICTION_DIFFERENCE; +} + +// Returns the preferred prediction scheme based on the encoder options. +PredictionSchemeMethod GetPredictionMethodFromOptions( + int att_id, const EncoderOptions &options) { + const int pred_type = + options.GetAttributeInt(att_id, "prediction_scheme", -1); + if (pred_type == -1) { + return PREDICTION_UNDEFINED; + } + if (pred_type < 0 || pred_type >= NUM_PREDICTION_SCHEMES) { + return PREDICTION_NONE; + } + return static_cast(pred_type); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h new file mode 100644 index 000000000..40a7683aa --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h @@ -0,0 +1,129 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Functions for creating prediction schemes for encoders using the provided +// prediction method id. + +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_FACTORY_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_FACTORY_H_ + +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h" +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h" +#endif +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h" +#include "draco/compression/mesh/mesh_encoder.h" + +namespace draco { + +// Selects a prediction method based on the input geometry type and based on the +// encoder options. 
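For orientation, the speed-based selection implemented in SelectPredictionMethod() above can be condensed into a small stand-alone sketch; the enum and function below are illustrative placeholders, and the tex-coord and normal special cases are omitted.

```cpp
#include <cstdio>

// Hypothetical stand-ins for Draco's PredictionSchemeMethod values.
enum Method { kDifference, kParallelogram, kConstrainedMultiParallelogram };

// Condensed speed-to-method mapping for mesh attributes, as in the code above.
Method SelectForSpeed(int speed, int num_points) {
  if (speed >= 10) return kDifference;  // fastest setting, least compression
  if (speed >= 8) return kDifference;
  if (speed >= 2 || num_points < 40) return kParallelogram;  // speeds 2-7
  return kConstrainedMultiParallelogram;                     // speeds 0 and 1
}

int main() {
  std::printf("%d %d %d\n", static_cast<int>(SelectForSpeed(10, 1000)),
              static_cast<int>(SelectForSpeed(5, 1000)),
              static_cast<int>(SelectForSpeed(0, 1000)));  // prints: 0 1 2
  return 0;
}
```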
+PredictionSchemeMethod SelectPredictionMethod(int att_id, + const PointCloudEncoder *encoder); + +// Factory class for creating mesh prediction schemes. +template +struct MeshPredictionSchemeEncoderFactory { + template + std::unique_ptr> operator()( + PredictionSchemeMethod method, const PointAttribute *attribute, + const TransformT &transform, const MeshDataT &mesh_data, + uint16_t bitstream_version) { + if (method == MESH_PREDICTION_PARALLELOGRAM) { + return std::unique_ptr>( + new MeshPredictionSchemeParallelogramEncoder( + attribute, transform, mesh_data)); + } else if (method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM) { + return std::unique_ptr>( + new MeshPredictionSchemeConstrainedMultiParallelogramEncoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } else if (method == MESH_PREDICTION_TEX_COORDS_PORTABLE) { + return std::unique_ptr>( + new MeshPredictionSchemeTexCoordsPortableEncoder< + DataTypeT, TransformT, MeshDataT>(attribute, transform, + mesh_data)); + } +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + else if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) { + return std::unique_ptr>( + new MeshPredictionSchemeGeometricNormalEncoder( + attribute, transform, mesh_data)); + } +#endif + return nullptr; + } +}; + +// Creates a prediction scheme for a given encoder and given prediction method. +// The prediction schemes are automatically initialized with encoder specific +// data if needed. +template +std::unique_ptr> +CreatePredictionSchemeForEncoder(PredictionSchemeMethod method, int att_id, + const PointCloudEncoder *encoder, + const TransformT &transform) { + const PointAttribute *const att = encoder->point_cloud()->attribute(att_id); + if (method == PREDICTION_UNDEFINED) { + method = SelectPredictionMethod(att_id, encoder); + } + if (method == PREDICTION_NONE) { + return nullptr; // No prediction is used. + } + if (encoder->GetGeometryType() == TRIANGULAR_MESH) { + // Cast the encoder to mesh encoder. This is not necessarily safe if there + // is some other encoder decides to use TRIANGULAR_MESH as the return type, + // but unfortunately there is not nice work around for this without using + // RTTI (double dispatch and similar concepts will not work because of the + // template nature of the prediction schemes). + const MeshEncoder *const mesh_encoder = + static_cast(encoder); + auto ret = CreateMeshPredictionScheme< + MeshEncoder, PredictionSchemeEncoder, + MeshPredictionSchemeEncoderFactory>( + mesh_encoder, method, att_id, transform, kDracoMeshBitstreamVersion); + if (ret) { + return ret; + } + // Otherwise try to create another prediction scheme. + } + // Create delta encoder. + return std::unique_ptr>( + new PredictionSchemeDeltaEncoder(att, transform)); +} + +// Create a prediction scheme using a default transform constructor. +template +std::unique_ptr> +CreatePredictionSchemeForEncoder(PredictionSchemeMethod method, int att_id, + const PointCloudEncoder *encoder) { + return CreatePredictionSchemeForEncoder( + method, att_id, encoder, TransformT()); +} + +// Returns the preferred prediction scheme based on the encoder options. 
+PredictionSchemeMethod GetPredictionMethodFromOptions( + int att_id, const EncoderOptions &options); + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_FACTORY_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h new file mode 100644 index 000000000..37aa9f76a --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h @@ -0,0 +1,55 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_INTERFACE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_INTERFACE_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h" +#include "draco/core/encoder_buffer.h" + +// Prediction schemes can be used during encoding and decoding of attributes +// to predict attribute values based on the previously encoded/decoded data. +// See prediction_scheme.h for more details. +namespace draco { + +// Abstract interface for all prediction schemes used during attribute encoding. +class PredictionSchemeEncoderInterface : public PredictionSchemeInterface { + public: + // Method that can be used to encode any prediction scheme specific data + // into the output buffer. + virtual bool EncodePredictionData(EncoderBuffer *buffer) = 0; +}; + +// A specialized version of the prediction scheme interface for specific +// input and output data types. +// |entry_to_point_id_map| is the mapping between value entries to point ids +// of the associated point cloud, where one entry is defined as |num_components| +// values of the |in_data|. +// DataTypeT is the data type of input and predicted values. +// CorrTypeT is the data type used for storing corrected values. +template +class PredictionSchemeTypedEncoderInterface + : public PredictionSchemeEncoderInterface { + public: + // Applies the prediction scheme when encoding the attribute. + // |in_data| contains value entries to be encoded. + // |out_corr| is an output array containing the to be encoded corrections. 
+ virtual bool ComputeCorrectionValues( + const DataTypeT *in_data, CorrTypeT *out_corr, int size, + int num_components, const PointIndex *entry_to_point_id_map) = 0; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_INTERFACE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h new file mode 100644 index 000000000..0929492aa --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h @@ -0,0 +1,77 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_TRANSFORM_H_ + +#include "draco/compression/config/compression_shared.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// PredictionSchemeEncodingTransform is used to transform predicted values into +// correction values. +// CorrTypeT is the data type used for storing corrected values. It allows +// transforms to store corrections into a different type or format compared to +// the predicted data. +template +class PredictionSchemeEncodingTransform { + public: + typedef CorrTypeT CorrType; + PredictionSchemeEncodingTransform() : num_components_(0) {} + + PredictionSchemeTransformType GetType() const { + return PREDICTION_TRANSFORM_DELTA; + } + + // Performs any custom initialization of the transform for the encoder. + // |size| = total number of values in |orig_data| (i.e., number of entries * + // number of components). + void Init(const DataTypeT * /* orig_data */, int /* size */, + int num_components) { + num_components_ = num_components; + } + + // Computes the corrections based on the input original values and the + // predicted values. The correction is always computed for all components + // of the input element. |val_id| is the id of the input value + // (i.e., element_id * num_components). The default implementation is equal to + // std::minus. + inline void ComputeCorrection(const DataTypeT *original_vals, + const DataTypeT *predicted_vals, + CorrTypeT *out_corr_vals) { + static_assert(std::is_same::value, + "For the default prediction transform, correction and input " + "data must be of the same type."); + for (int i = 0; i < num_components_; ++i) { + out_corr_vals[i] = original_vals[i] - predicted_vals[i]; + } + } + + // Encode any transform specific data. + bool EncodeTransformData(EncoderBuffer * /* buffer */) { return true; } + + // Should return true if all corrected values are guaranteed to be positive. 
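A round-trip sketch combining this encoding transform with the decoding transform added earlier in this diff; it assumes the Draco sources are on the include path.

```cpp
// Round trip: the default encoding transform computes componentwise
// differences and the default decoding transform adds them back.
#include <cassert>
#include <cstdint>

#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h"

int main() {
  const int kNumComponents = 3;
  const int32_t original[3] = {5, -7, 9};
  const int32_t predicted[3] = {4, -9, 9};
  int32_t corrections[3];
  int32_t reconstructed[3];

  draco::PredictionSchemeEncodingTransform<int32_t, int32_t> enc;
  enc.Init(original, /*size=*/3, kNumComponents);
  enc.ComputeCorrection(original, predicted, corrections);  // {1, 2, 0}

  draco::PredictionSchemeDecodingTransform<int32_t, int32_t> dec;
  dec.Init(kNumComponents);
  dec.ComputeOriginalValue(predicted, corrections, reconstructed);

  for (int i = 0; i < kNumComponents; ++i) {
    assert(reconstructed[i] == original[i]);
  }
  return 0;
}
```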
+ bool AreCorrectionsPositive() const { return false; } + + protected: + int num_components() const { return num_components_; } + + private: + int num_components_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h new file mode 100644 index 000000000..b36c4c8a2 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h @@ -0,0 +1,85 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Functions for creating prediction schemes from a provided prediction method +// name. The functions in this file can create only basic prediction schemes +// that don't require any encoder or decoder specific data. To create more +// sophisticated prediction schemes, use functions from either +// prediction_scheme_encoder_factory.h or, +// prediction_scheme_decoder_factory.h. + +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_ + +#include "draco/compression/attributes/mesh_attribute_indices_encoding_data.h" +#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/mesh/mesh_attribute_corner_table.h" + +namespace draco { + +template +std::unique_ptr CreateMeshPredictionScheme( + const EncodingDataSourceT *source, PredictionSchemeMethod method, + int att_id, const typename PredictionSchemeT::Transform &transform, + uint16_t bitstream_version) { + const PointAttribute *const att = source->point_cloud()->attribute(att_id); + if (source->GetGeometryType() == TRIANGULAR_MESH && + (method == MESH_PREDICTION_PARALLELOGRAM || + method == MESH_PREDICTION_MULTI_PARALLELOGRAM || + method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM || + method == MESH_PREDICTION_TEX_COORDS_PORTABLE || + method == MESH_PREDICTION_GEOMETRIC_NORMAL || + method == MESH_PREDICTION_TEX_COORDS_DEPRECATED)) { + const CornerTable *const ct = source->GetCornerTable(); + const MeshAttributeIndicesEncodingData *const encoding_data = + source->GetAttributeEncodingData(att_id); + if (ct == nullptr || encoding_data == nullptr) { + // No connectivity data found. + return nullptr; + } + // Connectivity data exists. 
+ const MeshAttributeCornerTable *const att_ct = + source->GetAttributeCornerTable(att_id); + if (att_ct != nullptr) { + typedef MeshPredictionSchemeData MeshData; + MeshData md; + md.Set(source->mesh(), att_ct, + &encoding_data->encoded_attribute_value_index_to_corner_map, + &encoding_data->vertex_to_encoded_attribute_value_index_map); + MeshPredictionSchemeFactoryT factory; + auto ret = factory(method, att, transform, md, bitstream_version); + if (ret) { + return ret; + } + } else { + typedef MeshPredictionSchemeData MeshData; + MeshData md; + md.Set(source->mesh(), ct, + &encoding_data->encoded_attribute_value_index_to_corner_map, + &encoding_data->vertex_to_encoded_attribute_value_index_map); + MeshPredictionSchemeFactoryT factory; + auto ret = factory(method, att, transform, md, bitstream_version); + if (ret) { + return ret; + } + } + } + return nullptr; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h new file mode 100644 index 000000000..c9b370693 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h @@ -0,0 +1,60 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_INTERFACE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_INTERFACE_H_ + +#include "draco/attributes/point_attribute.h" +#include "draco/compression/config/compression_shared.h" + +// Prediction schemes can be used during encoding and decoding of attributes +// to predict attribute values based on the previously encoded/decoded data. +// See prediction_scheme.h for more details. +namespace draco { + +// Abstract interface for all prediction schemes used during attribute encoding. +class PredictionSchemeInterface { + public: + virtual ~PredictionSchemeInterface() = default; + virtual PredictionSchemeMethod GetPredictionMethod() const = 0; + + // Returns the encoded attribute. + virtual const PointAttribute *GetAttribute() const = 0; + + // Returns true when the prediction scheme is initialized with all data it + // needs. + virtual bool IsInitialized() const = 0; + + // Returns the number of parent attributes that are needed for the prediction. + virtual int GetNumParentAttributes() const = 0; + + // Returns the type of each of the parent attribute. + virtual GeometryAttribute::Type GetParentAttributeType(int i) const = 0; + + // Sets the required parent attribute. + // Returns false if the attribute doesn't meet the requirements of the + // prediction scheme. 
+ virtual bool SetParentAttribute(const PointAttribute *att) = 0; + + // Method should return true if the prediction scheme guarantees that all + // correction values are always positive (or at least non-negative). + virtual bool AreCorrectionsPositive() = 0; + + // Returns the transform type used by the prediction scheme. + virtual PredictionSchemeTransformType GetTransformType() const = 0; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_INTERFACE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h new file mode 100644 index 000000000..5a6c7c2dd --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h @@ -0,0 +1,118 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_DECODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_DECODING_TRANSFORM_H_ + +#include + +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h" +#include "draco/core/decoder_buffer.h" +#include "draco/core/macros.h" +#include "draco/core/vector_d.h" + +namespace draco { + +// Class for converting correction values transformed by the canonicalized +// normal octahedron transform back to the original values. See the +// corresponding encoder for more details. +template +class PredictionSchemeNormalOctahedronCanonicalizedDecodingTransform + : public PredictionSchemeNormalOctahedronCanonicalizedTransformBase< + DataTypeT> { + public: + typedef VectorD Point2; + typedef DataTypeT CorrType; + typedef DataTypeT DataType; + + PredictionSchemeNormalOctahedronCanonicalizedDecodingTransform() {} + + // Dummy to fulfill concept. + void Init(int num_components) {} + + bool DecodeTransformData(DecoderBuffer *buffer) { + DataTypeT max_quantized_value, center_value; + if (!buffer->Decode(&max_quantized_value)) { + return false; + } + if (!buffer->Decode(¢er_value)) { + return false; + } + (void)center_value; + if (!this->set_max_quantized_value(max_quantized_value)) { + return false; + } + // Account for reading wrong values, e.g., due to fuzzing. 
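For orientation, a dependency-free sketch of how the 2^b - 1 form of the maximum quantized value relates to the bit count bounded by the decoder check above; the constants are borrowed from the transform tests later in this diff, and the max/2 center relation is an assumption based on those test values.

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const int32_t max_quantized_value = 15;  // expected form: 2^b - 1 (here b = 4)
  int bits = 0;
  for (int32_t v = max_quantized_value; v > 0; v >>= 1) ++bits;  // bits == 4
  const int32_t center_value = max_quantized_value / 2;          // 7, as in the tests
  assert(bits >= 2 && bits <= 30);  // same bounds as the decoder sanity check
  assert(center_value == 7);
  return 0;
}
```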
+ if (this->quantization_bits() < 2) { + return false; + } + if (this->quantization_bits() > 30) { + return false; + } + return true; + } + + inline void ComputeOriginalValue(const DataType *pred_vals, + const CorrType *corr_vals, + DataType *out_orig_vals) const { + DRACO_DCHECK_LE(pred_vals[0], 2 * this->center_value()); + DRACO_DCHECK_LE(pred_vals[1], 2 * this->center_value()); + DRACO_DCHECK_LE(corr_vals[0], 2 * this->center_value()); + DRACO_DCHECK_LE(corr_vals[1], 2 * this->center_value()); + + DRACO_DCHECK_LE(0, pred_vals[0]); + DRACO_DCHECK_LE(0, pred_vals[1]); + DRACO_DCHECK_LE(0, corr_vals[0]); + DRACO_DCHECK_LE(0, corr_vals[1]); + + const Point2 pred = Point2(pred_vals[0], pred_vals[1]); + const Point2 corr = Point2(corr_vals[0], corr_vals[1]); + const Point2 orig = ComputeOriginalValue(pred, corr); + + out_orig_vals[0] = orig[0]; + out_orig_vals[1] = orig[1]; + } + + private: + Point2 ComputeOriginalValue(Point2 pred, Point2 corr) const { + const Point2 t(this->center_value(), this->center_value()); + pred = pred - t; + const bool pred_is_in_diamond = this->IsInDiamond(pred[0], pred[1]); + if (!pred_is_in_diamond) { + this->InvertDiamond(&pred[0], &pred[1]); + } + const bool pred_is_in_bottom_left = this->IsInBottomLeft(pred); + const int32_t rotation_count = this->GetRotationCount(pred); + if (!pred_is_in_bottom_left) { + pred = this->RotatePoint(pred, rotation_count); + } + Point2 orig = pred + corr; + orig[0] = this->ModMax(orig[0]); + orig[1] = this->ModMax(orig[1]); + if (!pred_is_in_bottom_left) { + const int32_t reverse_rotation_count = (4 - rotation_count) % 4; + orig = this->RotatePoint(orig, reverse_rotation_count); + } + if (!pred_is_in_diamond) { + this->InvertDiamond(&orig[0], &orig[1]); + } + orig = orig + t; + return orig; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_DECODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h new file mode 100644 index 000000000..0dc96967b --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h @@ -0,0 +1,116 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_ENCODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_ENCODING_TRANSFORM_H_ + +#include + +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h" +#include "draco/core/encoder_buffer.h" +#include "draco/core/macros.h" +#include "draco/core/vector_d.h" + +namespace draco { + +// The transform works on octahedral coordinates for normals. The square is +// subdivided into four inner triangles (diamond) and four outer triangles. The +// inner triangles are associated with the upper part of the octahedron and the +// outer triangles are associated with the lower part. +// Given a prediction value P and the actual value Q that should be encoded, +// this transform first checks if P is outside the diamond. If so, the outer +// triangles are flipped towards the inside and vice versa. Then it checks if p +// is in the bottom left quadrant. If it is not, it rotates p and q accordingly. +// The actual correction value is then based on the mapped and rotated P and Q +// values. The inversion tends to result in shorter correction vectors and the +// rotation makes it so that all long correction values are positive, reducing +// the possible value range of the correction values and increasing the +// occurrences of positive large correction values, which helps the entropy +// encoder. This is possible since P is also known by the decoder, see also +// ComputeCorrection and ComputeOriginalValue functions. +// Note that the tile is not periodic, which implies that the outer edges can +// not be identified, which requires us to use an odd number of values on each +// axis. +// DataTypeT is expected to be some integral type. +// +template +class PredictionSchemeNormalOctahedronCanonicalizedEncodingTransform + : public PredictionSchemeNormalOctahedronCanonicalizedTransformBase< + DataTypeT> { + public: + typedef PredictionSchemeNormalOctahedronCanonicalizedTransformBase + Base; + typedef VectorD Point2; + typedef DataTypeT CorrType; + typedef DataTypeT DataType; + + // We expect the mod value to be of the form 2^b-1. + explicit PredictionSchemeNormalOctahedronCanonicalizedEncodingTransform( + DataType max_quantized_value) + : Base(max_quantized_value) {} + + // Dummy function to fulfill concept. 
+ void Init(const DataTypeT *orig_data, int size, int num_components) {} + + bool EncodeTransformData(EncoderBuffer *buffer) { + buffer->Encode(this->max_quantized_value()); + buffer->Encode(this->center_value()); + return true; + } + + inline void ComputeCorrection(const DataType *orig_vals, + const DataType *pred_vals, + CorrType *out_corr_vals) const { + DRACO_DCHECK_LE(pred_vals[0], this->center_value() * 2); + DRACO_DCHECK_LE(pred_vals[1], this->center_value() * 2); + DRACO_DCHECK_LE(orig_vals[0], this->center_value() * 2); + DRACO_DCHECK_LE(orig_vals[1], this->center_value() * 2); + DRACO_DCHECK_LE(0, pred_vals[0]); + DRACO_DCHECK_LE(0, pred_vals[1]); + DRACO_DCHECK_LE(0, orig_vals[0]); + DRACO_DCHECK_LE(0, orig_vals[1]); + + const Point2 orig = Point2(orig_vals[0], orig_vals[1]); + const Point2 pred = Point2(pred_vals[0], pred_vals[1]); + const Point2 corr = ComputeCorrection(orig, pred); + + out_corr_vals[0] = corr[0]; + out_corr_vals[1] = corr[1]; + } + + private: + Point2 ComputeCorrection(Point2 orig, Point2 pred) const { + const Point2 t(this->center_value(), this->center_value()); + orig = orig - t; + pred = pred - t; + if (!this->IsInDiamond(pred[0], pred[1])) { + this->InvertDiamond(&orig[0], &orig[1]); + this->InvertDiamond(&pred[0], &pred[1]); + } + if (!this->IsInBottomLeft(pred)) { + const int32_t rotation_count = this->GetRotationCount(pred); + orig = this->RotatePoint(orig, rotation_count); + pred = this->RotatePoint(pred, rotation_count); + } + Point2 corr = orig - pred; + corr[0] = this->MakePositive(corr[0]); + corr[1] = this->MakePositive(corr[1]); + return corr; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_ENCODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h new file mode 100644 index 000000000..4a1e3a67b --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h @@ -0,0 +1,102 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_BASE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_BASE_H_ + +#include + +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/core/bit_utils.h" +#include "draco/core/macros.h" +#include "draco/core/vector_d.h" + +namespace draco { + +// Base class containing shared functionality used by both encoding and decoding +// canonicalized normal octahedron prediction scheme transforms. See the +// encoding transform for more details about the method. +template +class PredictionSchemeNormalOctahedronCanonicalizedTransformBase + : public PredictionSchemeNormalOctahedronTransformBase { + public: + typedef PredictionSchemeNormalOctahedronTransformBase Base; + typedef VectorD Point2; + typedef DataTypeT DataType; + + PredictionSchemeNormalOctahedronCanonicalizedTransformBase() : Base() {} + // We expect the mod value to be of the form 2^b-1. + explicit PredictionSchemeNormalOctahedronCanonicalizedTransformBase( + DataType mod_value) + : Base(mod_value) {} + + static constexpr PredictionSchemeTransformType GetType() { + return PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED; + } + + int32_t GetRotationCount(Point2 pred) const { + const DataType sign_x = pred[0]; + const DataType sign_y = pred[1]; + + int32_t rotation_count = 0; + if (sign_x == 0) { + if (sign_y == 0) { + rotation_count = 0; + } else if (sign_y > 0) { + rotation_count = 3; + } else { + rotation_count = 1; + } + } else if (sign_x > 0) { + if (sign_y >= 0) { + rotation_count = 2; + } else { + rotation_count = 1; + } + } else { + if (sign_y <= 0) { + rotation_count = 0; + } else { + rotation_count = 3; + } + } + return rotation_count; + } + + Point2 RotatePoint(Point2 p, int32_t rotation_count) const { + switch (rotation_count) { + case 1: + return Point2(p[1], -p[0]); + case 2: + return Point2(-p[0], -p[1]); + case 3: + return Point2(-p[1], p[0]); + default: + return p; + } + } + + bool IsInBottomLeft(const Point2 &p) const { + if (p[0] == 0 && p[1] == 0) { + return true; + } + return (p[0] < 0 && p[1] <= 0); + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_BASE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc new file mode 100644 index 000000000..8c8932f77 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc @@ -0,0 +1,192 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h" +#include "draco/core/draco_test_base.h" + +namespace { + +class PredictionSchemeNormalOctahedronCanonicalizedTransformTest + : public ::testing::Test { + protected: + typedef draco::PredictionSchemeNormalOctahedronCanonicalizedEncodingTransform< + int32_t> + Transform; + typedef Transform::Point2 Point2; + + void TestComputeCorrection(const Transform &transform, const int32_t &ox, + const int32_t &oy, const int32_t &px, + const int32_t &py, const int32_t &cx, + const int32_t &cy) { + const int32_t o[2] = {ox + 7, oy + 7}; + const int32_t p[2] = {px + 7, py + 7}; + int32_t corr[2] = {500, 500}; + transform.ComputeCorrection(o, p, corr); + ASSERT_EQ(corr[0], (cx + 15) % 15); + ASSERT_EQ(corr[1], (cy + 15) % 15); + } + + void TestGetRotationCount(const Transform &transform, const Point2 &pred, + const int32_t &rot_dir) { + const int32_t rotation_count = transform.GetRotationCount(pred); + ASSERT_EQ(rot_dir, rotation_count); + } + + void TestRotateRepresentation(const Transform &transform, const Point2 &org, + const Point2 &pred, const Point2 &rot_org, + const Point2 &rot_pred) { + const int32_t rotation_count = transform.GetRotationCount(pred); + const Point2 res_org = transform.RotatePoint(org, rotation_count); + const Point2 res_pred = transform.RotatePoint(pred, rotation_count); + ASSERT_EQ(rot_org[0], res_org[0]); + ASSERT_EQ(rot_org[1], res_org[1]); + ASSERT_EQ(rot_pred[0], res_pred[0]); + ASSERT_EQ(rot_pred[1], res_pred[1]); + } +}; + +TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, Init) { + const Transform transform(15); + ASSERT_TRUE(transform.AreCorrectionsPositive()); +} + +TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, + IsInBottomLeft) { + const Transform transform(15); + ASSERT_TRUE(transform.IsInBottomLeft(Point2(0, 0))); + ASSERT_TRUE(transform.IsInBottomLeft(Point2(-1, -1))); + ASSERT_TRUE(transform.IsInBottomLeft(Point2(-7, -7))); + + ASSERT_FALSE(transform.IsInBottomLeft(Point2(1, 1))); + ASSERT_FALSE(transform.IsInBottomLeft(Point2(7, 7))); + ASSERT_FALSE(transform.IsInBottomLeft(Point2(-1, 1))); + ASSERT_FALSE(transform.IsInBottomLeft(Point2(-7, 7))); + ASSERT_FALSE(transform.IsInBottomLeft(Point2(1, -1))); + ASSERT_FALSE(transform.IsInBottomLeft(Point2(7, -7))); +} + +TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, + GetRotationCount) { + const Transform transform(15); + TestGetRotationCount(transform, Point2(1, 2), 2); // top right + TestGetRotationCount(transform, Point2(-1, 2), 3); // top left + TestGetRotationCount(transform, Point2(1, -2), 1); // bottom right + TestGetRotationCount(transform, Point2(-1, -2), 0); // bottom left + TestGetRotationCount(transform, Point2(0, 2), 3); // top left + TestGetRotationCount(transform, Point2(0, -2), 1); // bottom right + TestGetRotationCount(transform, Point2(2, 0), 2); // top right + TestGetRotationCount(transform, Point2(-2, 0), 0); // bottom left + TestGetRotationCount(transform, Point2(0, 0), 0); // bottom left +} + 
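The reverse rotation (4 - rotation_count) % 4 applied by the canonicalized decoding transform earlier in this diff undoes RotatePoint(); a dependency-free sketch:

```cpp
#include <cassert>
#include <utility>

using Point = std::pair<int, int>;

// Same 90-degree integer rotations as RotatePoint() in the transform base.
Point Rotate(const Point &p, int count) {
  switch (count & 3) {
    case 1: return Point(p.second, -p.first);
    case 2: return Point(-p.first, -p.second);
    case 3: return Point(-p.second, p.first);
    default: return p;
  }
}

int main() {
  const Point p(3, -1);
  for (int k = 0; k < 4; ++k) {
    const Point rotated = Rotate(p, k);
    const Point restored = Rotate(rotated, (4 - k) % 4);  // inverse rotation
    assert(restored == p);
  }
  return 0;
}
```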
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, + RotateRepresentation) { + const Transform transform(15); + // p top left; shift clockwise by 3 + TestRotateRepresentation(transform, Point2(1, 2), Point2(-3, 1), + Point2(-2, 1), Point2(-1, -3)); // q top right + TestRotateRepresentation(transform, Point2(-1, -2), Point2(-3, 1), + Point2(2, -1), Point2(-1, -3)); // q bottom left + TestRotateRepresentation(transform, Point2(1, -2), Point2(-3, 1), + Point2(2, 1), Point2(-1, -3)); // q bottom right + TestRotateRepresentation(transform, Point2(-1, 2), Point2(-3, 1), + Point2(-2, -1), Point2(-1, -3)); // q top left + // p top right; shift clockwise by 2 (flip) + TestRotateRepresentation(transform, Point2(1, 1), Point2(1, 3), + Point2(-1, -1), Point2(-1, -3)); // q top right + TestRotateRepresentation(transform, Point2(-1, -2), Point2(1, 3), + Point2(1, 2), Point2(-1, -3)); // q bottom left + TestRotateRepresentation(transform, Point2(-1, 2), Point2(1, 3), + Point2(1, -2), Point2(-1, -3)); // q top left + TestRotateRepresentation(transform, Point2(1, -2), Point2(1, 3), + Point2(-1, 2), Point2(-1, -3)); // q bottom right + // p bottom right; shift clockwise by 1 + TestRotateRepresentation(transform, Point2(1, 2), Point2(3, -1), + Point2(2, -1), Point2(-1, -3)); // q top right + TestRotateRepresentation(transform, Point2(1, -2), Point2(3, -1), + Point2(-2, -1), Point2(-1, -3)); // q bottom right + TestRotateRepresentation(transform, Point2(-1, -2), Point2(3, -1), + Point2(-2, 1), Point2(-1, -3)); // q bottom left + TestRotateRepresentation(transform, Point2(-1, 2), Point2(3, -1), + Point2(2, 1), Point2(-1, -3)); // q top left + // p bottom left; no change + TestRotateRepresentation(transform, Point2(1, 2), Point2(-1, -3), + Point2(1, 2), Point2(-1, -3)); // q top right + TestRotateRepresentation(transform, Point2(-1, 2), Point2(-1, -3), + Point2(-1, 2), Point2(-1, -3)); // q top left + TestRotateRepresentation(transform, Point2(1, -2), Point2(-1, -3), + Point2(1, -2), Point2(-1, -3)); // q bottom right + TestRotateRepresentation(transform, Point2(-1, -2), Point2(-1, -3), + Point2(-1, -2), Point2(-1, -3)); // q bottom left +} + +TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, + ComputeCorrection) { + const Transform transform(15); + TestComputeCorrection(transform, 0, 0, 0, 0, 0, 0); + TestComputeCorrection(transform, 1, 1, 1, 1, 0, 0); + // inside diamond; p top right + TestComputeCorrection(transform, 3, 4, 1, 2, -2, -2); // q top right + TestComputeCorrection(transform, -3, 4, 1, 2, 4, -2); // q top left + TestComputeCorrection(transform, 3, -4, 1, 2, -2, 6); // q bottom right + TestComputeCorrection(transform, -3, -4, 1, 2, 4, 6); // q bottom left + // inside diamond; p top left + TestComputeCorrection(transform, 3, 4, -1, 2, -2, 4); // q top right + TestComputeCorrection(transform, -3, 4, -1, 2, -2, -2); // q top left + TestComputeCorrection(transform, 3, -4, -1, 2, 6, 4); // q bottom right + TestComputeCorrection(transform, -3, -4, -1, 2, 6, -2); // q bottom left + // inside diamond; p bottom right + TestComputeCorrection(transform, 3, 4, 1, -2, 6, -2); // q top right + TestComputeCorrection(transform, -3, 4, 1, -2, 6, 4); // q top left + TestComputeCorrection(transform, 3, -4, 1, -2, -2, -2); // q bottom right + TestComputeCorrection(transform, -3, -4, 1, -2, -2, 4); // q bottom left + // inside diamond; p bottom left + TestComputeCorrection(transform, 3, 4, -1, -2, 4, 6); // q top right + TestComputeCorrection(transform, -3, 4, -1, -2, -2, 6); // q top 
left + TestComputeCorrection(transform, 3, -4, -1, -2, 4, -2); // q bottom right + TestComputeCorrection(transform, -3, -4, -1, -2, -2, -2); // q bottom left + // outside diamond; p top right + TestComputeCorrection(transform, 1, 2, 5, 4, -2, -4); // q top right + TestComputeCorrection(transform, -1, 2, 5, 4, -7, -4); // q top left + TestComputeCorrection(transform, 1, -2, 5, 4, -2, -7); // q bottom right + TestComputeCorrection(transform, -1, -2, 5, 4, -7, -7); // q bottom left + // outside diamond; p top left + TestComputeCorrection(transform, 1, 2, -5, 4, -4, -7); // q top right + TestComputeCorrection(transform, -1, 2, -5, 4, -4, -2); // q top left + TestComputeCorrection(transform, 1, -2, -5, 4, -7, -7); // q bottom right + TestComputeCorrection(transform, -1, -2, -5, 4, -7, -2); // q bottom left + // outside diamond; p bottom right + TestComputeCorrection(transform, 1, 2, 5, -4, -7, -2); // q top right + TestComputeCorrection(transform, -1, 2, 5, -4, -7, -7); // q top left + TestComputeCorrection(transform, 1, -2, 5, -4, -4, -2); // q bottom right + TestComputeCorrection(transform, -1, -2, 5, -4, -4, -7); // q bottom left + // outside diamond; p bottom left + TestComputeCorrection(transform, 1, 2, -5, -4, -7, -7); // q top right + TestComputeCorrection(transform, -1, 2, -5, -4, -2, -7); // q top left + TestComputeCorrection(transform, 1, -2, -5, -4, -7, -4); // q bottom right + TestComputeCorrection(transform, -1, -2, -5, -4, -2, -4); // q bottom left + + TestComputeCorrection(transform, -1, -2, 7, 7, -5, -6); + TestComputeCorrection(transform, 0, 0, 7, 7, 7, 7); + TestComputeCorrection(transform, -1, -2, 0, -2, 0, 1); +} + +TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, Interface) { + const Transform transform(15); + ASSERT_EQ(transform.max_quantized_value(), 15); + ASSERT_EQ(transform.center_value(), 7); + ASSERT_EQ(transform.quantization_bits(), 4); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h new file mode 100644 index 000000000..a1bc4a327 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h @@ -0,0 +1,103 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_DECODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_DECODING_TRANSFORM_H_ + +#include + +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h" +#include "draco/core/decoder_buffer.h" +#include "draco/core/macros.h" +#include "draco/core/vector_d.h" +#include "draco/draco_features.h" + +namespace draco { + +// Class for converting correction values transformed by the octahedral normal +// transform back to the original values. See the corresponding encoder for more +// details. +template +class PredictionSchemeNormalOctahedronDecodingTransform + : public PredictionSchemeNormalOctahedronTransformBase { + public: + typedef VectorD Point2; + typedef DataTypeT CorrType; + typedef DataTypeT DataType; + + PredictionSchemeNormalOctahedronDecodingTransform() {} + + // Dummy function to fulfill concept. + void Init(int num_components) {} + bool DecodeTransformData(DecoderBuffer *buffer) { + DataTypeT max_quantized_value, center_value; + if (!buffer->Decode(&max_quantized_value)) { + return false; + } + if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) { + if (!buffer->Decode(¢er_value)) { + return false; + } + } + (void)center_value; + return this->set_max_quantized_value(max_quantized_value); + } + + inline void ComputeOriginalValue(const DataType *pred_vals, + const CorrType *corr_vals, + DataType *out_orig_vals) const { + DRACO_DCHECK_LE(pred_vals[0], 2 * this->center_value()); + DRACO_DCHECK_LE(pred_vals[1], 2 * this->center_value()); + DRACO_DCHECK_LE(corr_vals[0], 2 * this->center_value()); + DRACO_DCHECK_LE(corr_vals[1], 2 * this->center_value()); + + DRACO_DCHECK_LE(0, pred_vals[0]); + DRACO_DCHECK_LE(0, pred_vals[1]); + DRACO_DCHECK_LE(0, corr_vals[0]); + DRACO_DCHECK_LE(0, corr_vals[1]); + + const Point2 pred = Point2(pred_vals[0], pred_vals[1]); + const Point2 corr = Point2(corr_vals[0], corr_vals[1]); + const Point2 orig = ComputeOriginalValue(pred, corr); + + out_orig_vals[0] = orig[0]; + out_orig_vals[1] = orig[1]; + } + + private: + Point2 ComputeOriginalValue(Point2 pred, const Point2 &corr) const { + const Point2 t(this->center_value(), this->center_value()); + pred = pred - t; + + const bool pred_is_in_diamond = this->IsInDiamond(pred[0], pred[1]); + if (!pred_is_in_diamond) { + this->InvertDiamond(&pred[0], &pred[1]); + } + Point2 orig = pred + corr; + orig[0] = this->ModMax(orig[0]); + orig[1] = this->ModMax(orig[1]); + if (!pred_is_in_diamond) { + this->InvertDiamond(&orig[0], &orig[1]); + } + orig = orig + t; + return orig; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_DECODING_TRANSFORM_H_ +#endif diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h new file mode 100644 index 000000000..4abfef669 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h @@ -0,0 +1,105 @@ +// Copyright 2016 The Draco Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_ENCODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_ENCODING_TRANSFORM_H_ + +#include + +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h" +#include "draco/core/encoder_buffer.h" +#include "draco/core/macros.h" +#include "draco/core/vector_d.h" + +namespace draco { + +// The transform works on octahedral coordinates for normals. The square is +// subdivided into four inner triangles (diamond) and four outer triangles. The +// inner triangles are associated with the upper part of the octahedron and the +// outer triangles are associated with the lower part. +// Given a prediction value P and the actual value Q that should be encoded, +// this transform first checks if P is outside the diamond. If so, the outer +// triangles are flipped towards the inside and vice versa. The actual +// correction value is then based on the mapped P and Q values. This tends to +// result in shorter correction vectors. +// This is possible since the P value is also known by the decoder, see also +// ComputeCorrection and ComputeOriginalValue functions. +// Note that the tile is not periodic, which implies that the outer edges can +// not be identified, which requires us to use an odd number of values on each +// axis. +// DataTypeT is expected to be some integral type. +// +template +class PredictionSchemeNormalOctahedronEncodingTransform + : public PredictionSchemeNormalOctahedronTransformBase { + public: + typedef PredictionSchemeNormalOctahedronTransformBase Base; + typedef VectorD Point2; + typedef DataTypeT CorrType; + typedef DataTypeT DataType; + + // We expect the mod value to be of the form 2^b-1. 
+ explicit PredictionSchemeNormalOctahedronEncodingTransform( + DataType max_quantized_value) + : Base(max_quantized_value) {} + + void Init(const DataTypeT *orig_data, int size, int num_components) {} + + bool EncodeTransformData(EncoderBuffer *buffer) { + buffer->Encode(this->max_quantized_value()); + return true; + } + + inline void ComputeCorrection(const DataType *orig_vals, + const DataType *pred_vals, + CorrType *out_corr_vals) const { + DRACO_DCHECK_LE(pred_vals[0], this->center_value() * 2); + DRACO_DCHECK_LE(pred_vals[1], this->center_value() * 2); + DRACO_DCHECK_LE(orig_vals[0], this->center_value() * 2); + DRACO_DCHECK_LE(orig_vals[1], this->center_value() * 2); + DRACO_DCHECK_LE(0, pred_vals[0]); + DRACO_DCHECK_LE(0, pred_vals[1]); + DRACO_DCHECK_LE(0, orig_vals[0]); + DRACO_DCHECK_LE(0, orig_vals[1]); + + const Point2 orig = Point2(orig_vals[0], orig_vals[1]); + const Point2 pred = Point2(pred_vals[0], pred_vals[1]); + const Point2 corr = ComputeCorrection(orig, pred); + + out_corr_vals[0] = corr[0]; + out_corr_vals[1] = corr[1]; + } + + private: + Point2 ComputeCorrection(Point2 orig, Point2 pred) const { + const Point2 t(this->center_value(), this->center_value()); + orig = orig - t; + pred = pred - t; + + if (!this->IsInDiamond(pred[0], pred[1])) { + this->InvertDiamond(&orig[0], &orig[1]); + this->InvertDiamond(&pred[0], &pred[1]); + } + + Point2 corr = orig - pred; + corr[0] = this->MakePositive(corr[0]); + corr[1] = this->MakePositive(corr[1]); + return corr; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_ENCODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h new file mode 100644 index 000000000..c9dd7d67b --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h @@ -0,0 +1,90 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_TRANSFORM_BASE_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_TRANSFORM_BASE_H_ + +#include + +#include "draco/compression/attributes/normal_compression_utils.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/core/bit_utils.h" +#include "draco/core/macros.h" +#include "draco/core/vector_d.h" + +namespace draco { + +// Base class containing shared functionality used by both encoding and decoding +// octahedral normal prediction scheme transforms. See the encoding transform +// for more details about the method. 
+template +class PredictionSchemeNormalOctahedronTransformBase { + public: + typedef VectorD Point2; + typedef DataTypeT DataType; + + PredictionSchemeNormalOctahedronTransformBase() {} + // We expect the mod value to be of the form 2^b-1. + explicit PredictionSchemeNormalOctahedronTransformBase( + DataType max_quantized_value) { + this->set_max_quantized_value(max_quantized_value); + } + + static constexpr PredictionSchemeTransformType GetType() { + return PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON; + } + + // We can return true as we keep correction values positive. + bool AreCorrectionsPositive() const { return true; } + + inline DataTypeT max_quantized_value() const { + return octahedron_tool_box_.max_quantized_value(); + } + inline DataTypeT center_value() const { + return octahedron_tool_box_.center_value(); + } + inline int32_t quantization_bits() const { + return octahedron_tool_box_.quantization_bits(); + } + + protected: + inline bool set_max_quantized_value(DataTypeT max_quantized_value) { + if (max_quantized_value % 2 == 0) { + return false; + } + int q = MostSignificantBit(max_quantized_value) + 1; + return octahedron_tool_box_.SetQuantizationBits(q); + } + + bool IsInDiamond(DataTypeT s, DataTypeT t) const { + return octahedron_tool_box_.IsInDiamond(s, t); + } + void InvertDiamond(DataTypeT *s, DataTypeT *t) const { + return octahedron_tool_box_.InvertDiamond(s, t); + } + + int32_t ModMax(int32_t x) const { return octahedron_tool_box_.ModMax(x); } + + // For correction values. + int32_t MakePositive(int32_t x) const { + return octahedron_tool_box_.MakePositive(x); + } + + private: + OctahedronToolBox octahedron_tool_box_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_TRANSFORM_BASE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc new file mode 100644 index 000000000..1001b19fa --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc @@ -0,0 +1,71 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h" +#include "draco/core/draco_test_base.h" + +namespace { + +class PredictionSchemeNormalOctahedronTransformTest : public ::testing::Test { + protected: + typedef draco::PredictionSchemeNormalOctahedronEncodingTransform + Transform; + typedef Transform::Point2 Point2; + + void TestComputeCorrection(const Transform &transform, const int32_t &ox, + const int32_t &oy, const int32_t &px, + const int32_t &py, const int32_t &cx, + const int32_t &cy) { + const int32_t o[2] = {ox + 7, oy + 7}; + const int32_t p[2] = {px + 7, py + 7}; + int32_t corr[2] = {500, 500}; + transform.ComputeCorrection(o, p, corr); + ASSERT_EQ(corr[0], (cx + 15) % 15); + ASSERT_EQ(corr[1], (cy + 15) % 15); + } +}; + +TEST_F(PredictionSchemeNormalOctahedronTransformTest, Init) { + const Transform transform(15); + ASSERT_TRUE(transform.AreCorrectionsPositive()); +} + +TEST_F(PredictionSchemeNormalOctahedronTransformTest, ComputeCorrections) { + const Transform transform(15); + // checks inside diamond + TestComputeCorrection(transform, 0, 0, 0, 0, 0, 0); + TestComputeCorrection(transform, 1, 1, 1, 1, 0, 0); + TestComputeCorrection(transform, 3, 4, 1, 1, 2, 3); + TestComputeCorrection(transform, -1, -1, -1, -1, 0, 0); + TestComputeCorrection(transform, -3, -4, -1, -1, -2, -3); + // checks outside diamond + TestComputeCorrection(transform, 4, 4, 4, 4, 0, 0); + TestComputeCorrection(transform, 5, 6, 4, 4, -2, -1); + TestComputeCorrection(transform, 3, 2, 4, 4, 2, 1); + // checks on outer edges + TestComputeCorrection(transform, 7, 7, 4, 4, -3, -3); + TestComputeCorrection(transform, 6, 7, 4, 4, -3, -2); + TestComputeCorrection(transform, -6, 7, 4, 4, -3, -2); + TestComputeCorrection(transform, 7, 6, 4, 4, -2, -3); + TestComputeCorrection(transform, 7, -6, 4, 4, -2, -3); +} + +TEST_F(PredictionSchemeNormalOctahedronTransformTest, Interface) { + const Transform transform(15); + ASSERT_EQ(transform.max_quantized_value(), 15); + ASSERT_EQ(transform.center_value(), 7); + ASSERT_EQ(transform.quantization_bits(), 4); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h new file mode 100644 index 000000000..e100c738a --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h @@ -0,0 +1,88 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_DECODING_TRANSFORM_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_DECODING_TRANSFORM_H_
+
+#include "draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h"
+#include "draco/core/decoder_buffer.h"
+
+namespace draco {
+
+// PredictionSchemeWrapDecodingTransform unwraps values encoded with the
+// PredictionSchemeWrapEncodingTransform.
+// See prediction_scheme_wrap_transform_base.h for more details about the
+// method.
+template <typename DataTypeT, typename CorrTypeT = DataTypeT>
+class PredictionSchemeWrapDecodingTransform
+    : public PredictionSchemeWrapTransformBase<DataTypeT> {
+ public:
+  typedef CorrTypeT CorrType;
+  PredictionSchemeWrapDecodingTransform() {}
+
+  // Computes the original value from the input predicted value and the decoded
+  // corrections. Values out of the bounds of the input values are unwrapped.
+  inline void ComputeOriginalValue(const DataTypeT *predicted_vals,
+                                   const CorrTypeT *corr_vals,
+                                   DataTypeT *out_original_vals) const {
+    // For now we assume both |DataTypeT| and |CorrTypeT| are equal.
+    static_assert(std::is_same<DataTypeT, CorrTypeT>::value,
+                  "Predictions and corrections must have the same type.");
+
+    // The only valid implementation right now is for int32_t.
+    static_assert(std::is_same<DataTypeT, int32_t>::value,
+                  "Only int32_t is supported for predicted values.");
+
+    predicted_vals = this->ClampPredictedValue(predicted_vals);
+
+    // Perform the wrapping using unsigned coordinates to avoid potential signed
+    // integer overflows caused by malformed input.
+    const uint32_t *const uint_predicted_vals =
+        reinterpret_cast<const uint32_t *>(predicted_vals);
+    const uint32_t *const uint_corr_vals =
+        reinterpret_cast<const uint32_t *>(corr_vals);
+    for (int i = 0; i < this->num_components(); ++i) {
+      out_original_vals[i] =
+          static_cast<DataTypeT>(uint_predicted_vals[i] + uint_corr_vals[i]);
+      if (out_original_vals[i] > this->max_value()) {
+        out_original_vals[i] -= this->max_dif();
+      } else if (out_original_vals[i] < this->min_value()) {
+        out_original_vals[i] += this->max_dif();
+      }
+    }
+  }
+
+  bool DecodeTransformData(DecoderBuffer *buffer) {
+    DataTypeT min_value, max_value;
+    if (!buffer->Decode(&min_value)) {
+      return false;
+    }
+    if (!buffer->Decode(&max_value)) {
+      return false;
+    }
+    if (min_value > max_value) {
+      return false;
+    }
+    this->set_min_value(min_value);
+    this->set_max_value(max_value);
+    if (!this->InitCorrectionBounds()) {
+      return false;
+    }
+    return true;
+  }
+};
+
+}  // namespace draco
+
+#endif  // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_DECODING_TRANSFORM_H_
diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h
new file mode 100644
index 000000000..1f5e8b135
--- /dev/null
+++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h
@@ -0,0 +1,81 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_ENCODING_TRANSFORM_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_ENCODING_TRANSFORM_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// PredictionSchemeWrapEncodingTransform wraps input values using the wrapping +// scheme described in: prediction_scheme_wrap_transform_base.h . +template +class PredictionSchemeWrapEncodingTransform + : public PredictionSchemeWrapTransformBase { + public: + typedef CorrTypeT CorrType; + PredictionSchemeWrapEncodingTransform() {} + + void Init(const DataTypeT *orig_data, int size, int num_components) { + PredictionSchemeWrapTransformBase::Init(num_components); + // Go over the original values and compute the bounds. + if (size == 0) { + return; + } + DataTypeT min_value = orig_data[0]; + DataTypeT max_value = min_value; + for (int i = 1; i < size; ++i) { + if (orig_data[i] < min_value) { + min_value = orig_data[i]; + } else if (orig_data[i] > max_value) { + max_value = orig_data[i]; + } + } + this->set_min_value(min_value); + this->set_max_value(max_value); + this->InitCorrectionBounds(); + } + + // Computes the corrections based on the input original value and the + // predicted value. Out of bound correction values are wrapped around the max + // range of input values. + inline void ComputeCorrection(const DataTypeT *original_vals, + const DataTypeT *predicted_vals, + CorrTypeT *out_corr_vals) const { + for (int i = 0; i < this->num_components(); ++i) { + predicted_vals = this->ClampPredictedValue(predicted_vals); + out_corr_vals[i] = original_vals[i] - predicted_vals[i]; + // Wrap around if needed. + DataTypeT &corr_val = out_corr_vals[i]; + if (corr_val < this->min_correction()) { + corr_val += this->max_dif(); + } else if (corr_val > this->max_correction()) { + corr_val -= this->max_dif(); + } + } + } + + bool EncodeTransformData(EncoderBuffer *buffer) { + // Store the input value range as it is needed by the decoder. + buffer->Encode(this->min_value()); + buffer->Encode(this->max_value()); + return true; + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_ENCODING_TRANSFORM_H_ diff --git a/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h new file mode 100644 index 000000000..26f61fbaf --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h @@ -0,0 +1,120 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_TRANSFORM_BASE_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_TRANSFORM_BASE_H_
+
+#include <limits>
+#include <vector>
+
+#include "draco/compression/config/compression_shared.h"
+#include "draco/core/macros.h"
+
+namespace draco {
+
+// PredictionSchemeWrapTransform uses the min and max bounds of the original
+// data to wrap stored correction values around these bounds centered at 0,
+// i.e., when the range of the original values O is between <MIN, MAX> and
+// N = MAX-MIN, we can then store any correction X = O - P, as:
+//   X + N, if X < -N / 2
+//   X - N, if X > N / 2
+//   X otherwise
+// To unwrap this value, the decoder then simply checks whether the final
+// corrected value F = P + X is out of the bounds of the input values.
+// All out of bounds values are unwrapped using
+//   F + N, if F < MIN
+//   F - N, if F > MAX
+// This wrapping can reduce the number of unique values, which translates to a
+// better entropy of the stored values and better compression rates.
+template <typename DataTypeT>
+class PredictionSchemeWrapTransformBase {
+ public:
+  PredictionSchemeWrapTransformBase()
+      : num_components_(0),
+        min_value_(0),
+        max_value_(0),
+        max_dif_(0),
+        max_correction_(0),
+        min_correction_(0) {}
+
+  static constexpr PredictionSchemeTransformType GetType() {
+    return PREDICTION_TRANSFORM_WRAP;
+  }
+
+  void Init(int num_components) {
+    num_components_ = num_components;
+    clamped_value_.resize(num_components);
+  }
+
+  bool AreCorrectionsPositive() const { return false; }
+
+  inline const DataTypeT *ClampPredictedValue(
+      const DataTypeT *predicted_val) const {
+    for (int i = 0; i < this->num_components(); ++i) {
+      if (predicted_val[i] > max_value_) {
+        clamped_value_[i] = max_value_;
+      } else if (predicted_val[i] < min_value_) {
+        clamped_value_[i] = min_value_;
+      } else {
+        clamped_value_[i] = predicted_val[i];
+      }
+    }
+    return &clamped_value_[0];
+  }
+
+  // TODO(hemmer): Consider refactoring to avoid this dummy.
+  int quantization_bits() const {
+    DRACO_DCHECK(false);
+    return -1;
+  }
+
+ protected:
+  bool InitCorrectionBounds() {
+    const int64_t dif =
+        static_cast<int64_t>(max_value_) - static_cast<int64_t>(min_value_);
+    if (dif < 0 || dif >= std::numeric_limits<int32_t>::max()) {
+      return false;
+    }
+    max_dif_ = 1 + static_cast<DataTypeT>(dif);
+    max_correction_ = max_dif_ / 2;
+    min_correction_ = -max_correction_;
+    if ((max_dif_ & 1) == 0) {
+      max_correction_ -= 1;
+    }
+    return true;
+  }
+
+  inline int num_components() const { return num_components_; }
+  inline DataTypeT min_value() const { return min_value_; }
+  inline void set_min_value(const DataTypeT &v) { min_value_ = v; }
+  inline DataTypeT max_value() const { return max_value_; }
+  inline void set_max_value(const DataTypeT &v) { max_value_ = v; }
+  inline DataTypeT max_dif() const { return max_dif_; }
+  inline DataTypeT min_correction() const { return min_correction_; }
+  inline DataTypeT max_correction() const { return max_correction_; }
+
+ private:
+  int num_components_;
+  DataTypeT min_value_;
+  DataTypeT max_value_;
+  DataTypeT max_dif_;
+  DataTypeT max_correction_;
+  DataTypeT min_correction_;
+  // This is in fact just a tmp variable to avoid reallocation.
+ mutable std::vector clamped_value_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_WRAP_TRANSFORM_BASE_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.cc new file mode 100644 index 000000000..b4ba24f2d --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.cc @@ -0,0 +1,118 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_attribute_decoder.h" + +namespace draco { + +SequentialAttributeDecoder::SequentialAttributeDecoder() + : decoder_(nullptr), attribute_(nullptr), attribute_id_(-1) {} + +bool SequentialAttributeDecoder::Init(PointCloudDecoder *decoder, + int attribute_id) { + decoder_ = decoder; + attribute_ = decoder->point_cloud()->attribute(attribute_id); + attribute_id_ = attribute_id; + return true; +} + +bool SequentialAttributeDecoder::InitializeStandalone( + PointAttribute *attribute) { + attribute_ = attribute; + attribute_id_ = -1; + return true; +} + +bool SequentialAttributeDecoder::DecodePortableAttribute( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + if (attribute_->num_components() <= 0 || + !attribute_->Reset(point_ids.size())) { + return false; + } + if (!DecodeValues(point_ids, in_buffer)) { + return false; + } + return true; +} + +bool SequentialAttributeDecoder::DecodeDataNeededByPortableTransform( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + // Default implementation does not apply any transform. + return true; +} + +bool SequentialAttributeDecoder::TransformAttributeToOriginalFormat( + const std::vector &point_ids) { + // Default implementation does not apply any transform. + return true; +} + +const PointAttribute *SequentialAttributeDecoder::GetPortableAttribute() { + // If needed, copy point to attribute value index mapping from the final + // attribute to the portable attribute. + if (!attribute_->is_mapping_identity() && portable_attribute_ && + portable_attribute_->is_mapping_identity()) { + portable_attribute_->SetExplicitMapping(attribute_->indices_map_size()); + for (PointIndex i(0); + i < static_cast(attribute_->indices_map_size()); ++i) { + portable_attribute_->SetPointMapEntry(i, attribute_->mapped_index(i)); + } + } + return portable_attribute_.get(); +} + +bool SequentialAttributeDecoder::InitPredictionScheme( + PredictionSchemeInterface *ps) { + for (int i = 0; i < ps->GetNumParentAttributes(); ++i) { + const int att_id = decoder_->point_cloud()->GetNamedAttributeId( + ps->GetParentAttributeType(i)); + if (att_id == -1) { + return false; // Requested attribute does not exist. 
+ } +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (decoder_->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) { + if (!ps->SetParentAttribute(decoder_->point_cloud()->attribute(att_id))) { + return false; + } + } else +#endif + { + const PointAttribute *const pa = decoder_->GetPortableAttribute(att_id); + if (pa == nullptr || !ps->SetParentAttribute(pa)) { + return false; + } + } + } + return true; +} + +bool SequentialAttributeDecoder::DecodeValues( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + const int32_t num_values = static_cast(point_ids.size()); + const int entry_size = static_cast(attribute_->byte_stride()); + std::unique_ptr value_data_ptr(new uint8_t[entry_size]); + uint8_t *const value_data = value_data_ptr.get(); + int out_byte_pos = 0; + // Decode raw attribute values in their original format. + for (int i = 0; i < num_values; ++i) { + if (!in_buffer->Decode(value_data, entry_size)) { + return false; + } + attribute_->buffer()->Write(out_byte_pos, value_data, entry_size); + out_byte_pos += entry_size; + } + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.h b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.h new file mode 100644 index 000000000..d48119465 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoder.h @@ -0,0 +1,86 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h" +#include "draco/compression/point_cloud/point_cloud_decoder.h" +#include "draco/draco_features.h" + +namespace draco { + +// A base class for decoding attribute values encoded by the +// SequentialAttributeEncoder. +class SequentialAttributeDecoder { + public: + SequentialAttributeDecoder(); + virtual ~SequentialAttributeDecoder() = default; + + virtual bool Init(PointCloudDecoder *decoder, int attribute_id); + + // Initialization for a specific attribute. This can be used mostly for + // standalone decoding of an attribute without an PointCloudDecoder. + virtual bool InitializeStandalone(PointAttribute *attribute); + + // Performs lossless decoding of the portable attribute data. + virtual bool DecodePortableAttribute(const std::vector &point_ids, + DecoderBuffer *in_buffer); + + // Decodes any data needed to revert portable transform of the decoded + // attribute. + virtual bool DecodeDataNeededByPortableTransform( + const std::vector &point_ids, DecoderBuffer *in_buffer); + + // Reverts transformation performed by encoder in + // SequentialAttributeEncoder::TransformAttributeToPortableFormat() method. 
+ virtual bool TransformAttributeToOriginalFormat( + const std::vector &point_ids); + + const PointAttribute *GetPortableAttribute(); + + const PointAttribute *attribute() const { return attribute_; } + PointAttribute *attribute() { return attribute_; } + int attribute_id() const { return attribute_id_; } + PointCloudDecoder *decoder() const { return decoder_; } + + protected: + // Should be used to initialize newly created prediction scheme. + // Returns false when the initialization failed (in which case the scheme + // cannot be used). + virtual bool InitPredictionScheme(PredictionSchemeInterface *ps); + + // The actual implementation of the attribute decoding. Should be overridden + // for specialized decoders. + virtual bool DecodeValues(const std::vector &point_ids, + DecoderBuffer *in_buffer); + + void SetPortableAttribute(std::unique_ptr att) { + portable_attribute_ = std::move(att); + } + + PointAttribute *portable_attribute() { return portable_attribute_.get(); } + + private: + PointCloudDecoder *decoder_; + PointAttribute *attribute_; + int attribute_id_; + + // Storage for decoded portable attribute (after lossless decoding). + std::unique_ptr portable_attribute_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.cc b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.cc new file mode 100644 index 000000000..0e5e26bca --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.cc @@ -0,0 +1,149 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_attribute_decoders_controller.h" +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED +#include "draco/compression/attributes/sequential_normal_attribute_decoder.h" +#endif +#include "draco/compression/attributes/sequential_quantization_attribute_decoder.h" +#include "draco/compression/config/compression_shared.h" + +namespace draco { + +SequentialAttributeDecodersController::SequentialAttributeDecodersController( + std::unique_ptr sequencer) + : sequencer_(std::move(sequencer)) {} + +bool SequentialAttributeDecodersController::DecodeAttributesDecoderData( + DecoderBuffer *buffer) { + if (!AttributesDecoder::DecodeAttributesDecoderData(buffer)) { + return false; + } + // Decode unique ids of all sequential encoders and create them. + const int32_t num_attributes = GetNumAttributes(); + sequential_decoders_.resize(num_attributes); + for (int i = 0; i < num_attributes; ++i) { + uint8_t decoder_type; + if (!buffer->Decode(&decoder_type)) { + return false; + } + // Create the decoder from the id. 
+ sequential_decoders_[i] = CreateSequentialDecoder(decoder_type); + if (!sequential_decoders_[i]) { + return false; + } + if (!sequential_decoders_[i]->Init(GetDecoder(), GetAttributeId(i))) { + return false; + } + } + return true; +} + +bool SequentialAttributeDecodersController::DecodeAttributes( + DecoderBuffer *buffer) { + if (!sequencer_ || !sequencer_->GenerateSequence(&point_ids_)) { + return false; + } + // Initialize point to attribute value mapping for all decoded attributes. + const int32_t num_attributes = GetNumAttributes(); + for (int i = 0; i < num_attributes; ++i) { + PointAttribute *const pa = + GetDecoder()->point_cloud()->attribute(GetAttributeId(i)); + if (!sequencer_->UpdatePointToAttributeIndexMapping(pa)) { + return false; + } + } + return AttributesDecoder::DecodeAttributes(buffer); +} + +bool SequentialAttributeDecodersController::DecodePortableAttributes( + DecoderBuffer *in_buffer) { + const int32_t num_attributes = GetNumAttributes(); + for (int i = 0; i < num_attributes; ++i) { + if (!sequential_decoders_[i]->DecodePortableAttribute(point_ids_, + in_buffer)) { + return false; + } + } + return true; +} + +bool SequentialAttributeDecodersController:: + DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) { + const int32_t num_attributes = GetNumAttributes(); + for (int i = 0; i < num_attributes; ++i) { + if (!sequential_decoders_[i]->DecodeDataNeededByPortableTransform( + point_ids_, in_buffer)) { + return false; + } + } + return true; +} + +bool SequentialAttributeDecodersController:: + TransformAttributesToOriginalFormat() { + const int32_t num_attributes = GetNumAttributes(); + for (int i = 0; i < num_attributes; ++i) { + // Check whether the attribute transform should be skipped. + if (GetDecoder()->options()) { + const PointAttribute *const attribute = + sequential_decoders_[i]->attribute(); + const PointAttribute *const portable_attribute = + sequential_decoders_[i]->GetPortableAttribute(); + if (portable_attribute && + GetDecoder()->options()->GetAttributeBool( + attribute->attribute_type(), "skip_attribute_transform", false)) { + // Attribute transform should not be performed. In this case, we replace + // the output geometry attribute with the portable attribute. + // TODO(ostava): We can potentially avoid this copy by introducing a new + // mechanism that would allow to use the final attributes as portable + // attributes for predictors that may need them. + sequential_decoders_[i]->attribute()->CopyFrom(*portable_attribute); + continue; + } + } + if (!sequential_decoders_[i]->TransformAttributeToOriginalFormat( + point_ids_)) { + return false; + } + } + return true; +} + +std::unique_ptr +SequentialAttributeDecodersController::CreateSequentialDecoder( + uint8_t decoder_type) { + switch (decoder_type) { + case SEQUENTIAL_ATTRIBUTE_ENCODER_GENERIC: + return std::unique_ptr( + new SequentialAttributeDecoder()); + case SEQUENTIAL_ATTRIBUTE_ENCODER_INTEGER: + return std::unique_ptr( + new SequentialIntegerAttributeDecoder()); + case SEQUENTIAL_ATTRIBUTE_ENCODER_QUANTIZATION: + return std::unique_ptr( + new SequentialQuantizationAttributeDecoder()); +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + case SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS: + return std::unique_ptr( + new SequentialNormalAttributeDecoder()); +#endif + default: + break; + } + // Unknown or unsupported decoder type. 
+ return nullptr; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.h b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.h new file mode 100644 index 000000000..abc1f3685 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_decoders_controller.h @@ -0,0 +1,61 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODERS_CONTROLLER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODERS_CONTROLLER_H_ + +#include "draco/compression/attributes/attributes_decoder.h" +#include "draco/compression/attributes/points_sequencer.h" +#include "draco/compression/attributes/sequential_attribute_decoder.h" + +namespace draco { + +// A basic implementation of an attribute decoder that decodes data encoded by +// the SequentialAttributeEncodersController class. The +// SequentialAttributeDecodersController creates a single +// AttributeIndexedValuesDecoder for each of the decoded attribute, where the +// type of the values decoder is determined by the unique identifier that was +// encoded by the encoder. +class SequentialAttributeDecodersController : public AttributesDecoder { + public: + explicit SequentialAttributeDecodersController( + std::unique_ptr sequencer); + + bool DecodeAttributesDecoderData(DecoderBuffer *buffer) override; + bool DecodeAttributes(DecoderBuffer *buffer) override; + const PointAttribute *GetPortableAttribute( + int32_t point_attribute_id) override { + const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id); + if (loc_id < 0) { + return nullptr; + } + return sequential_decoders_[loc_id]->GetPortableAttribute(); + } + + protected: + bool DecodePortableAttributes(DecoderBuffer *in_buffer) override; + bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) override; + bool TransformAttributesToOriginalFormat() override; + virtual std::unique_ptr CreateSequentialDecoder( + uint8_t decoder_type); + + private: + std::vector> sequential_decoders_; + std::vector point_ids_; + std::unique_ptr sequencer_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_DECODERS_CONTROLLER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.cc new file mode 100644 index 000000000..6bde3eeb3 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.cc @@ -0,0 +1,108 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_attribute_encoder.h" + +namespace draco { + +SequentialAttributeEncoder::SequentialAttributeEncoder() + : encoder_(nullptr), + attribute_(nullptr), + attribute_id_(-1), + is_parent_encoder_(false) {} + +bool SequentialAttributeEncoder::Init(PointCloudEncoder *encoder, + int attribute_id) { + encoder_ = encoder; + attribute_ = encoder_->point_cloud()->attribute(attribute_id); + attribute_id_ = attribute_id; + return true; +} + +bool SequentialAttributeEncoder::InitializeStandalone( + PointAttribute *attribute) { + attribute_ = attribute; + attribute_id_ = -1; + return true; +} + +bool SequentialAttributeEncoder::TransformAttributeToPortableFormat( + const std::vector &point_ids) { + // Default implementation doesn't transform the input data. + return true; +} + +bool SequentialAttributeEncoder::EncodePortableAttribute( + const std::vector &point_ids, EncoderBuffer *out_buffer) { + // Lossless encoding of the input values. + if (!EncodeValues(point_ids, out_buffer)) { + return false; + } + return true; +} + +bool SequentialAttributeEncoder::EncodeDataNeededByPortableTransform( + EncoderBuffer *out_buffer) { + // Default implementation doesn't transform the input data. + return true; +} + +bool SequentialAttributeEncoder::EncodeValues( + const std::vector &point_ids, EncoderBuffer *out_buffer) { + const int entry_size = static_cast(attribute_->byte_stride()); + const std::unique_ptr value_data_ptr(new uint8_t[entry_size]); + uint8_t *const value_data = value_data_ptr.get(); + // Encode all attribute values in their native raw format. + for (uint32_t i = 0; i < point_ids.size(); ++i) { + const AttributeValueIndex entry_id = attribute_->mapped_index(point_ids[i]); + attribute_->GetValue(entry_id, value_data); + out_buffer->Encode(value_data, entry_size); + } + return true; +} + +void SequentialAttributeEncoder::MarkParentAttribute() { + is_parent_encoder_ = true; +} + +bool SequentialAttributeEncoder::InitPredictionScheme( + PredictionSchemeInterface *ps) { + for (int i = 0; i < ps->GetNumParentAttributes(); ++i) { + const int att_id = encoder_->point_cloud()->GetNamedAttributeId( + ps->GetParentAttributeType(i)); + if (att_id == -1) { + return false; // Requested attribute does not exist. + } + parent_attributes_.push_back(att_id); + encoder_->MarkParentAttribute(att_id); + } + return true; +} + +bool SequentialAttributeEncoder::SetPredictionSchemeParentAttributes( + PredictionSchemeInterface *ps) { + for (int i = 0; i < ps->GetNumParentAttributes(); ++i) { + const int att_id = encoder_->point_cloud()->GetNamedAttributeId( + ps->GetParentAttributeType(i)); + if (att_id == -1) { + return false; // Requested attribute does not exist. 
+ } + if (!ps->SetParentAttribute(encoder_->GetPortableAttribute(att_id))) { + return false; + } + } + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.h b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.h new file mode 100644 index 000000000..00f62db89 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoder.h @@ -0,0 +1,134 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h" +#include "draco/compression/point_cloud/point_cloud_encoder.h" + +namespace draco { + +// A base class for encoding attribute values of a single attribute using a +// given sequence of point ids. The default implementation encodes all attribute +// values directly to the buffer but derived classes can perform any custom +// encoding (such as quantization) by overriding the EncodeValues() method. +class SequentialAttributeEncoder { + public: + SequentialAttributeEncoder(); + virtual ~SequentialAttributeEncoder() = default; + + // Method that can be used for custom initialization of an attribute encoder, + // such as creation of prediction schemes and initialization of attribute + // encoder dependencies. + // |encoder| is the parent PointCloudEncoder, + // |attribute_id| is the id of the attribute that is being encoded by this + // encoder. + // This method is automatically called by the PointCloudEncoder after all + // attribute encoders are created and it should not be called explicitly from + // other places. + virtual bool Init(PointCloudEncoder *encoder, int attribute_id); + + // Initialization for a specific attribute. This can be used mostly for + // standalone encoding of an attribute without an PointCloudEncoder. + virtual bool InitializeStandalone(PointAttribute *attribute); + + // Transforms attribute data into format that is going to be encoded + // losslessly. The transform itself can be lossy. + virtual bool TransformAttributeToPortableFormat( + const std::vector &point_ids); + + // Performs lossless encoding of the transformed attribute data. + virtual bool EncodePortableAttribute(const std::vector &point_ids, + EncoderBuffer *out_buffer); + + // Encodes any data related to the portable attribute transform. 
+ virtual bool EncodeDataNeededByPortableTransform(EncoderBuffer *out_buffer); + + virtual bool IsLossyEncoder() const { return false; } + + int NumParentAttributes() const { + return static_cast(parent_attributes_.size()); + } + int GetParentAttributeId(int i) const { return parent_attributes_[i]; } + + const PointAttribute *GetPortableAttribute() const { + if (portable_attribute_ != nullptr) { + return portable_attribute_.get(); + } + return attribute(); + } + + // Called when this attribute encoder becomes a parent encoder of another + // encoder. + void MarkParentAttribute(); + + virtual uint8_t GetUniqueId() const { + return SEQUENTIAL_ATTRIBUTE_ENCODER_GENERIC; + } + + const PointAttribute *attribute() const { return attribute_; } + int attribute_id() const { return attribute_id_; } + PointCloudEncoder *encoder() const { return encoder_; } + + protected: + // Should be used to initialize newly created prediction scheme. + // Returns false when the initialization failed (in which case the scheme + // cannot be used). + virtual bool InitPredictionScheme(PredictionSchemeInterface *ps); + + // Sets parent attributes for a given prediction scheme. Must be called + // after all prediction schemes are initialized, but before the prediction + // scheme is used. + virtual bool SetPredictionSchemeParentAttributes( + PredictionSchemeInterface *ps); + + // Encodes all attribute values in the specified order. Should be overridden + // for specialized encoders. + virtual bool EncodeValues(const std::vector &point_ids, + EncoderBuffer *out_buffer); + + bool is_parent_encoder() const { return is_parent_encoder_; } + + void SetPortableAttribute(std::unique_ptr att) { + portable_attribute_ = std::move(att); + } + + // Returns a mutable attribute that should be filled by derived encoders with + // the transformed version of the attribute data. To get a public const + // version, use the GetPortableAttribute() method. + PointAttribute *portable_attribute() { return portable_attribute_.get(); } + + private: + PointCloudEncoder *encoder_; + const PointAttribute *attribute_; + int attribute_id_; + + // List of attribute encoders that need to be encoded before this attribute. + // E.g. The parent attributes may be used to predict values used by this + // attribute encoder. + std::vector parent_attributes_; + + bool is_parent_encoder_; + + // Attribute that stores transformed data from the source attribute after it + // is processed through the ApplyTransform() method. Attribute data stored + // within this attribute is guaranteed to be encoded losslessly and it can be + // safely used for prediction of other attributes. + std::unique_ptr portable_attribute_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.cc b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.cc new file mode 100644 index 000000000..7d5d1eeff --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.cc @@ -0,0 +1,159 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_attribute_encoders_controller.h" +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED +#include "draco/compression/attributes/sequential_normal_attribute_encoder.h" +#endif +#include "draco/compression/attributes/sequential_quantization_attribute_encoder.h" +#include "draco/compression/point_cloud/point_cloud_encoder.h" + +namespace draco { + +SequentialAttributeEncodersController::SequentialAttributeEncodersController( + std::unique_ptr sequencer) + : sequencer_(std::move(sequencer)) {} + +SequentialAttributeEncodersController::SequentialAttributeEncodersController( + std::unique_ptr sequencer, int point_attrib_id) + : AttributesEncoder(point_attrib_id), sequencer_(std::move(sequencer)) {} + +bool SequentialAttributeEncodersController::Init(PointCloudEncoder *encoder, + const PointCloud *pc) { + if (!AttributesEncoder::Init(encoder, pc)) { + return false; + } + if (!CreateSequentialEncoders()) { + return false; + } + // Initialize all value encoders. + for (uint32_t i = 0; i < num_attributes(); ++i) { + const int32_t att_id = GetAttributeId(i); + if (!sequential_encoders_[i]->Init(encoder, att_id)) { + return false; + } + } + return true; +} + +bool SequentialAttributeEncodersController::EncodeAttributesEncoderData( + EncoderBuffer *out_buffer) { + if (!AttributesEncoder::EncodeAttributesEncoderData(out_buffer)) { + return false; + } + // Encode a unique id of every sequential encoder. 
+ for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) { + out_buffer->Encode(sequential_encoders_[i]->GetUniqueId()); + } + return true; +} + +bool SequentialAttributeEncodersController::EncodeAttributes( + EncoderBuffer *buffer) { + if (!sequencer_ || !sequencer_->GenerateSequence(&point_ids_)) { + return false; + } + return AttributesEncoder::EncodeAttributes(buffer); +} + +bool SequentialAttributeEncodersController:: + TransformAttributesToPortableFormat() { + for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) { + if (!sequential_encoders_[i]->TransformAttributeToPortableFormat( + point_ids_)) { + return false; + } + } + return true; +} + +bool SequentialAttributeEncodersController::EncodePortableAttributes( + EncoderBuffer *out_buffer) { + for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) { + if (!sequential_encoders_[i]->EncodePortableAttribute(point_ids_, + out_buffer)) { + return false; + } + } + return true; +} + +bool SequentialAttributeEncodersController:: + EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) { + for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) { + if (!sequential_encoders_[i]->EncodeDataNeededByPortableTransform( + out_buffer)) { + return false; + } + } + return true; +} + +bool SequentialAttributeEncodersController::CreateSequentialEncoders() { + sequential_encoders_.resize(num_attributes()); + for (uint32_t i = 0; i < num_attributes(); ++i) { + sequential_encoders_[i] = CreateSequentialEncoder(i); + if (sequential_encoders_[i] == nullptr) { + return false; + } + if (i < sequential_encoder_marked_as_parent_.size()) { + if (sequential_encoder_marked_as_parent_[i]) { + sequential_encoders_[i]->MarkParentAttribute(); + } + } + } + return true; +} + +std::unique_ptr +SequentialAttributeEncodersController::CreateSequentialEncoder(int i) { + const int32_t att_id = GetAttributeId(i); + const PointAttribute *const att = encoder()->point_cloud()->attribute(att_id); + + switch (att->data_type()) { + case DT_UINT8: + case DT_INT8: + case DT_UINT16: + case DT_INT16: + case DT_UINT32: + case DT_INT32: + return std::unique_ptr( + new SequentialIntegerAttributeEncoder()); + case DT_FLOAT32: + if (encoder()->options()->GetAttributeInt(att_id, "quantization_bits", + -1) > 0) { +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + if (att->attribute_type() == GeometryAttribute::NORMAL) { + // We currently only support normals with float coordinates + // and must be quantized. + return std::unique_ptr( + new SequentialNormalAttributeEncoder()); + } else { +#endif + return std::unique_ptr( + new SequentialQuantizationAttributeEncoder()); +#ifdef DRACO_NORMAL_ENCODING_SUPPORTED + } +#endif + } + break; + default: + break; + } + // Return the default attribute encoder. + return std::unique_ptr( + new SequentialAttributeEncoder()); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.h b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.h new file mode 100644 index 000000000..13c2704ec --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_attribute_encoders_controller.h @@ -0,0 +1,115 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
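The controller above drives every per-attribute encoder through the same three phases, in order: transform the source values into a portable attribute, encode the portable values losslessly, then encode whatever is needed to invert the transform. A minimal, self-contained sketch of that call order with simplified stand-in types (the names here are illustrative, not the real Draco interfaces):

```cpp
#include <cstdint>
#include <cstdio>
#include <memory>
#include <vector>

// Simplified stand-in for a sequential attribute encoder (hypothetical names).
struct MiniSequentialEncoder {
  virtual ~MiniSequentialEncoder() = default;
  virtual bool TransformToPortable(const std::vector<int>& point_ids) { return true; }
  virtual bool EncodePortable(const std::vector<int>& point_ids,
                              std::vector<uint8_t>* out) { return true; }
  virtual bool EncodeTransformData(std::vector<uint8_t>* out) { return true; }
};

// Mirrors the phase order used by the controller: transform every attribute,
// then encode all portable data, then encode the per-attribute transform data.
bool EncodeAll(std::vector<std::unique_ptr<MiniSequentialEncoder>>& encoders,
               const std::vector<int>& point_ids, std::vector<uint8_t>* out) {
  for (auto& e : encoders)
    if (!e->TransformToPortable(point_ids)) return false;
  for (auto& e : encoders)
    if (!e->EncodePortable(point_ids, out)) return false;
  for (auto& e : encoders)
    if (!e->EncodeTransformData(out)) return false;
  return true;
}

int main() {
  std::vector<std::unique_ptr<MiniSequentialEncoder>> encoders;
  encoders.push_back(std::make_unique<MiniSequentialEncoder>());
  std::vector<uint8_t> buffer;
  std::printf("ok=%d\n", EncodeAll(encoders, {0, 1, 2}, &buffer) ? 1 : 0);
}
```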
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODERS_CONTROLLER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODERS_CONTROLLER_H_ + +#include "draco/compression/attributes/attributes_encoder.h" +#include "draco/compression/attributes/points_sequencer.h" +#include "draco/compression/attributes/sequential_attribute_encoder.h" + +namespace draco { + +// A basic implementation of an attribute encoder that can be used to encode +// an arbitrary set of attributes. The encoder creates a sequential attribute +// encoder for each encoded attribute (see sequential_attribute_encoder.h) and +// then it encodes all attribute values in an order defined by a point sequence +// generated in the GeneratePointSequence() method. The default implementation +// generates a linear sequence of all points, but derived classes can generate +// any custom sequence. +class SequentialAttributeEncodersController : public AttributesEncoder { + public: + explicit SequentialAttributeEncodersController( + std::unique_ptr sequencer); + SequentialAttributeEncodersController( + std::unique_ptr sequencer, int point_attrib_id); + + bool Init(PointCloudEncoder *encoder, const PointCloud *pc) override; + bool EncodeAttributesEncoderData(EncoderBuffer *out_buffer) override; + bool EncodeAttributes(EncoderBuffer *buffer) override; + uint8_t GetUniqueId() const override { return BASIC_ATTRIBUTE_ENCODER; } + + int NumParentAttributes(int32_t point_attribute_id) const override { + const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id); + if (loc_id < 0) { + return 0; + } + return sequential_encoders_[loc_id]->NumParentAttributes(); + } + + int GetParentAttributeId(int32_t point_attribute_id, + int32_t parent_i) const override { + const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id); + if (loc_id < 0) { + return -1; + } + return sequential_encoders_[loc_id]->GetParentAttributeId(parent_i); + } + + bool MarkParentAttribute(int32_t point_attribute_id) override { + const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id); + if (loc_id < 0) { + return false; + } + // Mark the attribute encoder as parent (even when if it is not created + // yet). + if (sequential_encoder_marked_as_parent_.size() <= loc_id) { + sequential_encoder_marked_as_parent_.resize(loc_id + 1, false); + } + sequential_encoder_marked_as_parent_[loc_id] = true; + + if (sequential_encoders_.size() <= loc_id) { + return true; // Sequential encoders not generated yet. 
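Because MarkParentAttribute can be called before the per-attribute encoders exist, the controller records the flag in a separately grown vector and replays it when CreateSequentialEncoders finally builds the encoders. A small stand-alone illustration of that resize-and-replay bookkeeping (hypothetical class, simplified from the controller):

```cpp
#include <cstdio>
#include <vector>

// Records "is parent" flags before the encoders exist, then replays them later.
class ParentFlags {
 public:
  void Mark(size_t local_id) {
    if (flags_.size() <= local_id) flags_.resize(local_id + 1, false);
    flags_[local_id] = true;
  }
  bool IsMarked(size_t local_id) const {
    return local_id < flags_.size() && flags_[local_id];
  }

 private:
  std::vector<bool> flags_;
};

int main() {
  ParentFlags flags;
  flags.Mark(2);                      // marked before any encoder is created
  for (size_t i = 0; i < 4; ++i)      // later, when the encoders are created
    std::printf("encoder %zu parent=%d\n", i, flags.IsMarked(i) ? 1 : 0);
}
```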
+ } + sequential_encoders_[loc_id]->MarkParentAttribute(); + return true; + } + + const PointAttribute *GetPortableAttribute( + int32_t point_attribute_id) override { + const int32_t loc_id = GetLocalIdForPointAttribute(point_attribute_id); + if (loc_id < 0) { + return nullptr; + } + return sequential_encoders_[loc_id]->GetPortableAttribute(); + } + + protected: + bool TransformAttributesToPortableFormat() override; + bool EncodePortableAttributes(EncoderBuffer *out_buffer) override; + bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) override; + + // Creates all sequential encoders (one for each attribute associated with the + // encoder). + virtual bool CreateSequentialEncoders(); + + // Create a sequential encoder for a given attribute based on the attribute + // type + // and the provided encoder options. + virtual std::unique_ptr CreateSequentialEncoder( + int i); + + private: + std::vector> sequential_encoders_; + + // Flag for each sequential attribute encoder indicating whether it was marked + // as parent attribute or not. + std::vector sequential_encoder_marked_as_parent_; + std::vector point_ids_; + std::unique_ptr sequencer_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_ATTRIBUTE_ENCODERS_CONTROLLER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc new file mode 100644 index 000000000..83f42125a --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.cc @@ -0,0 +1,240 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_integer_attribute_decoder.h" + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_decoding_transform.h" +#include "draco/compression/entropy/symbol_decoding.h" + +namespace draco { + +SequentialIntegerAttributeDecoder::SequentialIntegerAttributeDecoder() {} + +bool SequentialIntegerAttributeDecoder::Init(PointCloudDecoder *decoder, + int attribute_id) { + if (!SequentialAttributeDecoder::Init(decoder, attribute_id)) { + return false; + } + return true; +} + +bool SequentialIntegerAttributeDecoder::TransformAttributeToOriginalFormat( + const std::vector &point_ids) { +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (decoder() && + decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) { + return true; // Don't revert the transform here for older files. + } +#endif + return StoreValues(static_cast(point_ids.size())); +} + +bool SequentialIntegerAttributeDecoder::DecodeValues( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + // Decode prediction scheme. 
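The prediction setup decoded just below is read from the bitstream as two signed bytes: the prediction method, and, only when prediction is used, a transform type that is range-checked before anything is instantiated. A hedged sketch of that parsing step over a raw byte buffer; the constants below are illustrative placeholders, not Draco's actual enum values:

```cpp
#include <cstdint>
#include <cstdio>
#include <optional>
#include <vector>

// Illustrative constants only; Draco defines its own enum values.
constexpr int8_t kPredictionNone = -2;
constexpr int8_t kTransformNone = -1;
constexpr int8_t kNumTransformTypes = 4;

struct PredictionHeader {
  int8_t method = kPredictionNone;
  int8_t transform = kTransformNone;
};

// Reads the two-byte prediction header the way DecodeValues does: method
// first, then (only if prediction is used) a validated transform type.
std::optional<PredictionHeader> ReadPredictionHeader(const std::vector<uint8_t>& buf) {
  if (buf.empty()) return std::nullopt;
  PredictionHeader h;
  h.method = static_cast<int8_t>(buf[0]);
  if (h.method != kPredictionNone) {
    if (buf.size() < 2) return std::nullopt;
    h.transform = static_cast<int8_t>(buf[1]);
    if (h.transform < kTransformNone || h.transform >= kNumTransformTypes)
      return std::nullopt;  // reject out-of-range transform types
  }
  return h;
}

int main() {
  const std::vector<uint8_t> bytes = {0x00, 0x01};  // method 0, transform 1
  const auto h = ReadPredictionHeader(bytes);
  std::printf("ok=%d\n", h.has_value() ? 1 : 0);
}
```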
+ int8_t prediction_scheme_method; + if (!in_buffer->Decode(&prediction_scheme_method)) { + return false; + } + if (prediction_scheme_method != PREDICTION_NONE) { + int8_t prediction_transform_type; + if (!in_buffer->Decode(&prediction_transform_type)) { + return false; + } + // Check that decoded prediction scheme transform type is valid. + if (prediction_transform_type < PREDICTION_TRANSFORM_NONE || + prediction_transform_type >= NUM_PREDICTION_SCHEME_TRANSFORM_TYPES) { + return false; + } + prediction_scheme_ = CreateIntPredictionScheme( + static_cast(prediction_scheme_method), + static_cast(prediction_transform_type)); + } + + if (prediction_scheme_) { + if (!InitPredictionScheme(prediction_scheme_.get())) { + return false; + } + } + + if (!DecodeIntegerValues(point_ids, in_buffer)) { + return false; + } + +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + const int32_t num_values = static_cast(point_ids.size()); + if (decoder() && + decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) { + // For older files, revert the transform right after we decode the data. + if (!StoreValues(num_values)) { + return false; + } + } +#endif + return true; +} + +std::unique_ptr> +SequentialIntegerAttributeDecoder::CreateIntPredictionScheme( + PredictionSchemeMethod method, + PredictionSchemeTransformType transform_type) { + if (transform_type != PREDICTION_TRANSFORM_WRAP) { + return nullptr; // For now we support only wrap transform. + } + return CreatePredictionSchemeForDecoder< + int32_t, PredictionSchemeWrapDecodingTransform>( + method, attribute_id(), decoder()); +} + +bool SequentialIntegerAttributeDecoder::DecodeIntegerValues( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + const int num_components = GetNumValueComponents(); + if (num_components <= 0) { + return false; + } + const size_t num_entries = point_ids.size(); + const size_t num_values = num_entries * num_components; + PreparePortableAttribute(static_cast(num_entries), num_components); + int32_t *const portable_attribute_data = GetPortableAttributeData(); + if (portable_attribute_data == nullptr) { + return false; + } + uint8_t compressed; + if (!in_buffer->Decode(&compressed)) { + return false; + } + if (compressed > 0) { + // Decode compressed values. + if (!DecodeSymbols(static_cast(num_values), num_components, + in_buffer, + reinterpret_cast(portable_attribute_data))) { + return false; + } + } else { + // Decode the integer data directly. + // Get the number of bytes for a given entry. + uint8_t num_bytes; + if (!in_buffer->Decode(&num_bytes)) { + return false; + } + if (num_bytes == DataTypeLength(DT_INT32)) { + if (portable_attribute()->buffer()->data_size() < + sizeof(int32_t) * num_values) { + return false; + } + if (!in_buffer->Decode(portable_attribute_data, + sizeof(int32_t) * num_values)) { + return false; + } + } else { + if (portable_attribute()->buffer()->data_size() < + num_bytes * num_values) { + return false; + } + if (in_buffer->remaining_size() < + static_cast(num_bytes) * static_cast(num_values)) { + return false; + } + for (size_t i = 0; i < num_values; ++i) { + if (!in_buffer->Decode(portable_attribute_data + i, num_bytes)) + return false; + } + } + } + + if (num_values > 0 && (prediction_scheme_ == nullptr || + !prediction_scheme_->AreCorrectionsPositive())) { + // Convert the values back to the original signed format. 
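The conversion flagged in the comment above is, in spirit, the usual zigzag mapping: non-negative values go to even symbols and negative values to odd ones, so small magnitudes stay small after prediction. A self-contained round-trip sketch of that mapping, assuming the standard zigzag formulation (helper names are illustrative):

```cpp
#include <cstdint>
#include <cstdio>

// Standard zigzag mapping: 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
uint32_t SignedToSymbol(int32_t v) {
  return v >= 0 ? static_cast<uint32_t>(v) << 1
                : (static_cast<uint32_t>(-(v + 1)) << 1) | 1u;
}

int32_t SymbolToSigned(uint32_t s) {
  return (s & 1u) ? -static_cast<int32_t>(s >> 1) - 1
                  : static_cast<int32_t>(s >> 1);
}

int main() {
  for (const int32_t v : {0, -1, 1, -6, 155, -9}) {
    const uint32_t s = SignedToSymbol(v);
    std::printf("%d -> %u -> %d\n", v, s, SymbolToSigned(s));  // round trip
  }
}
```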
+ ConvertSymbolsToSignedInts( + reinterpret_cast(portable_attribute_data), + static_cast(num_values), portable_attribute_data); + } + + // If the data was encoded with a prediction scheme, we must revert it. + if (prediction_scheme_) { + if (!prediction_scheme_->DecodePredictionData(in_buffer)) { + return false; + } + + if (num_values > 0) { + if (!prediction_scheme_->ComputeOriginalValues( + portable_attribute_data, portable_attribute_data, + static_cast(num_values), num_components, point_ids.data())) { + return false; + } + } + } + return true; +} + +bool SequentialIntegerAttributeDecoder::StoreValues(uint32_t num_values) { + switch (attribute()->data_type()) { + case DT_UINT8: + StoreTypedValues(num_values); + break; + case DT_INT8: + StoreTypedValues(num_values); + break; + case DT_UINT16: + StoreTypedValues(num_values); + break; + case DT_INT16: + StoreTypedValues(num_values); + break; + case DT_UINT32: + StoreTypedValues(num_values); + break; + case DT_INT32: + StoreTypedValues(num_values); + break; + default: + return false; + } + return true; +} + +template +void SequentialIntegerAttributeDecoder::StoreTypedValues(uint32_t num_values) { + const int num_components = attribute()->num_components(); + const int entry_size = sizeof(AttributeTypeT) * num_components; + const std::unique_ptr att_val( + new AttributeTypeT[num_components]); + const int32_t *const portable_attribute_data = GetPortableAttributeData(); + int val_id = 0; + int out_byte_pos = 0; + for (uint32_t i = 0; i < num_values; ++i) { + for (int c = 0; c < num_components; ++c) { + const AttributeTypeT value = + static_cast(portable_attribute_data[val_id++]); + att_val[c] = value; + } + // Store the integer value into the attribute buffer. + attribute()->buffer()->Write(out_byte_pos, att_val.get(), entry_size); + out_byte_pos += entry_size; + } +} + +void SequentialIntegerAttributeDecoder::PreparePortableAttribute( + int num_entries, int num_components) { + GeometryAttribute va; + va.Init(attribute()->attribute_type(), nullptr, num_components, DT_INT32, + false, num_components * DataTypeLength(DT_INT32), 0); + std::unique_ptr port_att(new PointAttribute(va)); + port_att->SetIdentityMapping(); + port_att->Reset(num_entries); + SetPortableAttribute(std::move(port_att)); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.h b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.h new file mode 100644 index 000000000..ef48ed817 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_decoder.h @@ -0,0 +1,76 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_DECODER_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h" +#include "draco/compression/attributes/sequential_attribute_decoder.h" +#include "draco/draco_features.h" + +namespace draco { + +// Decoder for attributes encoded with the SequentialIntegerAttributeEncoder. +class SequentialIntegerAttributeDecoder : public SequentialAttributeDecoder { + public: + SequentialIntegerAttributeDecoder(); + bool Init(PointCloudDecoder *decoder, int attribute_id) override; + + bool TransformAttributeToOriginalFormat( + const std::vector &point_ids) override; + + protected: + bool DecodeValues(const std::vector &point_ids, + DecoderBuffer *in_buffer) override; + virtual bool DecodeIntegerValues(const std::vector &point_ids, + DecoderBuffer *in_buffer); + + // Returns a prediction scheme that should be used for decoding of the + // integer values. + virtual std::unique_ptr> + CreateIntPredictionScheme(PredictionSchemeMethod method, + PredictionSchemeTransformType transform_type); + + // Returns the number of integer attribute components. In general, this + // can be different from the number of components of the input attribute. + virtual int32_t GetNumValueComponents() const { + return attribute()->num_components(); + } + + // Called after all integer values are decoded. The implementation should + // use this method to store the values into the attribute. + virtual bool StoreValues(uint32_t num_values); + + void PreparePortableAttribute(int num_entries, int num_components); + + int32_t *GetPortableAttributeData() { + if (portable_attribute()->size() == 0) { + return nullptr; + } + return reinterpret_cast( + portable_attribute()->GetAddress(AttributeValueIndex(0))); + } + + private: + // Stores decoded values into the attribute with a data type AttributeTypeT. + template + void StoreTypedValues(uint32_t num_values); + + std::unique_ptr> + prediction_scheme_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc new file mode 100644 index 000000000..e66a0a8a4 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.cc @@ -0,0 +1,233 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#include "draco/compression/attributes/sequential_integer_attribute_encoder.h" + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h" +#include "draco/compression/entropy/symbol_encoding.h" +#include "draco/core/bit_utils.h" + +namespace draco { + +SequentialIntegerAttributeEncoder::SequentialIntegerAttributeEncoder() {} + +bool SequentialIntegerAttributeEncoder::Init(PointCloudEncoder *encoder, + int attribute_id) { + if (!SequentialAttributeEncoder::Init(encoder, attribute_id)) { + return false; + } + if (GetUniqueId() == SEQUENTIAL_ATTRIBUTE_ENCODER_INTEGER) { + // When encoding integers, this encoder currently works only for integer + // attributes up to 32 bits. + switch (attribute()->data_type()) { + case DT_INT8: + case DT_UINT8: + case DT_INT16: + case DT_UINT16: + case DT_INT32: + case DT_UINT32: + break; + default: + return false; + } + } + // Init prediction scheme. + const PredictionSchemeMethod prediction_scheme_method = + GetPredictionMethodFromOptions(attribute_id, *encoder->options()); + + prediction_scheme_ = CreateIntPredictionScheme(prediction_scheme_method); + + if (prediction_scheme_ && !InitPredictionScheme(prediction_scheme_.get())) { + prediction_scheme_ = nullptr; + } + + return true; +} + +bool SequentialIntegerAttributeEncoder::TransformAttributeToPortableFormat( + const std::vector &point_ids) { + if (encoder()) { + if (!PrepareValues(point_ids, encoder()->point_cloud()->num_points())) { + return false; + } + } else { + if (!PrepareValues(point_ids, 0)) { + return false; + } + } + + // Update point to attribute mapping with the portable attribute if the + // attribute is a parent attribute (for now, we can skip it otherwise). + if (is_parent_encoder()) { + // First create map between original attribute value indices and new ones + // (determined by the encoding order). + const PointAttribute *const orig_att = attribute(); + PointAttribute *const portable_att = portable_attribute(); + IndexTypeVector + value_to_value_map(orig_att->size()); + for (int i = 0; i < point_ids.size(); ++i) { + value_to_value_map[orig_att->mapped_index(point_ids[i])] = + AttributeValueIndex(i); + } + if (portable_att->is_mapping_identity()) { + portable_att->SetExplicitMapping(encoder()->point_cloud()->num_points()); + } + // Go over all points of the original attribute and update the mapping in + // the portable attribute. + for (PointIndex i(0); i < encoder()->point_cloud()->num_points(); ++i) { + portable_att->SetPointMapEntry( + i, value_to_value_map[orig_att->mapped_index(i)]); + } + } + return true; +} + +std::unique_ptr> +SequentialIntegerAttributeEncoder::CreateIntPredictionScheme( + PredictionSchemeMethod method) { + return CreatePredictionSchemeForEncoder< + int32_t, PredictionSchemeWrapEncodingTransform>( + method, attribute_id(), encoder()); +} + +bool SequentialIntegerAttributeEncoder::EncodeValues( + const std::vector &point_ids, EncoderBuffer *out_buffer) { + // Initialize general quantization data. 
+ const PointAttribute *const attrib = attribute(); + if (attrib->size() == 0) { + return true; + } + + int8_t prediction_scheme_method = PREDICTION_NONE; + if (prediction_scheme_) { + if (!SetPredictionSchemeParentAttributes(prediction_scheme_.get())) { + return false; + } + prediction_scheme_method = + static_cast(prediction_scheme_->GetPredictionMethod()); + } + out_buffer->Encode(prediction_scheme_method); + if (prediction_scheme_) { + out_buffer->Encode( + static_cast(prediction_scheme_->GetTransformType())); + } + + const int num_components = portable_attribute()->num_components(); + const int num_values = + static_cast(num_components * portable_attribute()->size()); + const int32_t *const portable_attribute_data = GetPortableAttributeData(); + + // We need to keep the portable data intact, but several encoding steps can + // result in changes of this data, e.g., by applying prediction schemes that + // change the data in place. To preserve the portable data we store and + // process all encoded data in a separate array. + std::vector encoded_data(num_values); + + // All integer values are initialized. Process them using the prediction + // scheme if we have one. + if (prediction_scheme_) { + prediction_scheme_->ComputeCorrectionValues( + portable_attribute_data, &encoded_data[0], num_values, num_components, + point_ids.data()); + } + + if (prediction_scheme_ == nullptr || + !prediction_scheme_->AreCorrectionsPositive()) { + const int32_t *const input = + prediction_scheme_ ? encoded_data.data() : portable_attribute_data; + ConvertSignedIntsToSymbols(input, num_values, + reinterpret_cast(&encoded_data[0])); + } + + if (encoder() == nullptr || encoder()->options()->GetGlobalBool( + "use_built_in_attribute_compression", true)) { + out_buffer->Encode(static_cast(1)); + Options symbol_encoding_options; + if (encoder() != nullptr) { + SetSymbolEncodingCompressionLevel(&symbol_encoding_options, + 10 - encoder()->options()->GetSpeed()); + } + if (!EncodeSymbols(reinterpret_cast(encoded_data.data()), + static_cast(point_ids.size()) * num_components, + num_components, &symbol_encoding_options, out_buffer)) { + return false; + } + } else { + // No compression. Just store the raw integer values, using the number of + // bytes as needed. + + // To compute the maximum bit-length, first OR all values. + uint32_t masked_value = 0; + for (uint32_t i = 0; i < static_cast(num_values); ++i) { + masked_value |= encoded_data[i]; + } + // Compute the msb of the ORed value. + int value_msb_pos = 0; + if (masked_value != 0) { + value_msb_pos = MostSignificantBit(masked_value); + } + const int num_bytes = 1 + value_msb_pos / 8; + + out_buffer->Encode(static_cast(0)); + out_buffer->Encode(static_cast(num_bytes)); + + if (num_bytes == DataTypeLength(DT_INT32)) { + out_buffer->Encode(encoded_data.data(), sizeof(int32_t) * num_values); + } else { + for (uint32_t i = 0; i < static_cast(num_values); ++i) { + out_buffer->Encode(encoded_data.data() + i, num_bytes); + } + } + } + if (prediction_scheme_) { + prediction_scheme_->EncodePredictionData(out_buffer); + } + return true; +} + +bool SequentialIntegerAttributeEncoder::PrepareValues( + const std::vector &point_ids, int num_points) { + // Convert all values to int32_t format. 
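When the built-in symbol coder is disabled, the raw path above stores every value with the same fixed byte width, derived from the highest bit set in any value: OR all symbols together, find the most significant bit, and round up to whole bytes. A small sketch of that computation (hypothetical helper names):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Highest set bit position (0-based); 0 for value 0.
int MostSignificantBitPos(uint32_t v) {
  int msb = 0;
  while (v >>= 1) ++msb;
  return msb;
}

// Mirrors the uncompressed path of EncodeValues: OR all symbols together and
// derive the per-value byte width from the highest bit that is ever used.
int BytesPerValue(const std::vector<uint32_t>& symbols) {
  uint32_t mask = 0;
  for (const uint32_t s : symbols) mask |= s;
  const int msb = (mask != 0) ? MostSignificantBitPos(mask) : 0;
  return 1 + msb / 8;
}

int main() {
  std::printf("%d\n", BytesPerValue({3, 17, 250}));  // 1 byte per value
  std::printf("%d\n", BytesPerValue({3, 70000}));    // 3 bytes per value
}
```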
+ const PointAttribute *const attrib = attribute(); + const int num_components = attrib->num_components(); + const int num_entries = static_cast(point_ids.size()); + PreparePortableAttribute(num_entries, num_components, num_points); + int32_t dst_index = 0; + int32_t *const portable_attribute_data = GetPortableAttributeData(); + for (PointIndex pi : point_ids) { + const AttributeValueIndex att_id = attrib->mapped_index(pi); + if (!attrib->ConvertValue(att_id, + portable_attribute_data + dst_index)) { + return false; + } + dst_index += num_components; + } + return true; +} + +void SequentialIntegerAttributeEncoder::PreparePortableAttribute( + int num_entries, int num_components, int num_points) { + GeometryAttribute va; + va.Init(attribute()->attribute_type(), nullptr, num_components, DT_INT32, + false, num_components * DataTypeLength(DT_INT32), 0); + std::unique_ptr port_att(new PointAttribute(va)); + port_att->Reset(num_entries); + SetPortableAttribute(std::move(port_att)); + if (num_points) { + portable_attribute()->SetExplicitMapping(num_points); + } +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.h b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.h new file mode 100644 index 000000000..c1d6222ef --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoder.h @@ -0,0 +1,67 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_ENCODER_H_ + +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h" +#include "draco/compression/attributes/sequential_attribute_encoder.h" + +namespace draco { + +// Attribute encoder designed for lossless encoding of integer attributes. The +// attribute values can be pre-processed by a prediction scheme and compressed +// with a built-in entropy coder. +class SequentialIntegerAttributeEncoder : public SequentialAttributeEncoder { + public: + SequentialIntegerAttributeEncoder(); + uint8_t GetUniqueId() const override { + return SEQUENTIAL_ATTRIBUTE_ENCODER_INTEGER; + } + + bool Init(PointCloudEncoder *encoder, int attribute_id) override; + bool TransformAttributeToPortableFormat( + const std::vector &point_ids) override; + + protected: + bool EncodeValues(const std::vector &point_ids, + EncoderBuffer *out_buffer) override; + + // Returns a prediction scheme that should be used for encoding of the + // integer values. + virtual std::unique_ptr> + CreateIntPredictionScheme(PredictionSchemeMethod method); + + // Prepares the integer values that are going to be encoded. 
+ virtual bool PrepareValues(const std::vector &point_ids, + int num_points); + + void PreparePortableAttribute(int num_entries, int num_components, + int num_points); + + int32_t *GetPortableAttributeData() { + return reinterpret_cast( + portable_attribute()->GetAddress(AttributeValueIndex(0))); + } + + private: + // Optional prediction scheme can be used to modify the integer values in + // order to make them easier to compress. + std::unique_ptr> + prediction_scheme_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_INTEGER_ATTRIBUTE_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoding_test.cc b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoding_test.cc new file mode 100644 index 000000000..44485e679 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_integer_attribute_encoding_test.cc @@ -0,0 +1,64 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include + +#include "draco/compression/attributes/sequential_integer_attribute_decoder.h" +#include "draco/compression/attributes/sequential_integer_attribute_encoder.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/core/draco_test_base.h" + +namespace draco { + +class SequentialIntegerAttributeEncodingTest : public ::testing::Test { + protected: +}; + +TEST_F(SequentialIntegerAttributeEncodingTest, DoesCompress) { + // This test verifies that IntegerEncoding encodes and decodes the given data. + const std::vector values{1, 8, 7, 5, 5, 5, 9, + 155, -6, -9, 9, 125, 1, 0}; + PointAttribute pa; + pa.Init(GeometryAttribute::GENERIC, 1, DT_INT32, false, values.size()); + for (uint32_t i = 0; i < values.size(); ++i) { + pa.SetAttributeValue(AttributeValueIndex(i), &values[i]); + } + // List of point ids from 0 to point_ids.size() - 1. 
+ std::vector point_ids(values.size()); + std::iota(point_ids.begin(), point_ids.end(), 0); + + EncoderBuffer out_buf; + SequentialIntegerAttributeEncoder ie; + ASSERT_TRUE(ie.InitializeStandalone(&pa)); + ASSERT_TRUE(ie.TransformAttributeToPortableFormat(point_ids)); + ASSERT_TRUE(ie.EncodePortableAttribute(point_ids, &out_buf)); + ASSERT_TRUE(ie.EncodeDataNeededByPortableTransform(&out_buf)); + + DecoderBuffer in_buf; + in_buf.Init(out_buf.data(), out_buf.size()); + in_buf.set_bitstream_version(kDracoMeshBitstreamVersion); + SequentialIntegerAttributeDecoder id; + ASSERT_TRUE(id.InitializeStandalone(&pa)); + ASSERT_TRUE(id.DecodePortableAttribute(point_ids, &in_buf)); + ASSERT_TRUE(id.DecodeDataNeededByPortableTransform(point_ids, &in_buf)); + ASSERT_TRUE(id.TransformAttributeToOriginalFormat(point_ids)); + + for (uint32_t i = 0; i < values.size(); ++i) { + int32_t entry_val; + pa.GetValue(AttributeValueIndex(i), &entry_val); + ASSERT_EQ(entry_val, values[i]); + } +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.cc new file mode 100644 index 000000000..de36c1c36 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.cc @@ -0,0 +1,76 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_normal_attribute_decoder.h" + +#include "draco/compression/attributes/normal_compression_utils.h" + +namespace draco { + +SequentialNormalAttributeDecoder::SequentialNormalAttributeDecoder() {} + +bool SequentialNormalAttributeDecoder::Init(PointCloudDecoder *decoder, + int attribute_id) { + if (!SequentialIntegerAttributeDecoder::Init(decoder, attribute_id)) { + return false; + } + // Currently, this encoder works only for 3-component normal vectors. + if (attribute()->num_components() != 3) { + return false; + } + // Also the data type must be DT_FLOAT32. + if (attribute()->data_type() != DT_FLOAT32) { + return false; + } + return true; +} + +bool SequentialNormalAttributeDecoder::DecodeIntegerValues( + const std::vector &point_ids, DecoderBuffer *in_buffer) { +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) { + // Note: in older bitstreams, we do not have a PortableAttribute() decoded + // at this stage so we cannot pass it down to the DecodeParameters() call. + // It still works fine for octahedral transform because it does not need to + // use any data from the attribute. 
+ if (!octahedral_transform_.DecodeParameters(*attribute(), in_buffer)) { + return false; + } + } +#endif + return SequentialIntegerAttributeDecoder::DecodeIntegerValues(point_ids, + in_buffer); +} + +bool SequentialNormalAttributeDecoder::DecodeDataNeededByPortableTransform( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + if (decoder()->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 0)) { + // For newer file version, decode attribute transform data here. + if (!octahedral_transform_.DecodeParameters(*GetPortableAttribute(), + in_buffer)) { + return false; + } + } + + // Store the decoded transform data in portable attribute. + return octahedral_transform_.TransferToAttribute(portable_attribute()); +} + +bool SequentialNormalAttributeDecoder::StoreValues(uint32_t num_points) { + // Convert all quantized values back to floats. + return octahedral_transform_.InverseTransformAttribute( + *GetPortableAttribute(), attribute()); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.h b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.h new file mode 100644 index 000000000..8c2d801b7 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_decoder.h @@ -0,0 +1,83 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_ + +#include "draco/attributes/attribute_octahedron_transform.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_decoding_transform.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_decoding_transform.h" +#include "draco/compression/attributes/sequential_integer_attribute_decoder.h" +#include "draco/draco_features.h" + +namespace draco { + +// Decoder for attributes encoded with SequentialNormalAttributeEncoder. +class SequentialNormalAttributeDecoder + : public SequentialIntegerAttributeDecoder { + public: + SequentialNormalAttributeDecoder(); + bool Init(PointCloudDecoder *decoder, int attribute_id) override; + + protected: + int32_t GetNumValueComponents() const override { + return 2; // We quantize everything into two components. 
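The two components mentioned above come from an octahedral parameterization of the unit sphere, as in the Cigolle et al. survey cited by the encoder: the normal is projected onto the octahedron |x|+|y|+|z| = 1 and the lower hemisphere is folded into the same square. A sketch of the classic mapping, which conveys the idea but is not Draco's exact implementation:

```cpp
#include <cmath>
#include <cstdio>

// Classic octahedral encoding of a unit vector into two components in [-1, 1].
void OctahedralEncode(float x, float y, float z, float* u, float* v) {
  const float inv_l1 = 1.0f / (std::fabs(x) + std::fabs(y) + std::fabs(z));
  float px = x * inv_l1;
  float py = y * inv_l1;
  if (z < 0.0f) {  // fold the lower hemisphere over the diagonals
    const float fx = (1.0f - std::fabs(py)) * (px >= 0.0f ? 1.0f : -1.0f);
    const float fy = (1.0f - std::fabs(px)) * (py >= 0.0f ? 1.0f : -1.0f);
    px = fx;
    py = fy;
  }
  *u = px;
  *v = py;
}

int main() {
  float u, v;
  OctahedralEncode(0.0f, 0.0f, -1.0f, &u, &v);
  std::printf("u=%.2f v=%.2f\n", u, v);  // -Z maps to a corner of the square
}
```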
+ } + bool DecodeIntegerValues(const std::vector &point_ids, + DecoderBuffer *in_buffer) override; + bool DecodeDataNeededByPortableTransform( + const std::vector &point_ids, + DecoderBuffer *in_buffer) override; + bool StoreValues(uint32_t num_points) override; + + private: + AttributeOctahedronTransform octahedral_transform_; + + std::unique_ptr> + CreateIntPredictionScheme( + PredictionSchemeMethod method, + PredictionSchemeTransformType transform_type) override { + switch (transform_type) { +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + case PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON: { + typedef PredictionSchemeNormalOctahedronDecodingTransform + Transform; + // At this point the decoder has not read the quantization bits, + // which is why we must construct the transform by default. + // See Transform.DecodeTransformData for more details. + return CreatePredictionSchemeForDecoder( + method, attribute_id(), decoder()); + } +#endif + case PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED: { + typedef PredictionSchemeNormalOctahedronCanonicalizedDecodingTransform< + int32_t> + Transform; + // At this point the decoder has not read the quantization bits, + // which is why we must construct the transform by default. + // See Transform.DecodeTransformData for more details. + return CreatePredictionSchemeForDecoder( + method, attribute_id(), decoder()); + } + default: + return nullptr; // Currently, we support only octahedron transform and + // octahedron transform canonicalized. + } + } +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc new file mode 100644 index 000000000..2e20e89e6 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.cc @@ -0,0 +1,57 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_normal_attribute_encoder.h" + +#include "draco/compression/attributes/normal_compression_utils.h" + +namespace draco { + +bool SequentialNormalAttributeEncoder::Init(PointCloudEncoder *encoder, + int attribute_id) { + if (!SequentialIntegerAttributeEncoder::Init(encoder, attribute_id)) + return false; + // Currently this encoder works only for 3-component normal vectors. + if (attribute()->num_components() != 3) { + return false; + } + + // Initialize AttributeOctahedronTransform. 
+ const int quantization_bits = encoder->options()->GetAttributeInt( + attribute_id, "quantization_bits", -1); + if (quantization_bits < 1) { + return false; + } + attribute_octahedron_transform_.SetParameters(quantization_bits); + return true; +} + +bool SequentialNormalAttributeEncoder::EncodeDataNeededByPortableTransform( + EncoderBuffer *out_buffer) { + return attribute_octahedron_transform_.EncodeParameters(out_buffer); +} + +bool SequentialNormalAttributeEncoder::PrepareValues( + const std::vector &point_ids, int num_points) { + auto portable_att = attribute_octahedron_transform_.InitTransformedAttribute( + *(attribute()), point_ids.size()); + if (!attribute_octahedron_transform_.TransformAttribute( + *(attribute()), point_ids, portable_att.get())) { + return false; + } + SetPortableAttribute(std::move(portable_att)); + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.h b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.h new file mode 100644 index 000000000..53705c598 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_normal_attribute_encoder.h @@ -0,0 +1,82 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_ + +#include "draco/attributes/attribute_octahedron_transform.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h" +#include "draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h" +#include "draco/compression/attributes/sequential_integer_attribute_encoder.h" +#include "draco/compression/config/compression_shared.h" + +namespace draco { + +// Class for encoding normal vectors using an octahedral encoding, see Cigolle +// et al.'14 “A Survey of Efficient Representations for Independent Unit +// Vectors”. Compared to the basic quantization encoder, this encoder results +// in a better compression rate under the same accuracy settings. Note that this +// encoder doesn't preserve the lengths of input vectors, therefore it will not +// work correctly when the input values are not normalized. +class SequentialNormalAttributeEncoder + : public SequentialIntegerAttributeEncoder { + public: + uint8_t GetUniqueId() const override { + return SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS; + } + bool IsLossyEncoder() const override { return true; } + + bool EncodeDataNeededByPortableTransform(EncoderBuffer *out_buffer) override; + + protected: + bool Init(PointCloudEncoder *encoder, int attribute_id) override; + + // Put quantized values in portable attribute for sequential encoding. 
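Once a normal is in its two octahedral components, the transform reduces each component to the requested number of quantization bits; that is where this encoder becomes lossy. An illustrative sketch of quantizing one component in [-1, 1] to quantization_bits bits and back, assuming plain uniform quantization rather than Draco's exact transform:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Uniform quantization of one octahedral component in [-1, 1] to `bits` bits.
uint32_t Quantize(float v, int bits) {
  const uint32_t max_value = (1u << bits) - 1;
  const float clamped = std::fmin(1.0f, std::fmax(-1.0f, v));
  return static_cast<uint32_t>(std::lround((clamped * 0.5f + 0.5f) * max_value));
}

float Dequantize(uint32_t q, int bits) {
  const uint32_t max_value = (1u << bits) - 1;
  return (static_cast<float>(q) / max_value) * 2.0f - 1.0f;
}

int main() {
  const int bits = 10;  // e.g. "quantization_bits" = 10
  const float original = 0.3173f;
  const uint32_t q = Quantize(original, bits);
  std::printf("q=%u restored=%.4f\n", q, Dequantize(q, bits));
}
```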
+ bool PrepareValues(const std::vector &point_ids, + int num_points) override; + + std::unique_ptr> + CreateIntPredictionScheme(PredictionSchemeMethod /* method */) override { + typedef PredictionSchemeNormalOctahedronCanonicalizedEncodingTransform< + int32_t> + Transform; + const int32_t quantization_bits = encoder()->options()->GetAttributeInt( + attribute_id(), "quantization_bits", -1); + const int32_t max_value = (1 << quantization_bits) - 1; + const Transform transform(max_value); + const PredictionSchemeMethod default_prediction_method = + SelectPredictionMethod(attribute_id(), encoder()); + const int32_t prediction_method = encoder()->options()->GetAttributeInt( + attribute_id(), "prediction_scheme", default_prediction_method); + + if (prediction_method == MESH_PREDICTION_GEOMETRIC_NORMAL) { + return CreatePredictionSchemeForEncoder( + MESH_PREDICTION_GEOMETRIC_NORMAL, attribute_id(), encoder(), + transform); + } + if (prediction_method == PREDICTION_DIFFERENCE) { + return CreatePredictionSchemeForEncoder( + PREDICTION_DIFFERENCE, attribute_id(), encoder(), transform); + } + DRACO_DCHECK(false); // Should never be reached. + return nullptr; + } + + // Used for the conversion to quantized normals in octahedral format. + AttributeOctahedronTransform attribute_octahedron_transform_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.cc new file mode 100644 index 000000000..3d306e7da --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.cc @@ -0,0 +1,88 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_quantization_attribute_decoder.h" + +#include "draco/core/quantization_utils.h" + +namespace draco { + +SequentialQuantizationAttributeDecoder:: + SequentialQuantizationAttributeDecoder() {} + +bool SequentialQuantizationAttributeDecoder::Init(PointCloudDecoder *decoder, + int attribute_id) { + if (!SequentialIntegerAttributeDecoder::Init(decoder, attribute_id)) { + return false; + } + const PointAttribute *const attribute = + decoder->point_cloud()->attribute(attribute_id); + // Currently we can quantize only floating point arguments. 
+ if (attribute->data_type() != DT_FLOAT32) { + return false; + } + return true; +} + +bool SequentialQuantizationAttributeDecoder::DecodeIntegerValues( + const std::vector &point_ids, DecoderBuffer *in_buffer) { +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (decoder()->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0) && + !DecodeQuantizedDataInfo()) { + return false; + } +#endif + return SequentialIntegerAttributeDecoder::DecodeIntegerValues(point_ids, + in_buffer); +} + +bool SequentialQuantizationAttributeDecoder:: + DecodeDataNeededByPortableTransform( + const std::vector &point_ids, DecoderBuffer *in_buffer) { + if (decoder()->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 0)) { + // Decode quantization data here only for files with bitstream version 2.0+ + if (!DecodeQuantizedDataInfo()) { + return false; + } + } + + // Store the decoded transform data in portable attribute; + return quantization_transform_.TransferToAttribute(portable_attribute()); +} + +bool SequentialQuantizationAttributeDecoder::StoreValues(uint32_t num_points) { + return DequantizeValues(num_points); +} + +bool SequentialQuantizationAttributeDecoder::DecodeQuantizedDataInfo() { + // Get attribute used as source for decoding. + auto att = GetPortableAttribute(); + if (att == nullptr) { + // This should happen only in the backward compatibility mode. It will still + // work fine for this case because the only thing the quantization transform + // cares about is the number of components that is the same for both source + // and target attributes. + att = attribute(); + } + return quantization_transform_.DecodeParameters(*att, decoder()->buffer()); +} + +bool SequentialQuantizationAttributeDecoder::DequantizeValues( + uint32_t num_values) { + // Convert all quantized values back to floats. + return quantization_transform_.InverseTransformAttribute( + *GetPortableAttribute(), attribute()); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.h b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.h new file mode 100644 index 000000000..ad372dcd8 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_decoder.h @@ -0,0 +1,52 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_DECODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_DECODER_H_ + +#include "draco/attributes/attribute_quantization_transform.h" +#include "draco/compression/attributes/sequential_integer_attribute_decoder.h" +#include "draco/draco_features.h" + +namespace draco { + +// Decoder for attribute values encoded with the +// SequentialQuantizationAttributeEncoder. 
+class SequentialQuantizationAttributeDecoder + : public SequentialIntegerAttributeDecoder { + public: + SequentialQuantizationAttributeDecoder(); + bool Init(PointCloudDecoder *decoder, int attribute_id) override; + + protected: + bool DecodeIntegerValues(const std::vector &point_ids, + DecoderBuffer *in_buffer) override; + bool DecodeDataNeededByPortableTransform( + const std::vector &point_ids, + DecoderBuffer *in_buffer) override; + bool StoreValues(uint32_t num_points) override; + + // Decodes data necessary for dequantizing the encoded values. + virtual bool DecodeQuantizedDataInfo(); + + // Dequantizes all values and stores them into the output attribute. + virtual bool DequantizeValues(uint32_t num_values); + + private: + AttributeQuantizationTransform quantization_transform_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.cc b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.cc new file mode 100644 index 000000000..d3666f7a4 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.cc @@ -0,0 +1,86 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/attributes/sequential_quantization_attribute_encoder.h" + +#include "draco/core/quantization_utils.h" + +namespace draco { + +SequentialQuantizationAttributeEncoder:: + SequentialQuantizationAttributeEncoder() {} + +bool SequentialQuantizationAttributeEncoder::Init(PointCloudEncoder *encoder, + int attribute_id) { + if (!SequentialIntegerAttributeEncoder::Init(encoder, attribute_id)) { + return false; + } + // This encoder currently works only for floating point attributes. + const PointAttribute *const attribute = + encoder->point_cloud()->attribute(attribute_id); + if (attribute->data_type() != DT_FLOAT32) { + return false; + } + + // Initialize AttributeQuantizationTransform. + const int quantization_bits = encoder->options()->GetAttributeInt( + attribute_id, "quantization_bits", -1); + if (quantization_bits < 1) { + return false; + } + if (encoder->options()->IsAttributeOptionSet(attribute_id, + "quantization_origin") && + encoder->options()->IsAttributeOptionSet(attribute_id, + "quantization_range")) { + // Quantization settings are explicitly specified in the provided options. 
+ std::vector quantization_origin(attribute->num_components()); + encoder->options()->GetAttributeVector(attribute_id, "quantization_origin", + attribute->num_components(), + &quantization_origin[0]); + const float range = encoder->options()->GetAttributeFloat( + attribute_id, "quantization_range", 1.f); + if (!attribute_quantization_transform_.SetParameters( + quantization_bits, quantization_origin.data(), + attribute->num_components(), range)) { + return false; + } + } else { + // Compute quantization settings from the attribute values. + if (!attribute_quantization_transform_.ComputeParameters( + *attribute, quantization_bits)) { + return false; + } + } + return true; +} + +bool SequentialQuantizationAttributeEncoder:: + EncodeDataNeededByPortableTransform(EncoderBuffer *out_buffer) { + return attribute_quantization_transform_.EncodeParameters(out_buffer); +} + +bool SequentialQuantizationAttributeEncoder::PrepareValues( + const std::vector &point_ids, int num_points) { + auto portable_attribute = + attribute_quantization_transform_.InitTransformedAttribute( + *attribute(), point_ids.size()); + if (!attribute_quantization_transform_.TransformAttribute( + *(attribute()), point_ids, portable_attribute.get())) { + return false; + } + SetPortableAttribute(std::move(portable_attribute)); + return true; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.h b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.h new file mode 100644 index 000000000..e9762bdd6 --- /dev/null +++ b/contrib/draco/src/draco/compression/attributes/sequential_quantization_attribute_encoder.h @@ -0,0 +1,52 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_ENCODER_H_ +#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_ENCODER_H_ + +#include "draco/attributes/attribute_quantization_transform.h" +#include "draco/compression/attributes/sequential_integer_attribute_encoder.h" + +namespace draco { + +class MeshEncoder; + +// Attribute encoder that quantizes floating point attribute values. The +// quantized values can be optionally compressed using an entropy coding. +class SequentialQuantizationAttributeEncoder + : public SequentialIntegerAttributeEncoder { + public: + SequentialQuantizationAttributeEncoder(); + uint8_t GetUniqueId() const override { + return SEQUENTIAL_ATTRIBUTE_ENCODER_QUANTIZATION; + } + bool Init(PointCloudEncoder *encoder, int attribute_id) override; + + bool IsLossyEncoder() const override { return true; } + + bool EncodeDataNeededByPortableTransform(EncoderBuffer *out_buffer) override; + + protected: + // Put quantized values in portable attribute for sequential encoding. + bool PrepareValues(const std::vector &point_ids, + int num_points) override; + + private: + // Used for the quantization. 
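The quantization transform set up above is characterized by an origin, a range, and the number of bits, whether those come from explicit options or are computed from the attribute values. A hedged sketch of scalar quantization and dequantization with those three parameters, assuming simple uniform quantization rather than the exact AttributeQuantizationTransform code:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Illustrative parameters mirroring origin / range / quantization_bits.
struct QuantParams {
  float origin;
  float range;
  int bits;
};

uint32_t Quantize(float v, const QuantParams& p) {
  const uint32_t max_q = (1u << p.bits) - 1;
  const float normalized = (v - p.origin) / p.range;  // expected in [0, 1]
  return static_cast<uint32_t>(std::lround(normalized * max_q));
}

float Dequantize(uint32_t q, const QuantParams& p) {
  const uint32_t max_q = (1u << p.bits) - 1;
  return p.origin + (static_cast<float>(q) / max_q) * p.range;
}

int main() {
  const QuantParams p{-1.5f, 3.0f, 14};  // hypothetical origin/range/bits
  const float v = 0.742f;
  const uint32_t q = Quantize(v, p);
  std::printf("q=%u back=%.4f\n", q, Dequantize(q, p));
}
```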
+ AttributeQuantizationTransform attribute_quantization_transform_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_QUANTIZATION_ATTRIBUTE_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h new file mode 100644 index 000000000..faacbd5b9 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h @@ -0,0 +1,43 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides shared functions for adaptive rANS bit coding. +#ifndef DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_CODING_SHARED_H_ +#define DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_CODING_SHARED_H_ + +#include "draco/core/macros.h" + +namespace draco { + +// Clamp the probability p to a uint8_t in the range [1,255]. +inline uint8_t clamp_probability(double p) { + DRACO_DCHECK_LE(p, 1.0); + DRACO_DCHECK_LE(0.0, p); + uint32_t p_int = static_cast((p * 256) + 0.5); + p_int -= (p_int == 256); + p_int += (p_int == 0); + return static_cast(p_int); +} + +// Update the probability according to new incoming bit. +inline double update_probability(double old_p, bool bit) { + static constexpr double w = 128.0; + static constexpr double w0 = (w - 1.0) / w; + static constexpr double w1 = 1.0 / w; + return old_p * w0 + (!bit) * w1; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_CODING_SHARED_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.cc b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.cc new file mode 100644 index 000000000..056842c4a --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.cc @@ -0,0 +1,70 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
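The two shared helpers above define the adaptive model used by both the rANS bit encoder and decoder: the running zero-probability is clamped to a byte in [1, 255] (128 corresponds to 0.5 exactly) and updated with an exponential moving average over a window of w = 128 bits. A standalone sketch showing how the estimate converges on a biased bit stream; the two functions mirror clamp_probability()/update_probability() above, while the driver around them is illustrative:

```cpp
// Standalone copy of the shared helpers, to show how the adaptive
// zero-probability evolves over a bit stream.
#include <cstdint>
#include <cstdio>

static uint8_t ClampProbability(double p) {
  uint32_t p_int = static_cast<uint32_t>((p * 256) + 0.5);
  p_int -= (p_int == 256);  // keep the byte inside [1, 255]
  p_int += (p_int == 0);
  return static_cast<uint8_t>(p_int);
}

static double UpdateProbability(double old_p, bool bit) {
  // Exponential moving average with window w = 128: a 0 bit pulls the
  // zero-probability towards 1, a 1 bit pulls it towards 0.
  constexpr double w = 128.0;
  return old_p * ((w - 1.0) / w) + (bit ? 0.0 : 1.0) / w;
}

int main() {
  double p0 = 0.5;  // both encoder and decoder start from this estimate
  // Feed a stream that is 87.5% zeros (pattern 0 0 0 0 0 0 0 1).
  for (int i = 0; i < 1024; ++i) {
    const bool bit = (i % 8) == 7;
    p0 = UpdateProbability(p0, bit);
  }
  std::printf("estimated P(0) = %.3f, byte handed to rANS = %u\n", p0,
              ClampProbability(p0));  // settles near 0.875, i.e. ~224
  return 0;
}
```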
+// +#include "draco/compression/bit_coders/adaptive_rans_bit_decoder.h" + +#include "draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h" + +namespace draco { + +AdaptiveRAnsBitDecoder::AdaptiveRAnsBitDecoder() : p0_f_(0.5) {} + +AdaptiveRAnsBitDecoder::~AdaptiveRAnsBitDecoder() { Clear(); } + +bool AdaptiveRAnsBitDecoder::StartDecoding(DecoderBuffer *source_buffer) { + Clear(); + + uint32_t size_in_bytes; + if (!source_buffer->Decode(&size_in_bytes)) { + return false; + } + if (size_in_bytes > source_buffer->remaining_size()) { + return false; + } + if (ans_read_init(&ans_decoder_, + reinterpret_cast( + const_cast(source_buffer->data_head())), + size_in_bytes) != 0) { + return false; + } + source_buffer->Advance(size_in_bytes); + return true; +} + +bool AdaptiveRAnsBitDecoder::DecodeNextBit() { + const uint8_t p0 = clamp_probability(p0_f_); + const bool bit = static_cast(rabs_read(&ans_decoder_, p0)); + p0_f_ = update_probability(p0_f_, bit); + return bit; +} + +void AdaptiveRAnsBitDecoder::DecodeLeastSignificantBits32(int nbits, + uint32_t *value) { + DRACO_DCHECK_EQ(true, nbits <= 32); + DRACO_DCHECK_EQ(true, nbits > 0); + + uint32_t result = 0; + while (nbits) { + result = (result << 1) + DecodeNextBit(); + --nbits; + } + *value = result; +} + +void AdaptiveRAnsBitDecoder::Clear() { + ans_read_end(&ans_decoder_); + p0_f_ = 0.5; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.h b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.h new file mode 100644 index 000000000..a1ea011dd --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_decoder.h @@ -0,0 +1,54 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides basic classes and functions for rANS bit decoding. +#ifndef DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_DECODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_DECODER_H_ + +#include + +#include "draco/compression/entropy/ans.h" +#include "draco/core/decoder_buffer.h" + +namespace draco { + +// Class for decoding a sequence of bits that were encoded with +// AdaptiveRAnsBitEncoder. +class AdaptiveRAnsBitDecoder { + public: + AdaptiveRAnsBitDecoder(); + ~AdaptiveRAnsBitDecoder(); + + // Sets |source_buffer| as the buffer to decode bits from. + bool StartDecoding(DecoderBuffer *source_buffer); + + // Decode one bit. Returns true if the bit is a 1, otherwise false. + bool DecodeNextBit(); + + // Decode the next |nbits| and return the sequence in |value|. |nbits| must be + // > 0 and <= 32. 
+ void DecodeLeastSignificantBits32(int nbits, uint32_t *value); + + void EndDecoding() {} + + private: + void Clear(); + + AnsDecoder ans_decoder_; + double p0_f_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.cc b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.cc new file mode 100644 index 000000000..5ce9dc388 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.cc @@ -0,0 +1,59 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/bit_coders/adaptive_rans_bit_encoder.h" + +#include "draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h" + +namespace draco { + +AdaptiveRAnsBitEncoder::AdaptiveRAnsBitEncoder() {} + +AdaptiveRAnsBitEncoder::~AdaptiveRAnsBitEncoder() { Clear(); } + +void AdaptiveRAnsBitEncoder::StartEncoding() { Clear(); } + +void AdaptiveRAnsBitEncoder::EndEncoding(EncoderBuffer *target_buffer) { + // Buffer for ans to write. + std::vector buffer(bits_.size() + 16); + AnsCoder ans_coder; + ans_write_init(&ans_coder, buffer.data()); + + // Unfortunately we have to encode the bits in reversed order, while the + // probabilities that should be given are those of the forward sequence. + double p0_f = 0.5; + std::vector p0s; + p0s.reserve(bits_.size()); + for (bool b : bits_) { + p0s.push_back(clamp_probability(p0_f)); + p0_f = update_probability(p0_f, b); + } + auto bit = bits_.rbegin(); + auto pit = p0s.rbegin(); + while (bit != bits_.rend()) { + rabs_write(&ans_coder, *bit, *pit); + ++bit; + ++pit; + } + + const uint32_t size_in_bytes = ans_write_end(&ans_coder); + target_buffer->Encode(size_in_bytes); + target_buffer->Encode(buffer.data(), size_in_bytes); + + Clear(); +} + +void AdaptiveRAnsBitEncoder::Clear() { bits_.clear(); } + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.h b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.h new file mode 100644 index 000000000..9b1832844 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/adaptive_rans_bit_encoder.h @@ -0,0 +1,61 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides basic classes and functions for rANS bit encoding. 
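EndEncoding() above makes the LIFO nature of rANS explicit: probabilities are computed in forward order, but the bits are fed to the coder in reverse so that the decoder can consume them forward with the same adaptive model. A round-trip sketch of the encoder/decoder pair declared in this patch; DecoderBuffer::Init() and EncoderBuffer::data()/size() are assumed from draco/core and are not part of this hunk:

```cpp
// Round-trip sketch for the adaptive rANS bit coder pair added in this patch.
#include <cstdint>
#include <cstdio>

#include "draco/compression/bit_coders/adaptive_rans_bit_decoder.h"
#include "draco/compression/bit_coders/adaptive_rans_bit_encoder.h"

int main() {
  draco::AdaptiveRAnsBitEncoder encoder;
  encoder.StartEncoding();
  for (int i = 0; i < 64; ++i) {
    encoder.EncodeBit(i % 5 == 0);               // a mostly-zero bit stream
  }
  encoder.EncodeLeastSignificantBits32(10, 600);  // followed by a 10-bit value

  draco::EncoderBuffer buffer;
  encoder.EndEncoding(&buffer);  // forward probabilities, bits written in
                                 // reverse because rANS is last-in-first-out

  draco::DecoderBuffer in;
  in.Init(buffer.data(), buffer.size());  // assumed DecoderBuffer API

  draco::AdaptiveRAnsBitDecoder decoder;
  if (!decoder.StartDecoding(&in)) return 1;
  int mismatches = 0;
  for (int i = 0; i < 64; ++i) {
    mismatches += decoder.DecodeNextBit() != (i % 5 == 0);
  }
  uint32_t value = 0;
  decoder.DecodeLeastSignificantBits32(10, &value);
  decoder.EndDecoding();
  std::printf("mismatches=%d value=%u\n", mismatches, value);  // expect 0, 600
  return 0;
}
```

The same StartEncoding/EncodeBit/EndEncoding and StartDecoding/DecodeNextBit/EndDecoding shape is shared by every bit coder in this patch, which is what lets FoldedBit32Encoder/Decoder further below wrap any of them through a template parameter.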
+#ifndef DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_ENCODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_ENCODER_H_ + +#include + +#include "draco/compression/entropy/ans.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// Class for adaptive encoding a sequence of bits using rANS. +class AdaptiveRAnsBitEncoder { + public: + AdaptiveRAnsBitEncoder(); + ~AdaptiveRAnsBitEncoder(); + + // Must be called before any Encode* function is called. + void StartEncoding(); + + // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0. + void EncodeBit(bool bit) { bits_.push_back(bit); } + + // Encode |nbits| of |value|, starting from the least significant bit. + // |nbits| must be > 0 and <= 32. + void EncodeLeastSignificantBits32(int nbits, uint32_t value) { + DRACO_DCHECK_EQ(true, nbits <= 32); + DRACO_DCHECK_EQ(true, nbits > 0); + uint32_t selector = (1 << (nbits - 1)); + while (selector) { + EncodeBit(value & selector); + selector = selector >> 1; + } + } + + // Ends the bit encoding and stores the result into the target_buffer. + void EndEncoding(EncoderBuffer *target_buffer); + + private: + void Clear(); + + std::vector bits_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_ADAPTIVE_RANS_BIT_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.cc b/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.cc new file mode 100644 index 000000000..2abe3382a --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.cc @@ -0,0 +1,54 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/bit_coders/direct_bit_decoder.h" + +namespace draco { + +DirectBitDecoder::DirectBitDecoder() : pos_(bits_.end()), num_used_bits_(0) {} + +DirectBitDecoder::~DirectBitDecoder() { Clear(); } + +bool DirectBitDecoder::StartDecoding(DecoderBuffer *source_buffer) { + Clear(); + uint32_t size_in_bytes; + if (!source_buffer->Decode(&size_in_bytes)) { + return false; + } + + // Check that size_in_bytes is > 0 and a multiple of 4 as the encoder always + // encodes 32 bit elements. 
+ if (size_in_bytes == 0 || size_in_bytes & 0x3) { + return false; + } + if (size_in_bytes > source_buffer->remaining_size()) { + return false; + } + const uint32_t num_32bit_elements = size_in_bytes / 4; + bits_.resize(num_32bit_elements); + if (!source_buffer->Decode(bits_.data(), size_in_bytes)) { + return false; + } + pos_ = bits_.begin(); + num_used_bits_ = 0; + return true; +} + +void DirectBitDecoder::Clear() { + bits_.clear(); + num_used_bits_ = 0; + pos_ = bits_.end(); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h b/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h new file mode 100644 index 000000000..b9fbc2d6f --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/direct_bit_decoder.h @@ -0,0 +1,90 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides direct encoding of bits with arithmetic encoder interface. +#ifndef DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_DECODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_DECODER_H_ + +#include + +#include "draco/core/decoder_buffer.h" + +namespace draco { + +class DirectBitDecoder { + public: + DirectBitDecoder(); + ~DirectBitDecoder(); + + // Sets |source_buffer| as the buffer to decode bits from. + bool StartDecoding(DecoderBuffer *source_buffer); + + // Decode one bit. Returns true if the bit is a 1, otherwise false. + bool DecodeNextBit() { + const uint32_t selector = 1 << (31 - num_used_bits_); + if (pos_ == bits_.end()) { + return false; + } + const bool bit = *pos_ & selector; + ++num_used_bits_; + if (num_used_bits_ == 32) { + ++pos_; + num_used_bits_ = 0; + } + return bit; + } + + // Decode the next |nbits| and return the sequence in |value|. |nbits| must be + // > 0 and <= 32. 
+ void DecodeLeastSignificantBits32(int nbits, uint32_t *value) { + DRACO_DCHECK_EQ(true, nbits <= 32); + DRACO_DCHECK_EQ(true, nbits > 0); + const int remaining = 32 - num_used_bits_; + if (nbits <= remaining) { + if (pos_ == bits_.end()) { + *value = 0; + return; + } + *value = (*pos_ << num_used_bits_) >> (32 - nbits); + num_used_bits_ += nbits; + if (num_used_bits_ == 32) { + ++pos_; + num_used_bits_ = 0; + } + } else { + if (pos_ + 1 == bits_.end()) { + *value = 0; + return; + } + const uint32_t value_l = ((*pos_) << num_used_bits_); + num_used_bits_ = nbits - remaining; + ++pos_; + const uint32_t value_r = (*pos_) >> (32 - num_used_bits_); + *value = (value_l >> (32 - num_used_bits_ - remaining)) | value_r; + } + } + + void EndDecoding() {} + + private: + void Clear(); + + std::vector bits_; + std::vector::const_iterator pos_; + uint32_t num_used_bits_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.cc b/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.cc new file mode 100644 index 000000000..d39143cf5 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.cc @@ -0,0 +1,39 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/bit_coders/direct_bit_encoder.h" + +namespace draco { + +DirectBitEncoder::DirectBitEncoder() : local_bits_(0), num_local_bits_(0) {} + +DirectBitEncoder::~DirectBitEncoder() { Clear(); } + +void DirectBitEncoder::StartEncoding() { Clear(); } + +void DirectBitEncoder::EndEncoding(EncoderBuffer *target_buffer) { + bits_.push_back(local_bits_); + const uint32_t size_in_byte = static_cast(bits_.size()) * 4; + target_buffer->Encode(size_in_byte); + target_buffer->Encode(bits_.data(), size_in_byte); + Clear(); +} + +void DirectBitEncoder::Clear() { + bits_.clear(); + local_bits_ = 0; + num_local_bits_ = 0; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.h b/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.h new file mode 100644 index 000000000..705b2ca93 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/direct_bit_encoder.h @@ -0,0 +1,89 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides direct encoding of bits with arithmetic encoder interface. 
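The word-straddling branch in DirectBitDecoder::DecodeLeastSignificantBits32 above is the only non-obvious part of the direct coder pair: the unread high bits of the current 32-bit word become the high part of the result and the top bits of the next word fill in the rest. A standalone re-implementation of that arithmetic, checked against a bit-by-bit reference over the same MSB-first layout (illustrative code, not Draco's):

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Mirror of the "else" branch: 'used' bits of w0 are already consumed and
// nbits > 32 - used, so the requested value spans w0 and w1.
static uint32_t ExtractAcrossWords(uint32_t w0, uint32_t w1, int used,
                                   int nbits) {
  const int remaining = 32 - used;
  assert(nbits > remaining && nbits <= 32);
  const uint32_t value_l = w0 << used;       // unread bits of w0 at the top
  const int used_in_w1 = nbits - remaining;  // bits still needed from w1
  const uint32_t value_r = w1 >> (32 - used_in_w1);
  return (value_l >> (32 - used_in_w1 - remaining)) | value_r;
}

// Bit-by-bit reference reader: bits are stored MSB-first inside each word.
static uint32_t ExtractReference(uint32_t w0, uint32_t w1, int used,
                                 int nbits) {
  uint32_t result = 0;
  for (int i = 0; i < nbits; ++i) {
    const int pos = used + i;  // global bit index across both words
    const uint32_t word = pos < 32 ? w0 : w1;
    const uint32_t bit = (word >> (31 - (pos % 32))) & 1u;
    result = (result << 1) | bit;
  }
  return result;
}

int main() {
  const uint32_t w0 = 0xDEADBEEFu, w1 = 0x12345678u;
  for (int used = 1; used < 32; ++used) {
    for (int nbits = 33 - used; nbits <= 32; ++nbits) {
      assert(ExtractAcrossWords(w0, w1, used, nbits) ==
             ExtractReference(w0, w1, used, nbits));
    }
  }
  std::printf("cross-word extraction matches the bit-by-bit reference\n");
  return 0;
}
```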
+#ifndef DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_ENCODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_ENCODER_H_ + +#include + +#include "draco/core/encoder_buffer.h" + +namespace draco { + +class DirectBitEncoder { + public: + DirectBitEncoder(); + ~DirectBitEncoder(); + + // Must be called before any Encode* function is called. + void StartEncoding(); + + // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0. + void EncodeBit(bool bit) { + if (bit) { + local_bits_ |= 1 << (31 - num_local_bits_); + } + num_local_bits_++; + if (num_local_bits_ == 32) { + bits_.push_back(local_bits_); + num_local_bits_ = 0; + local_bits_ = 0; + } + } + + // Encode |nbits| of |value|, starting from the least significant bit. + // |nbits| must be > 0 and <= 32. + void EncodeLeastSignificantBits32(int nbits, uint32_t value) { + DRACO_DCHECK_EQ(true, nbits <= 32); + DRACO_DCHECK_EQ(true, nbits > 0); + + const int remaining = 32 - num_local_bits_; + + // Make sure there are no leading bits that should not be encoded and + // start from here. + value = value << (32 - nbits); + if (nbits <= remaining) { + value = value >> num_local_bits_; + local_bits_ = local_bits_ | value; + num_local_bits_ += nbits; + if (num_local_bits_ == 32) { + bits_.push_back(local_bits_); + local_bits_ = 0; + num_local_bits_ = 0; + } + } else { + value = value >> (32 - nbits); + num_local_bits_ = nbits - remaining; + const uint32_t value_l = value >> num_local_bits_; + local_bits_ = local_bits_ | value_l; + bits_.push_back(local_bits_); + local_bits_ = value << (32 - num_local_bits_); + } + } + + // Ends the bit encoding and stores the result into the target_buffer. + void EndEncoding(EncoderBuffer *target_buffer); + + private: + void Clear(); + + std::vector bits_; + uint32_t local_bits_; + uint32_t num_local_bits_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_DIRECT_BIT_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_decoder.h b/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_decoder.h new file mode 100644 index 000000000..c14058b65 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_decoder.h @@ -0,0 +1,77 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides direct encoding of bits with arithmetic encoder interface. +#ifndef DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_DECODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_DECODER_H_ + +#include + +#include "draco/core/decoder_buffer.h" + +namespace draco { + +// See FoldedBit32Encoder for more details. +template +class FoldedBit32Decoder { + public: + FoldedBit32Decoder() {} + ~FoldedBit32Decoder() {} + + // Sets |source_buffer| as the buffer to decode bits from. 
+ bool StartDecoding(DecoderBuffer *source_buffer) { + for (int i = 0; i < 32; i++) { + if (!folded_number_decoders_[i].StartDecoding(source_buffer)) { + return false; + } + } + return bit_decoder_.StartDecoding(source_buffer); + } + + // Decode one bit. Returns true if the bit is a 1, otherwise false. + bool DecodeNextBit() { return bit_decoder_.DecodeNextBit(); } + + // Decode the next |nbits| and return the sequence in |value|. |nbits| must be + // > 0 and <= 32. + void DecodeLeastSignificantBits32(int nbits, uint32_t *value) { + uint32_t result = 0; + for (int i = 0; i < nbits; ++i) { + const bool bit = folded_number_decoders_[i].DecodeNextBit(); + result = (result << 1) + bit; + } + *value = result; + } + + void EndDecoding() { + for (int i = 0; i < 32; i++) { + folded_number_decoders_[i].EndDecoding(); + } + bit_decoder_.EndDecoding(); + } + + private: + void Clear() { + for (int i = 0; i < 32; i++) { + folded_number_decoders_[i].Clear(); + } + bit_decoder_.Clear(); + } + + std::array folded_number_decoders_; + BitDecoderT bit_decoder_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_encoder.h b/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_encoder.h new file mode 100644 index 000000000..375b38a61 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/folded_integer_bit_encoder.h @@ -0,0 +1,82 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides direct encoding of bits with arithmetic encoder interface. +#ifndef DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_ENCODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_ENCODER_H_ + +#include + +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// This coding scheme considers every bit of an (up to) 32bit integer as a +// separate context. This can be a significant advantage when encoding numbers +// where it is more likely that the front bits are zero. +// The behavior is essentially the same as other arithmetic encoding schemes, +// the only difference is that encoding and decoding of bits must be absolutely +// symmetric, bits handed in by EncodeBit32 must be also decoded in this way. +// This is the FoldedBit32Encoder, see also FoldedBit32Decoder. +template +class FoldedBit32Encoder { + public: + FoldedBit32Encoder() {} + ~FoldedBit32Encoder() {} + + // Must be called before any Encode* function is called. + void StartEncoding() { + for (int i = 0; i < 32; i++) { + folded_number_encoders_[i].StartEncoding(); + } + bit_encoder_.StartEncoding(); + } + + // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0. + void EncodeBit(bool bit) { bit_encoder_.EncodeBit(bit); } + + // Encode |nbits| of |value|, starting from the least significant bit. + // |nbits| must be > 0 and <= 32. 
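As the class comment above explains, the folded scheme gives each of the 32 bit positions its own coder; the EncodeLeastSignificantBits32() implementation that follows routes the i-th most significant of the nbits bits to sub-coder i. A toy tally (not Draco code) showing why that pays off for integers that rarely use their high bits:

```cpp
// Per-bit-position statistics for a skewed value distribution: the high
// positions form near-constant all-zero streams, which each per-position
// coder can store in a handful of bytes.
#include <array>
#include <cstdint>
#include <cstdio>

int main() {
  constexpr int kBits = 16;       // fixed width used for every value
  std::array<int, kBits> ones{};  // count of 1 bits seen per position
  int count = 0;
  for (uint32_t v = 0; v < 1000; ++v) {
    const uint32_t value = v * v % 300;  // values stay below 2^9
    for (int i = 0; i < kBits; ++i) {
      // Position 0 is the MSB of the 16-bit value, matching the selector
      // loop in FoldedBit32Encoder::EncodeLeastSignificantBits32 below.
      ones[i] += (value >> (kBits - 1 - i)) & 1u;
    }
    ++count;
  }
  for (int i = 0; i < kBits; ++i) {
    std::printf("bit position %2d: P(1) = %.2f\n", i,
                static_cast<double>(ones[i]) / count);
  }
  // The first 7 positions print 0.00: those per-position streams are pure
  // runs of zeros, which is exactly the case the folded scheme exploits.
  return 0;
}
```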
+ void EncodeLeastSignificantBits32(int nbits, uint32_t value) { + uint32_t selector = 1 << (nbits - 1); + for (int i = 0; i < nbits; i++) { + const bool bit = (value & selector); + folded_number_encoders_[i].EncodeBit(bit); + selector = selector >> 1; + } + } + + // Ends the bit encoding and stores the result into the target_buffer. + void EndEncoding(EncoderBuffer *target_buffer) { + for (int i = 0; i < 32; i++) { + folded_number_encoders_[i].EndEncoding(target_buffer); + } + bit_encoder_.EndEncoding(target_buffer); + } + + private: + void Clear() { + for (int i = 0; i < 32; i++) { + folded_number_encoders_[i].Clear(); + } + bit_encoder_.Clear(); + } + + std::array folded_number_encoders_; + BitEncoderT bit_encoder_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_FOLDED_INTEGER_BIT_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.cc b/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.cc new file mode 100644 index 000000000..a9b8fb9e9 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.cc @@ -0,0 +1,82 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/bit_coders/rans_bit_decoder.h" + +#include "draco/compression/config/compression_shared.h" +#include "draco/core/bit_utils.h" +#include "draco/core/varint_decoding.h" + +namespace draco { + +RAnsBitDecoder::RAnsBitDecoder() : prob_zero_(0) {} + +RAnsBitDecoder::~RAnsBitDecoder() { Clear(); } + +bool RAnsBitDecoder::StartDecoding(DecoderBuffer *source_buffer) { + Clear(); + + if (!source_buffer->Decode(&prob_zero_)) { + return false; + } + + uint32_t size_in_bytes; +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (source_buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) { + if (!source_buffer->Decode(&size_in_bytes)) { + return false; + } + + } else +#endif + { + if (!DecodeVarint(&size_in_bytes, source_buffer)) { + return false; + } + } + + if (size_in_bytes > source_buffer->remaining_size()) { + return false; + } + + if (ans_read_init(&ans_decoder_, + reinterpret_cast( + const_cast(source_buffer->data_head())), + size_in_bytes) != 0) { + return false; + } + source_buffer->Advance(size_in_bytes); + return true; +} + +bool RAnsBitDecoder::DecodeNextBit() { + const uint8_t bit = rabs_read(&ans_decoder_, prob_zero_); + return bit > 0; +} + +void RAnsBitDecoder::DecodeLeastSignificantBits32(int nbits, uint32_t *value) { + DRACO_DCHECK_EQ(true, nbits <= 32); + DRACO_DCHECK_EQ(true, nbits > 0); + + uint32_t result = 0; + while (nbits) { + result = (result << 1) + DecodeNextBit(); + --nbits; + } + *value = result; +} + +void RAnsBitDecoder::Clear() { ans_read_end(&ans_decoder_); } + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.h b/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.h new file mode 100644 index 000000000..25d243eac --- /dev/null +++ 
b/contrib/draco/src/draco/compression/bit_coders/rans_bit_decoder.h @@ -0,0 +1,55 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides basic classes and functions for rANS coding. +#ifndef DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_DECODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_DECODER_H_ + +#include + +#include "draco/compression/entropy/ans.h" +#include "draco/core/decoder_buffer.h" +#include "draco/draco_features.h" + +namespace draco { + +// Class for decoding a sequence of bits that were encoded with RAnsBitEncoder. +class RAnsBitDecoder { + public: + RAnsBitDecoder(); + ~RAnsBitDecoder(); + + // Sets |source_buffer| as the buffer to decode bits from. + // Returns false when the data is invalid. + bool StartDecoding(DecoderBuffer *source_buffer); + + // Decode one bit. Returns true if the bit is a 1, otherwise false. + bool DecodeNextBit(); + + // Decode the next |nbits| and return the sequence in |value|. |nbits| must be + // > 0 and <= 32. + void DecodeLeastSignificantBits32(int nbits, uint32_t *value); + + void EndDecoding() {} + + private: + void Clear(); + + AnsDecoder ans_decoder_; + uint8_t prob_zero_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.cc b/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.cc new file mode 100644 index 000000000..8d00ea352 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.cc @@ -0,0 +1,125 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
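Unlike the adaptive pair earlier in this patch, RAnsBitEncoder/RAnsBitDecoder use one probability for the whole stream: the decoder above reads a single byte (prob_zero_) ahead of the rANS payload, and the encoder's EndEncoding(), which follows, derives that byte from the global zero/one counts. A standalone sketch of that mapping onto [1, 255], where 128 corresponds to P(0) = 0.5 exactly:

```cpp
// Re-implementation, for illustration, of the probability byte computed by
// RAnsBitEncoder::EndEncoding and consumed by RAnsBitDecoder::StartDecoding.
#include <cstdint>
#include <cstdio>

static uint8_t ZeroProbabilityByte(uint64_t zeros, uint64_t ones) {
  uint64_t total = zeros + ones;
  if (total == 0) total = 1;  // guard against an empty stream
  const uint32_t raw = static_cast<uint32_t>(
      (static_cast<double>(zeros) / total) * 256.0 + 0.5);
  uint8_t prob = raw < 255 ? static_cast<uint8_t>(raw) : 255;  // cap at 255
  prob += (prob == 0);  // 0 is not representable either
  return prob;
}

int main() {
  std::printf("%d\n", ZeroProbabilityByte(1, 1));      // 128 (P(0) = 0.5)
  std::printf("%d\n", ZeroProbabilityByte(900, 100));  // 230 (P(0) = 0.9)
  std::printf("%d\n", ZeroProbabilityByte(1, 0));      // 255 (clamped)
  std::printf("%d\n", ZeroProbabilityByte(0, 1));      // 1   (clamped)
  return 0;
}
```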
+// +#include "draco/compression/bit_coders/rans_bit_encoder.h" + +#include "draco/compression/entropy/ans.h" +#include "draco/core/bit_utils.h" +#include "draco/core/varint_encoding.h" + +namespace draco { + +RAnsBitEncoder::RAnsBitEncoder() : local_bits_(0), num_local_bits_(0) {} + +RAnsBitEncoder::~RAnsBitEncoder() { Clear(); } + +void RAnsBitEncoder::StartEncoding() { Clear(); } + +void RAnsBitEncoder::EncodeBit(bool bit) { + if (bit) { + bit_counts_[1]++; + local_bits_ |= 1 << num_local_bits_; + } else { + bit_counts_[0]++; + } + num_local_bits_++; + + if (num_local_bits_ == 32) { + bits_.push_back(local_bits_); + num_local_bits_ = 0; + local_bits_ = 0; + } +} + +void RAnsBitEncoder::EncodeLeastSignificantBits32(int nbits, uint32_t value) { + DRACO_DCHECK_EQ(true, nbits <= 32); + DRACO_DCHECK_EQ(true, nbits > 0); + + const uint32_t reversed = ReverseBits32(value) >> (32 - nbits); + const int ones = CountOneBits32(reversed); + bit_counts_[0] += (nbits - ones); + bit_counts_[1] += ones; + + const int remaining = 32 - num_local_bits_; + + if (nbits <= remaining) { + CopyBits32(&local_bits_, num_local_bits_, reversed, 0, nbits); + num_local_bits_ += nbits; + if (num_local_bits_ == 32) { + bits_.push_back(local_bits_); + local_bits_ = 0; + num_local_bits_ = 0; + } + } else { + CopyBits32(&local_bits_, num_local_bits_, reversed, 0, remaining); + bits_.push_back(local_bits_); + local_bits_ = 0; + CopyBits32(&local_bits_, 0, reversed, remaining, nbits - remaining); + num_local_bits_ = nbits - remaining; + } +} + +void RAnsBitEncoder::EndEncoding(EncoderBuffer *target_buffer) { + uint64_t total = bit_counts_[1] + bit_counts_[0]; + if (total == 0) { + total++; + } + + // The probability interval [0,1] is mapped to values of [0, 256]. However, + // the coding scheme can not deal with probabilities of 0 or 1, which is why + // we must clamp the values to interval [1, 255]. Specifically 128 + // corresponds to 0.5 exactly. And the value can be given as uint8_t. + const uint32_t zero_prob_raw = static_cast( + ((bit_counts_[0] / static_cast(total)) * 256.0) + 0.5); + + uint8_t zero_prob = 255; + if (zero_prob_raw < 255) { + zero_prob = static_cast(zero_prob_raw); + } + + zero_prob += (zero_prob == 0); + + // Space for 32 bit integer and some extra space. + std::vector buffer((bits_.size() + 8) * 8); + AnsCoder ans_coder; + ans_write_init(&ans_coder, buffer.data()); + + for (int i = num_local_bits_ - 1; i >= 0; --i) { + const uint8_t bit = (local_bits_ >> i) & 1; + rabs_write(&ans_coder, bit, zero_prob); + } + for (auto it = bits_.rbegin(); it != bits_.rend(); ++it) { + const uint32_t bits = *it; + for (int i = 31; i >= 0; --i) { + const uint8_t bit = (bits >> i) & 1; + rabs_write(&ans_coder, bit, zero_prob); + } + } + + const int size_in_bytes = ans_write_end(&ans_coder); + target_buffer->Encode(zero_prob); + EncodeVarint(static_cast(size_in_bytes), target_buffer); + target_buffer->Encode(buffer.data(), size_in_bytes); + + Clear(); +} + +void RAnsBitEncoder::Clear() { + bit_counts_.assign(2, 0); + bits_.clear(); + local_bits_ = 0; + num_local_bits_ = 0; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.h b/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.h new file mode 100644 index 000000000..1993dd3d3 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/rans_bit_encoder.h @@ -0,0 +1,57 @@ +// Copyright 2016 The Draco Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides basic classes and functions for rANS coding. +#ifndef DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_ENCODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_ENCODER_H_ + +#include + +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// Class for encoding a sequence of bits using rANS. The probability table used +// to encode the bits is based off the total counts of bits. +// TODO(fgalligan): Investigate using an adaptive table for more compression. +class RAnsBitEncoder { + public: + RAnsBitEncoder(); + ~RAnsBitEncoder(); + + // Must be called before any Encode* function is called. + void StartEncoding(); + + // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0. + void EncodeBit(bool bit); + + // Encode |nbits| of |value|, starting from the least significant bit. + // |nbits| must be > 0 and <= 32. + void EncodeLeastSignificantBits32(int nbits, uint32_t value); + + // Ends the bit encoding and stores the result into the target_buffer. + void EndEncoding(EncoderBuffer *target_buffer); + + private: + void Clear(); + + std::vector bit_counts_; + std::vector bits_; + uint32_t local_bits_; + uint32_t num_local_bits_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_RANS_BIT_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/rans_coding_test.cc b/contrib/draco/src/draco/compression/bit_coders/rans_coding_test.cc new file mode 100644 index 000000000..9509ad9f3 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/rans_coding_test.cc @@ -0,0 +1,9 @@ +#include "draco/compression/bit_coders/adaptive_rans_bit_decoder.h" +#include "draco/compression/bit_coders/adaptive_rans_bit_encoder.h" +#include "draco/compression/bit_coders/rans_bit_decoder.h" +#include "draco/compression/bit_coders/rans_bit_encoder.h" +#include "draco/core/draco_test_base.h" + +// Just including rans_coding.h and adaptive_rans_coding.h gets an asan error +// when compiling (blaze test :rans_coding_test --config=asan) +TEST(RansCodingTest, LinkerTest) {} diff --git a/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.cc b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.cc new file mode 100644 index 000000000..8ed50ef92 --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.cc @@ -0,0 +1,49 @@ +#include "draco/compression/bit_coders/symbol_bit_decoder.h" + +#include "draco/compression/entropy/symbol_decoding.h" + +namespace draco { + +bool SymbolBitDecoder::StartDecoding(DecoderBuffer *source_buffer) { + uint32_t size; + if (!source_buffer->Decode(&size)) { + return false; + } + + symbols_.resize(size); + if (!DecodeSymbols(size, 1, source_buffer, symbols_.data())) { + return false; + } + std::reverse(symbols_.begin(), symbols_.end()); + return true; +} + +bool SymbolBitDecoder::DecodeNextBit() { + uint32_t symbol; + DecodeLeastSignificantBits32(1, &symbol); + DRACO_DCHECK(symbol == 0 || symbol == 1); + return symbol 
== 1; +} + +void SymbolBitDecoder::DecodeLeastSignificantBits32(int nbits, + uint32_t *value) { + DRACO_DCHECK_LE(1, nbits); + DRACO_DCHECK_LE(nbits, 32); + DRACO_DCHECK_NE(value, nullptr); + // Testing: check to make sure there is something to decode. + DRACO_DCHECK_GT(symbols_.size(), 0); + + (*value) = symbols_.back(); + symbols_.pop_back(); + + const int discarded_bits = 32 - nbits; + (*value) <<= discarded_bits; + (*value) >>= discarded_bits; +} + +void SymbolBitDecoder::Clear() { + symbols_.clear(); + symbols_.shrink_to_fit(); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.h b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.h new file mode 100644 index 000000000..909d7174f --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_decoder.h @@ -0,0 +1,36 @@ +#ifndef DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_DECODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_DECODER_H_ + +#include +#include + +#include "draco/core/decoder_buffer.h" + +namespace draco { + +// Class for decoding bits using the symbol entropy encoding. Wraps +// |DecodeSymbols|. Note that this uses a symbol-based encoding scheme for +// encoding bits. +class SymbolBitDecoder { + public: + // Sets |source_buffer| as the buffer to decode bits from. + bool StartDecoding(DecoderBuffer *source_buffer); + + // Decode one bit. Returns true if the bit is a 1, otherwise false. + bool DecodeNextBit(); + + // Decode the next |nbits| and return the sequence in |value|. |nbits| must be + // > 0 and <= 32. + void DecodeLeastSignificantBits32(int nbits, uint32_t *value); + + void EndDecoding() { Clear(); } + + private: + void Clear(); + + std::vector symbols_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.cc b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.cc new file mode 100644 index 000000000..83834236f --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.cc @@ -0,0 +1,30 @@ +#include "draco/compression/bit_coders/symbol_bit_encoder.h" + +#include "draco/compression/entropy/symbol_encoding.h" + +namespace draco { + +void SymbolBitEncoder::EncodeLeastSignificantBits32(int nbits, uint32_t value) { + DRACO_DCHECK_LE(1, nbits); + DRACO_DCHECK_LE(nbits, 32); + + const int discarded_bits = 32 - nbits; + value <<= discarded_bits; + value >>= discarded_bits; + + symbols_.push_back(value); +} + +void SymbolBitEncoder::EndEncoding(EncoderBuffer *target_buffer) { + target_buffer->Encode(static_cast(symbols_.size())); + EncodeSymbols(symbols_.data(), static_cast(symbols_.size()), 1, nullptr, + target_buffer); + Clear(); +} + +void SymbolBitEncoder::Clear() { + symbols_.clear(); + symbols_.shrink_to_fit(); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.h b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.h new file mode 100644 index 000000000..7f1570c1a --- /dev/null +++ b/contrib/draco/src/draco/compression/bit_coders/symbol_bit_encoder.h @@ -0,0 +1,36 @@ +#ifndef DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_ENCODER_H_ +#define DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_ENCODER_H_ + +#include +#include + +#include "draco/core/encoder_buffer.h" + +namespace draco { + +// Class for encoding bits using the symbol entropy encoding. Wraps +// |EncodeSymbols|. 
Note that this uses a symbol-based encoding scheme for +// encoding bits. +class SymbolBitEncoder { + public: + // Must be called before any Encode* function is called. + void StartEncoding() { Clear(); } + + // Encode one bit. If |bit| is true encode a 1, otherwise encode a 0. + void EncodeBit(bool bit) { EncodeLeastSignificantBits32(1, bit ? 1 : 0); } + + // Encode |nbits| LSBs of |value| as a symbol. |nbits| must be > 0 and <= 32. + void EncodeLeastSignificantBits32(int nbits, uint32_t value); + + // Ends the bit encoding and stores the result into the target_buffer. + void EndEncoding(EncoderBuffer *target_buffer); + + private: + void Clear(); + + std::vector symbols_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_BIT_CODERS_SYMBOL_BIT_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/config/compression_shared.h b/contrib/draco/src/draco/compression/config/compression_shared.h new file mode 100644 index 000000000..c43f303bd --- /dev/null +++ b/contrib/draco/src/draco/compression/config/compression_shared.h @@ -0,0 +1,155 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_ +#define DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_ + +#include + +#include "draco/core/macros.h" +#include "draco/draco_features.h" + +namespace draco { + +// Latest Draco bit-stream version. +static constexpr uint8_t kDracoPointCloudBitstreamVersionMajor = 2; +static constexpr uint8_t kDracoPointCloudBitstreamVersionMinor = 3; +static constexpr uint8_t kDracoMeshBitstreamVersionMajor = 2; +static constexpr uint8_t kDracoMeshBitstreamVersionMinor = 2; + +// Concatenated latest bit-stream version. +static constexpr uint16_t kDracoPointCloudBitstreamVersion = + DRACO_BITSTREAM_VERSION(kDracoPointCloudBitstreamVersionMajor, + kDracoPointCloudBitstreamVersionMinor); + +static constexpr uint16_t kDracoMeshBitstreamVersion = DRACO_BITSTREAM_VERSION( + kDracoMeshBitstreamVersionMajor, kDracoMeshBitstreamVersionMinor); + +// Currently, we support point cloud and triangular mesh encoding. +// TODO(draco-eng) Convert enum to enum class (safety, not performance). +enum EncodedGeometryType { + INVALID_GEOMETRY_TYPE = -1, + POINT_CLOUD = 0, + TRIANGULAR_MESH, + NUM_ENCODED_GEOMETRY_TYPES +}; + +// List of encoding methods for point clouds. +enum PointCloudEncodingMethod { + POINT_CLOUD_SEQUENTIAL_ENCODING = 0, + POINT_CLOUD_KD_TREE_ENCODING +}; + +// List of encoding methods for meshes. +enum MeshEncoderMethod { + MESH_SEQUENTIAL_ENCODING = 0, + MESH_EDGEBREAKER_ENCODING, +}; + +// List of various attribute encoders supported by our framework. The entries +// are used as unique identifiers of the encoders and their values should not +// be changed! +enum AttributeEncoderType { + BASIC_ATTRIBUTE_ENCODER = 0, + MESH_TRAVERSAL_ATTRIBUTE_ENCODER, + KD_TREE_ATTRIBUTE_ENCODER, +}; + +// List of various sequential attribute encoder/decoders that can be used in our +// pipeline. 
The values represent unique identifiers used by the decoder and +// they should not be changed. +enum SequentialAttributeEncoderType { + SEQUENTIAL_ATTRIBUTE_ENCODER_GENERIC = 0, + SEQUENTIAL_ATTRIBUTE_ENCODER_INTEGER, + SEQUENTIAL_ATTRIBUTE_ENCODER_QUANTIZATION, + SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS, +}; + +// List of all prediction methods currently supported by our framework. +enum PredictionSchemeMethod { + // Special value indicating that no prediction scheme was used. + PREDICTION_NONE = -2, + // Used when no specific prediction scheme is required. + PREDICTION_UNDEFINED = -1, + PREDICTION_DIFFERENCE = 0, + MESH_PREDICTION_PARALLELOGRAM = 1, + MESH_PREDICTION_MULTI_PARALLELOGRAM = 2, + MESH_PREDICTION_TEX_COORDS_DEPRECATED = 3, + MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM = 4, + MESH_PREDICTION_TEX_COORDS_PORTABLE = 5, + MESH_PREDICTION_GEOMETRIC_NORMAL = 6, + NUM_PREDICTION_SCHEMES +}; + +// List of all prediction scheme transforms used by our framework. +enum PredictionSchemeTransformType { + PREDICTION_TRANSFORM_NONE = -1, + // Basic delta transform where the prediction is computed as difference the + // predicted and original value. + PREDICTION_TRANSFORM_DELTA = 0, + // An improved delta transform where all computed delta values are wrapped + // around a fixed interval which lowers the entropy. + PREDICTION_TRANSFORM_WRAP = 1, + // Specialized transform for normal coordinates using inverted tiles. + PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON = 2, + // Specialized transform for normal coordinates using canonicalized inverted + // tiles. + PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED = 3, + // The number of valid (non-negative) prediction scheme transform types. + NUM_PREDICTION_SCHEME_TRANSFORM_TYPES +}; + +// List of all mesh traversal methods supported by Draco framework. +enum MeshTraversalMethod { + MESH_TRAVERSAL_DEPTH_FIRST = 0, + MESH_TRAVERSAL_PREDICTION_DEGREE = 1, + NUM_TRAVERSAL_METHODS +}; + +// List of all variant of the edgebreaker method that is used for compression +// of mesh connectivity. +enum MeshEdgebreakerConnectivityEncodingMethod { + MESH_EDGEBREAKER_STANDARD_ENCODING = 0, + MESH_EDGEBREAKER_PREDICTIVE_ENCODING = 1, // Deprecated. + MESH_EDGEBREAKER_VALENCE_ENCODING = 2, +}; + +// Draco header V1 +struct DracoHeader { + int8_t draco_string[5]; + uint8_t version_major; + uint8_t version_minor; + uint8_t encoder_type; + uint8_t encoder_method; + uint16_t flags; +}; + +enum NormalPredictionMode { + ONE_TRIANGLE = 0, // To be deprecated. + TRIANGLE_AREA = 1, +}; + +// Different methods used for symbol entropy encoding. +enum SymbolCodingMethod { + SYMBOL_CODING_TAGGED = 0, + SYMBOL_CODING_RAW = 1, + NUM_SYMBOL_CODING_METHODS, +}; + +// Mask for setting and getting the bit for metadata in |flags| of header. +#define METADATA_FLAG_MASK 0x8000 + +} // namespace draco + +#endif // DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_ diff --git a/contrib/draco/src/draco/compression/config/decoder_options.h b/contrib/draco/src/draco/compression/config/decoder_options.h new file mode 100644 index 000000000..3b3889993 --- /dev/null +++ b/contrib/draco/src/draco/compression/config/decoder_options.h @@ -0,0 +1,34 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
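compression_shared.h above concatenates the major/minor bit-stream versions with DRACO_BITSTREAM_VERSION and reserves the top bit of DracoHeader::flags for metadata via METADATA_FLAG_MASK. The macro itself lives in draco/core/macros.h and is not part of this patch; the sketch below assumes the usual (major << 8) | minor packing that comparisons such as bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2) rely on:

```cpp
// Illustration of the concatenated bit-stream version and the metadata flag.
#include <cstdint>
#include <cstdio>

static constexpr uint16_t PackVersion(uint8_t major, uint8_t minor) {
  return static_cast<uint16_t>((major << 8) | minor);  // assumed layout
}

#define METADATA_FLAG_MASK 0x8000  // as defined in compression_shared.h

int main() {
  constexpr uint16_t mesh_version = PackVersion(2, 2);         // 0x0202
  constexpr uint16_t point_cloud_version = PackVersion(2, 3);  // 0x0203
  std::printf("mesh=0x%04X point_cloud=0x%04X\n",
              static_cast<unsigned>(mesh_version),
              static_cast<unsigned>(point_cloud_version));

  // The top bit of DracoHeader::flags marks the presence of metadata.
  const uint16_t flags = 0x8000;
  std::printf("has metadata: %s\n",
              (flags & METADATA_FLAG_MASK) ? "yes" : "no");
  return 0;
}
```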
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_CONFIG_DECODER_OPTIONS_H_ +#define DRACO_COMPRESSION_CONFIG_DECODER_OPTIONS_H_ + +#include +#include + +#include "draco/attributes/geometry_attribute.h" +#include "draco/compression/config/draco_options.h" + +namespace draco { + +// Class containing options that can be passed to PointCloudDecoder to control +// decoding of the input geometry. The options can be specified either for the +// whole geometry or for a specific attribute type. Each option is identified +// by a unique name stored as an std::string. +typedef DracoOptions DecoderOptions; + +} // namespace draco + +#endif // DRACO_COMPRESSION_CONFIG_DECODER_OPTIONS_H_ diff --git a/contrib/draco/src/draco/compression/config/decoder_options_test.cc b/contrib/draco/src/draco/compression/config/decoder_options_test.cc new file mode 100644 index 000000000..a5cd7f106 --- /dev/null +++ b/contrib/draco/src/draco/compression/config/decoder_options_test.cc @@ -0,0 +1,67 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/config/decoder_options.h" + +#include "draco/core/draco_test_base.h" + +namespace { + +class DecoderOptionsTest : public ::testing::Test { + protected: + DecoderOptionsTest() {} +}; + +TEST_F(DecoderOptionsTest, TestOptions) { + // This test verifies that we can update global and attribute options of the + // DecoderOptions class instance. + draco::DecoderOptions options; + options.SetGlobalInt("test", 3); + ASSERT_EQ(options.GetGlobalInt("test", -1), 3); + + options.SetAttributeInt(draco::GeometryAttribute::POSITION, "test", 1); + options.SetAttributeInt(draco::GeometryAttribute::GENERIC, "test", 2); + ASSERT_EQ( + options.GetAttributeInt(draco::GeometryAttribute::TEX_COORD, "test", -1), + 3); + ASSERT_EQ( + options.GetAttributeInt(draco::GeometryAttribute::POSITION, "test", -1), + 1); + ASSERT_EQ( + options.GetAttributeInt(draco::GeometryAttribute::GENERIC, "test", -1), + 2); +} + +TEST_F(DecoderOptionsTest, TestAttributeOptionsAccessors) { + // This test verifies that we can query options stored in DecoderOptions + // class instance. 
+ draco::DecoderOptions options; + options.SetGlobalInt("test", 1); + options.SetAttributeInt(draco::GeometryAttribute::POSITION, "test", 2); + options.SetAttributeInt(draco::GeometryAttribute::TEX_COORD, "test", 3); + + ASSERT_EQ( + options.GetAttributeInt(draco::GeometryAttribute::POSITION, "test", -1), + 2); + ASSERT_EQ( + options.GetAttributeInt(draco::GeometryAttribute::POSITION, "test2", -1), + -1); + ASSERT_EQ( + options.GetAttributeInt(draco::GeometryAttribute::TEX_COORD, "test", -1), + 3); + ASSERT_EQ( + options.GetAttributeInt(draco::GeometryAttribute::NORMAL, "test", -1), 1); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/config/draco_options.h b/contrib/draco/src/draco/compression/config/draco_options.h new file mode 100644 index 000000000..2bd4a3b67 --- /dev/null +++ b/contrib/draco/src/draco/compression/config/draco_options.h @@ -0,0 +1,249 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_ +#define DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_ + +#include +#include + +#include "draco/core/options.h" + +namespace draco { + +// Base option class used to control encoding and decoding. The geometry coding +// can be controlled through the following options: +// 1. Global options - Options specific to overall geometry or options common +// for all attributes +// 2. Per attribute options - Options specific to a given attribute. +// Each attribute is identified by the template +// argument AttributeKeyT that can be for example +// the attribute type or the attribute id. +// +// Example: +// +// DracoOptions options; +// +// // Set an option common for all attributes. +// options.SetGlobalInt("some_option_name", 2); +// +// // Geometry with two attributes. +// AttributeKey att_key0 = in_key0; +// AttributeKey att_key1 = in_key1; +// +// options.SetAttributeInt(att_key0, "some_option_name", 3); +// +// options.GetAttributeInt(att_key0, "some_option_name"); // Returns 3 +// options.GetAttributeInt(att_key1, "some_option_name"); // Returns 2 +// options.GetGlobalInt("some_option_name"); // Returns 2 +// +template +class DracoOptions { + public: + typedef AttributeKeyT AttributeKey; + + // Get an option for a specific attribute key. If the option is not found in + // an attribute specific storage, the implementation will return a global + // option of the given name (if available). If the option is not found, the + // provided default value |default_val| is returned instead. + int GetAttributeInt(const AttributeKey &att_key, const std::string &name, + int default_val) const; + + // Sets an option for a specific attribute key. 
+ void SetAttributeInt(const AttributeKey &att_key, const std::string &name, + int val); + + float GetAttributeFloat(const AttributeKey &att_key, const std::string &name, + float default_val) const; + void SetAttributeFloat(const AttributeKey &att_key, const std::string &name, + float val); + bool GetAttributeBool(const AttributeKey &att_key, const std::string &name, + bool default_val) const; + void SetAttributeBool(const AttributeKey &att_key, const std::string &name, + bool val); + template + bool GetAttributeVector(const AttributeKey &att_key, const std::string &name, + int num_dims, DataTypeT *val) const; + template + void SetAttributeVector(const AttributeKey &att_key, const std::string &name, + int num_dims, const DataTypeT *val); + + bool IsAttributeOptionSet(const AttributeKey &att_key, + const std::string &name) const; + + // Gets/sets a global option that is not specific to any attribute. + int GetGlobalInt(const std::string &name, int default_val) const { + return global_options_.GetInt(name, default_val); + } + void SetGlobalInt(const std::string &name, int val) { + global_options_.SetInt(name, val); + } + float GetGlobalFloat(const std::string &name, float default_val) const { + return global_options_.GetFloat(name, default_val); + } + void SetGlobalFloat(const std::string &name, float val) { + global_options_.SetFloat(name, val); + } + bool GetGlobalBool(const std::string &name, bool default_val) const { + return global_options_.GetBool(name, default_val); + } + void SetGlobalBool(const std::string &name, bool val) { + global_options_.SetBool(name, val); + } + template + bool GetGlobalVector(const std::string &name, int num_dims, + DataTypeT *val) const { + return global_options_.GetVector(name, num_dims, val); + } + template + void SetGlobalVector(const std::string &name, int num_dims, + const DataTypeT *val) { + global_options_.SetVector(name, val, num_dims); + } + bool IsGlobalOptionSet(const std::string &name) const { + return global_options_.IsOptionSet(name); + } + + // Sets or replaces attribute options with the provided |options|. + void SetAttributeOptions(const AttributeKey &att_key, const Options &options); + void SetGlobalOptions(const Options &options) { global_options_ = options; } + + // Returns |Options| instance for the specified options class if it exists. + const Options *FindAttributeOptions(const AttributeKeyT &att_key) const; + const Options &GetGlobalOptions() const { return global_options_; } + + private: + Options *GetAttributeOptions(const AttributeKeyT &att_key); + + Options global_options_; + + // Storage for options related to geometry attributes. 
+ std::map attribute_options_; +}; + +template +const Options *DracoOptions::FindAttributeOptions( + const AttributeKeyT &att_key) const { + auto it = attribute_options_.find(att_key); + if (it == attribute_options_.end()) { + return nullptr; + } + return &it->second; +} + +template +Options *DracoOptions::GetAttributeOptions( + const AttributeKeyT &att_key) { + auto it = attribute_options_.find(att_key); + if (it != attribute_options_.end()) { + return &it->second; + } + Options new_options; + it = attribute_options_.insert(std::make_pair(att_key, new_options)).first; + return &it->second; +} + +template +int DracoOptions::GetAttributeInt(const AttributeKeyT &att_key, + const std::string &name, + int default_val) const { + const Options *const att_options = FindAttributeOptions(att_key); + if (att_options && att_options->IsOptionSet(name)) { + return att_options->GetInt(name, default_val); + } + return global_options_.GetInt(name, default_val); +} + +template +void DracoOptions::SetAttributeInt(const AttributeKeyT &att_key, + const std::string &name, + int val) { + GetAttributeOptions(att_key)->SetInt(name, val); +} + +template +float DracoOptions::GetAttributeFloat( + const AttributeKeyT &att_key, const std::string &name, + float default_val) const { + const Options *const att_options = FindAttributeOptions(att_key); + if (att_options && att_options->IsOptionSet(name)) { + return att_options->GetFloat(name, default_val); + } + return global_options_.GetFloat(name, default_val); +} + +template +void DracoOptions::SetAttributeFloat( + const AttributeKeyT &att_key, const std::string &name, float val) { + GetAttributeOptions(att_key)->SetFloat(name, val); +} + +template +bool DracoOptions::GetAttributeBool(const AttributeKeyT &att_key, + const std::string &name, + bool default_val) const { + const Options *const att_options = FindAttributeOptions(att_key); + if (att_options && att_options->IsOptionSet(name)) { + return att_options->GetBool(name, default_val); + } + return global_options_.GetBool(name, default_val); +} + +template +void DracoOptions::SetAttributeBool(const AttributeKeyT &att_key, + const std::string &name, + bool val) { + GetAttributeOptions(att_key)->SetBool(name, val); +} + +template +template +bool DracoOptions::GetAttributeVector( + const AttributeKey &att_key, const std::string &name, int num_dims, + DataTypeT *val) const { + const Options *const att_options = FindAttributeOptions(att_key); + if (att_options && att_options->IsOptionSet(name)) { + return att_options->GetVector(name, num_dims, val); + } + return global_options_.GetVector(name, num_dims, val); +} + +template +template +void DracoOptions::SetAttributeVector( + const AttributeKey &att_key, const std::string &name, int num_dims, + const DataTypeT *val) { + GetAttributeOptions(att_key)->SetVector(name, val, num_dims); +} + +template +bool DracoOptions::IsAttributeOptionSet( + const AttributeKey &att_key, const std::string &name) const { + const Options *const att_options = FindAttributeOptions(att_key); + if (att_options) { + return att_options->IsOptionSet(name); + } + return global_options_.IsOptionSet(name); +} + +template +void DracoOptions::SetAttributeOptions( + const AttributeKey &att_key, const Options &options) { + Options *att_options = GetAttributeOptions(att_key); + *att_options = options; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_CONFIG_DRACO_OPTIONS_H_ diff --git a/contrib/draco/src/draco/compression/config/encoder_options.h 
b/contrib/draco/src/draco/compression/config/encoder_options.h new file mode 100644 index 000000000..ed1b02068 --- /dev/null +++ b/contrib/draco/src/draco/compression/config/encoder_options.h @@ -0,0 +1,97 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_CONFIG_ENCODER_OPTIONS_H_ +#define DRACO_COMPRESSION_CONFIG_ENCODER_OPTIONS_H_ + +#include "draco/attributes/geometry_attribute.h" +#include "draco/compression/config/draco_options.h" +#include "draco/compression/config/encoding_features.h" +#include "draco/draco_features.h" + +namespace draco { + +// EncoderOptions allow users to specify so called feature options that are used +// to inform the encoder which encoding features can be used (i.e. which +// features are going to be available to the decoder). +template +class EncoderOptionsBase : public DracoOptions { + public: + static EncoderOptionsBase CreateDefaultOptions() { + EncoderOptionsBase options; +#ifdef DRACO_STANDARD_EDGEBREAKER_SUPPORTED + options.SetSupportedFeature(features::kEdgebreaker, true); +#endif +#ifdef DRACO_PREDICTIVE_EDGEBREAKER_SUPPORTED + options.SetSupportedFeature(features::kPredictiveEdgebreaker, true); +#endif + return options; + } + static EncoderOptionsBase CreateEmptyOptions() { + return EncoderOptionsBase(); + } + + // Returns speed options with default value of 5. + int GetEncodingSpeed() const { + return this->GetGlobalInt("encoding_speed", 5); + } + int GetDecodingSpeed() const { + return this->GetGlobalInt("decoding_speed", 5); + } + + // Returns the maximum speed for both encoding/decoding. + int GetSpeed() const { + const int encoding_speed = this->GetGlobalInt("encoding_speed", -1); + const int decoding_speed = this->GetGlobalInt("decoding_speed", -1); + const int max_speed = std::max(encoding_speed, decoding_speed); + if (max_speed == -1) { + return 5; // Default value. + } + return max_speed; + } + + void SetSpeed(int encoding_speed, int decoding_speed) { + this->SetGlobalInt("encoding_speed", encoding_speed); + this->SetGlobalInt("decoding_speed", decoding_speed); + } + + // Sets a given feature as supported or unsupported by the target decoder. + // Encoder will always use only supported features when encoding the input + // geometry. + void SetSupportedFeature(const std::string &name, bool supported) { + feature_options_.SetBool(name, supported); + } + bool IsFeatureSupported(const std::string &name) const { + return feature_options_.GetBool(name); + } + + void SetFeatureOptions(const Options &options) { feature_options_ = options; } + const Options &GetFeaturelOptions() const { return feature_options_; } + + private: + // Use helper methods to construct the encoder options. + // See CreateDefaultOptions(); + EncoderOptionsBase() {} + + // List of supported/unsupported features that can be used by the encoder. + Options feature_options_; +}; + +// Encoder options where attributes are identified by their attribute id. 
+// Used to set options that are specific to a given geometry. +typedef EncoderOptionsBase EncoderOptions; + +} // namespace draco + +#endif // DRACO_COMPRESSION_CONFIG_ENCODER_OPTIONS_H_ diff --git a/contrib/draco/src/draco/compression/config/encoding_features.h b/contrib/draco/src/draco/compression/config/encoding_features.h new file mode 100644 index 000000000..d6a8b7128 --- /dev/null +++ b/contrib/draco/src/draco/compression/config/encoding_features.h @@ -0,0 +1,39 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File provides helpful macros that define features available for encoding +// the input of the input geometry. These macros can be used as an input in +// the EncoderOptions::SetSupportedFeature() method instead of the text. +// The most recent set of features supported +// by the default implementation is: +// +// kEdgebreaker +// - edgebreaker method for encoding meshes. +// kPredictiveEdgebreaker +// - advanced version of the edgebreaker method (slower but better +// compression). +// +#ifndef DRACO_COMPRESSION_CONFIG_ENCODING_FEATURES_H_ +#define DRACO_COMPRESSION_CONFIG_ENCODING_FEATURES_H_ + +namespace draco { +namespace features { + +constexpr const char *kEdgebreaker = "standard_edgebreaker"; +constexpr const char *kPredictiveEdgebreaker = "predictive_edgebreaker"; + +} // namespace features +} // namespace draco + +#endif // DRACO_COMPRESSION_CONFIG_ENCODING_FEATURES_H_ diff --git a/contrib/draco/src/draco/compression/decode.cc b/contrib/draco/src/draco/compression/decode.cc new file mode 100644 index 000000000..92ae4ff66 --- /dev/null +++ b/contrib/draco/src/draco/compression/decode.cc @@ -0,0 +1,135 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#include "draco/compression/decode.h" + +#include "draco/compression/config/compression_shared.h" + +#ifdef DRACO_MESH_COMPRESSION_SUPPORTED +#include "draco/compression/mesh/mesh_edgebreaker_decoder.h" +#include "draco/compression/mesh/mesh_sequential_decoder.h" +#endif + +#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED +#include "draco/compression/point_cloud/point_cloud_kd_tree_decoder.h" +#include "draco/compression/point_cloud/point_cloud_sequential_decoder.h" +#endif + +namespace draco { + +#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED +StatusOr> CreatePointCloudDecoder( + int8_t method) { + if (method == POINT_CLOUD_SEQUENTIAL_ENCODING) { + return std::unique_ptr( + new PointCloudSequentialDecoder()); + } else if (method == POINT_CLOUD_KD_TREE_ENCODING) { + return std::unique_ptr(new PointCloudKdTreeDecoder()); + } + return Status(Status::DRACO_ERROR, "Unsupported encoding method."); +} +#endif + +#ifdef DRACO_MESH_COMPRESSION_SUPPORTED +StatusOr> CreateMeshDecoder(uint8_t method) { + if (method == MESH_SEQUENTIAL_ENCODING) { + return std::unique_ptr(new MeshSequentialDecoder()); + } else if (method == MESH_EDGEBREAKER_ENCODING) { + return std::unique_ptr(new MeshEdgebreakerDecoder()); + } + return Status(Status::DRACO_ERROR, "Unsupported encoding method."); +} +#endif + +StatusOr Decoder::GetEncodedGeometryType( + DecoderBuffer *in_buffer) { + DecoderBuffer temp_buffer(*in_buffer); + DracoHeader header; + DRACO_RETURN_IF_ERROR(PointCloudDecoder::DecodeHeader(&temp_buffer, &header)); + if (header.encoder_type >= NUM_ENCODED_GEOMETRY_TYPES) { + return Status(Status::DRACO_ERROR, "Unsupported geometry type."); + } + return static_cast(header.encoder_type); +} + +StatusOr> Decoder::DecodePointCloudFromBuffer( + DecoderBuffer *in_buffer) { + DRACO_ASSIGN_OR_RETURN(EncodedGeometryType type, + GetEncodedGeometryType(in_buffer)) + if (type == POINT_CLOUD) { +#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED + std::unique_ptr point_cloud(new PointCloud()); + DRACO_RETURN_IF_ERROR(DecodeBufferToGeometry(in_buffer, point_cloud.get())) + return std::move(point_cloud); +#endif + } else if (type == TRIANGULAR_MESH) { +#ifdef DRACO_MESH_COMPRESSION_SUPPORTED + std::unique_ptr mesh(new Mesh()); + DRACO_RETURN_IF_ERROR(DecodeBufferToGeometry(in_buffer, mesh.get())) + return static_cast>(std::move(mesh)); +#endif + } + return Status(Status::DRACO_ERROR, "Unsupported geometry type."); +} + +StatusOr> Decoder::DecodeMeshFromBuffer( + DecoderBuffer *in_buffer) { + std::unique_ptr mesh(new Mesh()); + DRACO_RETURN_IF_ERROR(DecodeBufferToGeometry(in_buffer, mesh.get())) + return std::move(mesh); +} + +Status Decoder::DecodeBufferToGeometry(DecoderBuffer *in_buffer, + PointCloud *out_geometry) { +#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED + DecoderBuffer temp_buffer(*in_buffer); + DracoHeader header; + DRACO_RETURN_IF_ERROR(PointCloudDecoder::DecodeHeader(&temp_buffer, &header)) + if (header.encoder_type != POINT_CLOUD) { + return Status(Status::DRACO_ERROR, "Input is not a point cloud."); + } + DRACO_ASSIGN_OR_RETURN(std::unique_ptr decoder, + CreatePointCloudDecoder(header.encoder_method)) + + DRACO_RETURN_IF_ERROR(decoder->Decode(options_, in_buffer, out_geometry)) + return OkStatus(); +#else + return Status(Status::DRACO_ERROR, "Unsupported geometry type."); +#endif +} + +Status Decoder::DecodeBufferToGeometry(DecoderBuffer *in_buffer, + Mesh *out_geometry) { +#ifdef DRACO_MESH_COMPRESSION_SUPPORTED + DecoderBuffer temp_buffer(*in_buffer); + DracoHeader header; + 
DRACO_RETURN_IF_ERROR(PointCloudDecoder::DecodeHeader(&temp_buffer, &header)) + if (header.encoder_type != TRIANGULAR_MESH) { + return Status(Status::DRACO_ERROR, "Input is not a mesh."); + } + DRACO_ASSIGN_OR_RETURN(std::unique_ptr decoder, + CreateMeshDecoder(header.encoder_method)) + + DRACO_RETURN_IF_ERROR(decoder->Decode(options_, in_buffer, out_geometry)) + return OkStatus(); +#else + return Status(Status::DRACO_ERROR, "Unsupported geometry type."); +#endif +} + +void Decoder::SetSkipAttributeTransform(GeometryAttribute::Type att_type) { + options_.SetAttributeBool(att_type, "skip_attribute_transform", true); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/decode.h b/contrib/draco/src/draco/compression/decode.h new file mode 100644 index 000000000..5f3fad26b --- /dev/null +++ b/contrib/draco/src/draco/compression/decode.h @@ -0,0 +1,80 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_DECODE_H_ +#define DRACO_COMPRESSION_DECODE_H_ + +#include "draco/compression/config/compression_shared.h" +#include "draco/compression/config/decoder_options.h" +#include "draco/core/decoder_buffer.h" +#include "draco/core/status_or.h" +#include "draco/draco_features.h" +#include "draco/mesh/mesh.h" + +namespace draco { + +// Class responsible for decoding of meshes and point clouds that were +// compressed by a Draco encoder. +class Decoder { + public: + // Returns the geometry type encoded in the input |in_buffer|. + // The return value is one of POINT_CLOUD, MESH or INVALID_GEOMETRY in case + // the input data is invalid. + // The decoded geometry type can be used to choose an appropriate decoding + // function for a given geometry type (see below). + static StatusOr GetEncodedGeometryType( + DecoderBuffer *in_buffer); + + // Decodes point cloud from the provided buffer. The buffer must be filled + // with data that was encoded with either the EncodePointCloudToBuffer or + // EncodeMeshToBuffer methods in encode.h. In case the input buffer contains + // mesh, the returned instance can be down-casted to Mesh. + StatusOr> DecodePointCloudFromBuffer( + DecoderBuffer *in_buffer); + + // Decodes a triangular mesh from the provided buffer. The mesh must be filled + // with data that was encoded using the EncodeMeshToBuffer method in encode.h. + // The function will return nullptr in case the input is invalid or if it was + // encoded with the EncodePointCloudToBuffer method. + StatusOr> DecodeMeshFromBuffer( + DecoderBuffer *in_buffer); + + // Decodes the buffer into a provided geometry. If the geometry is + // incompatible with the encoded data. For example, when |out_geometry| is + // draco::Mesh while the data contains a point cloud, the function will return + // an error status. 
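  //
  // Illustrative call sequence (not part of the original header; |data| is a
  // hypothetical std::vector<char> holding a Draco-encoded mesh):
  //
  //   draco::DecoderBuffer buffer;
  //   buffer.Init(data.data(), data.size());
  //   draco::Mesh mesh;
  //   draco::Decoder decoder;
  //   const draco::Status status =
  //       decoder.DecodeBufferToGeometry(&buffer, &mesh);
  //   if (!status.ok()) {
  //     // Handle the error reported in |status|.
  //   }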
+ Status DecodeBufferToGeometry(DecoderBuffer *in_buffer, + PointCloud *out_geometry); + Status DecodeBufferToGeometry(DecoderBuffer *in_buffer, Mesh *out_geometry); + + // When set, the decoder is going to skip attribute transform for a given + // attribute type. For example for quantized attributes, the decoder would + // skip the dequantization step and the returned geometry would contain an + // attribute with quantized values. The attribute would also contain an + // instance of AttributeTransform class that is used to describe the skipped + // transform, including all parameters that are needed to perform the + // transform manually. + void SetSkipAttributeTransform(GeometryAttribute::Type att_type); + + // Returns the options instance used by the decoder that can be used by users + // to control the decoding process. + DecoderOptions *options() { return &options_; } + + private: + DecoderOptions options_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_DECODE_H_ diff --git a/contrib/draco/src/draco/compression/decode_test.cc b/contrib/draco/src/draco/compression/decode_test.cc new file mode 100644 index 000000000..198714690 --- /dev/null +++ b/contrib/draco/src/draco/compression/decode_test.cc @@ -0,0 +1,169 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/decode.h" + +#include +#include + +#include "draco/core/draco_test_base.h" +#include "draco/core/draco_test_utils.h" +#include "draco/io/file_utils.h" + +namespace { + +class DecodeTest : public ::testing::Test { + protected: + DecodeTest() {} +}; + +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED +TEST_F(DecodeTest, TestSkipAttributeTransform) { + const std::string file_name = "test_nm_quant.0.9.0.drc"; + // Tests that decoders can successfully skip attribute transform. + std::vector data; + ASSERT_TRUE( + draco::ReadFileToBuffer(draco::GetTestFileFullPath(file_name), &data)); + ASSERT_FALSE(data.empty()); + + // Create a draco decoding buffer. Note that no data is copied in this step. + draco::DecoderBuffer buffer; + buffer.Init(data.data(), data.size()); + + draco::Decoder decoder; + // Make sure we skip dequantization for the position attribute. + decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION); + + // Decode the input data into a geometry. + std::unique_ptr pc = + decoder.DecodePointCloudFromBuffer(&buffer).value(); + ASSERT_NE(pc, nullptr); + + const draco::PointAttribute *const pos_att = + pc->GetNamedAttribute(draco::GeometryAttribute::POSITION); + ASSERT_NE(pos_att, nullptr); + + // Ensure the position attribute is of type int32_t and that it has a valid + // attribute transform. + ASSERT_EQ(pos_att->data_type(), draco::DT_INT32); + ASSERT_NE(pos_att->GetAttributeTransformData(), nullptr); + + // Normal attribute should be left transformed. 
+ const draco::PointAttribute *const norm_att = + pc->GetNamedAttribute(draco::GeometryAttribute::NORMAL); + ASSERT_EQ(norm_att->data_type(), draco::DT_FLOAT32); + ASSERT_EQ(norm_att->GetAttributeTransformData(), nullptr); +} +#endif + +void TestSkipAttributeTransformOnPointCloudWithColor(const std::string &file) { + std::vector data; + ASSERT_TRUE(draco::ReadFileToBuffer(draco::GetTestFileFullPath(file), &data)); + ASSERT_FALSE(data.empty()); + + // Create a draco decoding buffer. Note that no data is copied in this step. + draco::DecoderBuffer buffer; + buffer.Init(data.data(), data.size()); + + draco::Decoder decoder; + // Make sure we skip dequantization for the position attribute. + decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION); + + // Decode the input data into a geometry. + std::unique_ptr pc = + decoder.DecodePointCloudFromBuffer(&buffer).value(); + ASSERT_NE(pc, nullptr); + + const draco::PointAttribute *const pos_att = + pc->GetNamedAttribute(draco::GeometryAttribute::POSITION); + ASSERT_NE(pos_att, nullptr); + + // Ensure the position attribute is of type int32_t or uint32_t and that it + // has a valid attribute transform. + ASSERT_TRUE(pos_att->data_type() == draco::DT_INT32 || + pos_att->data_type() == draco::DT_UINT32); + ASSERT_NE(pos_att->GetAttributeTransformData(), nullptr); + + const draco::PointAttribute *const clr_att = + pc->GetNamedAttribute(draco::GeometryAttribute::COLOR); + ASSERT_EQ(clr_att->data_type(), draco::DT_UINT8); + + // Ensure the color attribute was decoded correctly. Perform the decoding + // again without skipping the position dequantization and compare the + // attribute values. + + draco::DecoderBuffer buffer_2; + buffer_2.Init(data.data(), data.size()); + + draco::Decoder decoder_2; + + // Decode the input data into a geometry. + std::unique_ptr pc_2 = + decoder_2.DecodePointCloudFromBuffer(&buffer_2).value(); + ASSERT_NE(pc_2, nullptr); + + const draco::PointAttribute *const clr_att_2 = + pc_2->GetNamedAttribute(draco::GeometryAttribute::COLOR); + ASSERT_NE(clr_att_2, nullptr); + for (draco::PointIndex pi(0); pi < pc_2->num_points(); ++pi) { + // Colors should be exactly the same for both cases. + ASSERT_EQ(std::memcmp(clr_att->GetAddress(clr_att->mapped_index(pi)), + clr_att_2->GetAddress(clr_att_2->mapped_index(pi)), + clr_att->byte_stride()), + 0); + } +} + +TEST_F(DecodeTest, TestSkipAttributeTransformOnPointCloud) { + // Tests that decoders can successfully skip attribute transform on a point + // cloud with multiple attributes encoded with one attributes encoder. + TestSkipAttributeTransformOnPointCloudWithColor("pc_color.drc"); + TestSkipAttributeTransformOnPointCloudWithColor("pc_kd_color.drc"); +} + +TEST_F(DecodeTest, TestSkipAttributeTransformWithNoQuantization) { + // Tests that decoders can successfully skip attribute transform even though + // the input model was not quantized (it has no attribute transform). + const std::string file_name = "point_cloud_no_qp.drc"; + std::vector data; + ASSERT_TRUE( + draco::ReadFileToBuffer(draco::GetTestFileFullPath(file_name), &data)); + ASSERT_FALSE(data.empty()); + + // Create a draco decoding buffer. Note that no data is copied in this step. + draco::DecoderBuffer buffer; + buffer.Init(data.data(), data.size()); + + draco::Decoder decoder; + // Make sure we skip dequantization for the position attribute. + decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION); + + // Decode the input data into a geometry. 
+ std::unique_ptr pc = + decoder.DecodePointCloudFromBuffer(&buffer).value(); + ASSERT_NE(pc, nullptr); + + const draco::PointAttribute *const pos_att = + pc->GetNamedAttribute(draco::GeometryAttribute::POSITION); + ASSERT_NE(pos_att, nullptr); + + // Ensure the position attribute is of type float32 since the attribute was + // not quantized. + ASSERT_EQ(pos_att->data_type(), draco::DT_FLOAT32); + + // Make sure there is no attribute transform available for the attribute. + ASSERT_EQ(pos_att->GetAttributeTransformData(), nullptr); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/encode.cc b/contrib/draco/src/draco/compression/encode.cc new file mode 100644 index 000000000..f380aec15 --- /dev/null +++ b/contrib/draco/src/draco/compression/encode.cc @@ -0,0 +1,96 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/encode.h" + +#include "draco/compression/expert_encode.h" + +namespace draco { + +Encoder::Encoder() {} + +Status Encoder::EncodePointCloudToBuffer(const PointCloud &pc, + EncoderBuffer *out_buffer) { + ExpertEncoder encoder(pc); + encoder.Reset(CreateExpertEncoderOptions(pc)); + return encoder.EncodeToBuffer(out_buffer); +} + +Status Encoder::EncodeMeshToBuffer(const Mesh &m, EncoderBuffer *out_buffer) { + ExpertEncoder encoder(m); + encoder.Reset(CreateExpertEncoderOptions(m)); + DRACO_RETURN_IF_ERROR(encoder.EncodeToBuffer(out_buffer)); + set_num_encoded_points(encoder.num_encoded_points()); + set_num_encoded_faces(encoder.num_encoded_faces()); + return OkStatus(); +} + +EncoderOptions Encoder::CreateExpertEncoderOptions(const PointCloud &pc) const { + EncoderOptions ret_options = EncoderOptions::CreateEmptyOptions(); + ret_options.SetGlobalOptions(options().GetGlobalOptions()); + ret_options.SetFeatureOptions(options().GetFeaturelOptions()); + // Convert type-based attribute options to specific attributes in the provided + // point cloud. 
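  // (The Encoder keys its options by GeometryAttribute::Type, while the
  // ExpertEncoder expects options per attribute id, so each attribute whose
  // type has options set receives its own copy below.)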
+ for (int i = 0; i < pc.num_attributes(); ++i) { + const Options *att_options = + options().FindAttributeOptions(pc.attribute(i)->attribute_type()); + if (att_options) { + ret_options.SetAttributeOptions(i, *att_options); + } + } + return ret_options; +} + +void Encoder::Reset( + const EncoderOptionsBase &options) { + Base::Reset(options); +} + +void Encoder::Reset() { Base::Reset(); } + +void Encoder::SetSpeedOptions(int encoding_speed, int decoding_speed) { + Base::SetSpeedOptions(encoding_speed, decoding_speed); +} + +void Encoder::SetAttributeQuantization(GeometryAttribute::Type type, + int quantization_bits) { + options().SetAttributeInt(type, "quantization_bits", quantization_bits); +} + +void Encoder::SetAttributeExplicitQuantization(GeometryAttribute::Type type, + int quantization_bits, + int num_dims, + const float *origin, + float range) { + options().SetAttributeInt(type, "quantization_bits", quantization_bits); + options().SetAttributeVector(type, "quantization_origin", num_dims, origin); + options().SetAttributeFloat(type, "quantization_range", range); +} + +void Encoder::SetEncodingMethod(int encoding_method) { + Base::SetEncodingMethod(encoding_method); +} + +Status Encoder::SetAttributePredictionScheme(GeometryAttribute::Type type, + int prediction_scheme_method) { + Status status = CheckPredictionScheme(type, prediction_scheme_method); + if (!status.ok()) { + return status; + } + options().SetAttributeInt(type, "prediction_scheme", + prediction_scheme_method); + return status; +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/encode.h b/contrib/draco/src/draco/compression/encode.h new file mode 100644 index 000000000..bce8b34c2 --- /dev/null +++ b/contrib/draco/src/draco/compression/encode.h @@ -0,0 +1,140 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ENCODE_H_ +#define DRACO_COMPRESSION_ENCODE_H_ + +#include "draco/compression/config/compression_shared.h" +#include "draco/compression/config/encoder_options.h" +#include "draco/compression/encode_base.h" +#include "draco/core/encoder_buffer.h" +#include "draco/core/status.h" +#include "draco/mesh/mesh.h" + +namespace draco { + +// Basic helper class for encoding geometry using the Draco compression library. +// The class provides various methods that can be used to control several common +// options used during the encoding, such as the number of quantization bits for +// a given attribute. All these options are defined per attribute type, i.e., +// if there are more attributes of the same type (such as multiple texture +// coordinate attributes), the same options are going to be used for all of the +// attributes of this type. If different attributes of the same type need to +// use different options, use ExpertEncoder in expert_encode.h. +class Encoder + : public EncoderBase> { + public: + typedef EncoderBase> Base; + + Encoder(); + virtual ~Encoder() {} + + // Encodes a point cloud to the provided buffer. 
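  //
  // Illustrative usage (not part of the original header; |pc| stands for an
  // already populated draco::PointCloud):
  //
  //   draco::Encoder encoder;
  //   encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 14);
  //   encoder.SetSpeedOptions(/*encoding_speed=*/5, /*decoding_speed=*/5);
  //   draco::EncoderBuffer buffer;
  //   const draco::Status status = encoder.EncodePointCloudToBuffer(pc, &buffer);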
+ virtual Status EncodePointCloudToBuffer(const PointCloud &pc, + EncoderBuffer *out_buffer); + + // Encodes a mesh to the provided buffer. + virtual Status EncodeMeshToBuffer(const Mesh &m, EncoderBuffer *out_buffer); + + // Set encoder options used during the geometry encoding. Note that this call + // overwrites any modifications to the options done with the functions below, + // i.e., it resets the encoder. + void Reset(const EncoderOptionsBase &options); + void Reset(); + + // Sets the desired encoding and decoding speed for the given options. + // + // 0 = slowest speed, but the best compression. + // 10 = fastest, but the worst compression. + // -1 = undefined. + // + // Note that both speed options affect the encoder choice of used methods and + // algorithms. For example, a requirement for fast decoding may prevent the + // encoder from using the best compression methods even if the encoding speed + // is set to 0. In general, the faster of the two options limits the choice of + // features that can be used by the encoder. Additionally, setting + // |decoding_speed| to be faster than the |encoding_speed| may allow the + // encoder to choose the optimal method out of the available features for the + // given |decoding_speed|. + void SetSpeedOptions(int encoding_speed, int decoding_speed); + + // Sets the quantization compression options for a named attribute. The + // attribute values will be quantized in a box defined by the maximum extent + // of the attribute values. I.e., the actual precision of this option depends + // on the scale of the attribute values. + void SetAttributeQuantization(GeometryAttribute::Type type, + int quantization_bits); + + // Sets the explicit quantization compression for a named attribute. The + // attribute values will be quantized in a coordinate system defined by the + // provided origin and range (the input values should be within interval: + // ). + void SetAttributeExplicitQuantization(GeometryAttribute::Type type, + int quantization_bits, int num_dims, + const float *origin, float range); + + // Sets the desired prediction method for a given attribute. By default, + // prediction scheme is selected automatically by the encoder using other + // provided options (such as speed) and input geometry type (mesh, point + // cloud). This function should be called only when a specific prediction is + // preferred (e.g., when it is known that the encoder would select a less + // optimal prediction for the given input data). + // + // |prediction_scheme_method| should be one of the entries defined in + // compression/config/compression_shared.h : + // + // PREDICTION_NONE - use no prediction. + // PREDICTION_DIFFERENCE - delta coding + // MESH_PREDICTION_PARALLELOGRAM - parallelogram prediction for meshes. + // MESH_PREDICTION_CONSTRAINED_PARALLELOGRAM + // - better and more costly version of the parallelogram prediction. + // MESH_PREDICTION_TEX_COORDS_PORTABLE + // - specialized predictor for tex coordinates. + // MESH_PREDICTION_GEOMETRIC_NORMAL + // - specialized predictor for normal coordinates. + // + // Note that in case the desired prediction cannot be used, the default + // prediction will be automatically used instead. + Status SetAttributePredictionScheme(GeometryAttribute::Type type, + int prediction_scheme_method); + + // Sets the desired encoding method for a given geometry. 
By default, encoding + // method is selected based on the properties of the input geometry and based + // on the other options selected in the used EncoderOptions (such as desired + // encoding and decoding speed). This function should be called only when a + // specific method is required. + // + // |encoding_method| can be one of the values defined in + // compression/config/compression_shared.h based on the type of the input + // geometry that is going to be encoded. For point clouds, allowed entries are + // POINT_CLOUD_SEQUENTIAL_ENCODING + // POINT_CLOUD_KD_TREE_ENCODING + // + // For meshes the input can be + // MESH_SEQUENTIAL_ENCODING + // MESH_EDGEBREAKER_ENCODING + // + // If the selected method cannot be used for the given input, the subsequent + // call of EncodePointCloudToBuffer or EncodeMeshToBuffer is going to fail. + void SetEncodingMethod(int encoding_method); + + protected: + // Creates encoder options for the expert encoder used during the actual + // encoding. + EncoderOptions CreateExpertEncoderOptions(const PointCloud &pc) const; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENCODE_H_ diff --git a/contrib/draco/src/draco/compression/encode_base.h b/contrib/draco/src/draco/compression/encode_base.h new file mode 100644 index 000000000..c501bc4fa --- /dev/null +++ b/contrib/draco/src/draco/compression/encode_base.h @@ -0,0 +1,131 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ENCODE_BASE_H_ +#define DRACO_COMPRESSION_ENCODE_BASE_H_ + +#include "draco/attributes/geometry_attribute.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/core/status.h" + +namespace draco { + +// Base class for our geometry encoder classes. |EncoderOptionsT| specifies +// options class used by the encoder. Please, see encode.h and expert_encode.h +// for more details and method descriptions. +template +class EncoderBase { + public: + typedef EncoderOptionsT OptionsType; + + EncoderBase() + : options_(EncoderOptionsT::CreateDefaultOptions()), + num_encoded_points_(0), + num_encoded_faces_(0) {} + virtual ~EncoderBase() {} + + const EncoderOptionsT &options() const { return options_; } + EncoderOptionsT &options() { return options_; } + + // If enabled, it tells the encoder to keep track of the number of encoded + // points and faces (default = false). + // Note that this can slow down encoding for certain encoders. + void SetTrackEncodedProperties(bool flag); + + // Returns the number of encoded points and faces during the last encoding + // operation. Returns 0 if SetTrackEncodedProperties() was not set. 
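  //
  // Illustrative usage (not part of the original header; mirrors
  // TestNumberOfEncodedEntries in encode_test.cc later in this diff):
  //
  //   encoder.SetTrackEncodedProperties(true);
  //   encoder.EncodeMeshToBuffer(mesh, &buffer);
  //   const size_t points = encoder.num_encoded_points();
  //   const size_t faces = encoder.num_encoded_faces();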
+ size_t num_encoded_points() const { return num_encoded_points_; } + size_t num_encoded_faces() const { return num_encoded_faces_; } + + protected: + void Reset(const EncoderOptionsT &options) { options_ = options; } + + void Reset() { options_ = EncoderOptionsT::CreateDefaultOptions(); } + + void SetSpeedOptions(int encoding_speed, int decoding_speed) { + options_.SetSpeed(encoding_speed, decoding_speed); + } + + void SetEncodingMethod(int encoding_method) { + options_.SetGlobalInt("encoding_method", encoding_method); + } + + void SetEncodingSubmethod(int encoding_submethod) { + options_.SetGlobalInt("encoding_submethod", encoding_submethod); + } + + Status CheckPredictionScheme(GeometryAttribute::Type att_type, + int prediction_scheme) const { + // Out of bound checks: + if (prediction_scheme < PREDICTION_NONE) { + return Status(Status::DRACO_ERROR, + "Invalid prediction scheme requested."); + } + if (prediction_scheme >= NUM_PREDICTION_SCHEMES) { + return Status(Status::DRACO_ERROR, + "Invalid prediction scheme requested."); + } + // Deprecated prediction schemes: + if (prediction_scheme == MESH_PREDICTION_TEX_COORDS_DEPRECATED) { + return Status(Status::DRACO_ERROR, + "MESH_PREDICTION_TEX_COORDS_DEPRECATED is deprecated."); + } + if (prediction_scheme == MESH_PREDICTION_MULTI_PARALLELOGRAM) { + return Status(Status::DRACO_ERROR, + "MESH_PREDICTION_MULTI_PARALLELOGRAM is deprecated."); + } + // Attribute specific checks: + if (prediction_scheme == MESH_PREDICTION_TEX_COORDS_PORTABLE) { + if (att_type != GeometryAttribute::TEX_COORD) { + return Status(Status::DRACO_ERROR, + "Invalid prediction scheme for attribute type."); + } + } + if (prediction_scheme == MESH_PREDICTION_GEOMETRIC_NORMAL) { + if (att_type != GeometryAttribute::NORMAL) { + return Status(Status::DRACO_ERROR, + "Invalid prediction scheme for attribute type."); + } + } + // TODO(hemmer): Try to enable more prediction schemes for normals. + if (att_type == GeometryAttribute::NORMAL) { + if (!(prediction_scheme == PREDICTION_DIFFERENCE || + prediction_scheme == MESH_PREDICTION_GEOMETRIC_NORMAL)) { + return Status(Status::DRACO_ERROR, + "Invalid prediction scheme for attribute type."); + } + } + return OkStatus(); + } + + protected: + void set_num_encoded_points(size_t num) { num_encoded_points_ = num; } + void set_num_encoded_faces(size_t num) { num_encoded_faces_ = num; } + + private: + EncoderOptionsT options_; + + size_t num_encoded_points_; + size_t num_encoded_faces_; +}; + +template +void EncoderBase::SetTrackEncodedProperties(bool flag) { + options_.SetGlobalBool("store_number_of_encoded_points", flag); + options_.SetGlobalBool("store_number_of_encoded_faces", flag); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENCODE_BASE_H_ diff --git a/contrib/draco/src/draco/compression/encode_test.cc b/contrib/draco/src/draco/compression/encode_test.cc new file mode 100644 index 000000000..fde4f6f5b --- /dev/null +++ b/contrib/draco/src/draco/compression/encode_test.cc @@ -0,0 +1,407 @@ +// Copyright 2017 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "draco/compression/encode.h" + +#include +#include +#include + +#include "draco/attributes/attribute_quantization_transform.h" +#include "draco/compression/config/compression_shared.h" +#include "draco/compression/decode.h" +#include "draco/compression/expert_encode.h" +#include "draco/core/draco_test_base.h" +#include "draco/core/draco_test_utils.h" +#include "draco/core/vector_d.h" +#include "draco/io/obj_decoder.h" +#include "draco/mesh/triangle_soup_mesh_builder.h" +#include "draco/point_cloud/point_cloud_builder.h" + +namespace { + +class EncodeTest : public ::testing::Test { + protected: + EncodeTest() {} + std::unique_ptr CreateTestMesh() const { + draco::TriangleSoupMeshBuilder mesh_builder; + + // Create a simple mesh with one face. + mesh_builder.Start(1); + + // Add one position attribute and two texture coordinate attributes. + const int32_t pos_att_id = mesh_builder.AddAttribute( + draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32); + const int32_t tex_att_id_0 = mesh_builder.AddAttribute( + draco::GeometryAttribute::TEX_COORD, 2, draco::DT_FLOAT32); + const int32_t tex_att_id_1 = mesh_builder.AddAttribute( + draco::GeometryAttribute::TEX_COORD, 2, draco::DT_FLOAT32); + + // Initialize the attribute values. + mesh_builder.SetAttributeValuesForFace( + pos_att_id, draco::FaceIndex(0), draco::Vector3f(0.f, 0.f, 0.f).data(), + draco::Vector3f(1.f, 0.f, 0.f).data(), + draco::Vector3f(1.f, 1.f, 0.f).data()); + mesh_builder.SetAttributeValuesForFace( + tex_att_id_0, draco::FaceIndex(0), draco::Vector2f(0.f, 0.f).data(), + draco::Vector2f(1.f, 0.f).data(), draco::Vector2f(1.f, 1.f).data()); + mesh_builder.SetAttributeValuesForFace( + tex_att_id_1, draco::FaceIndex(0), draco::Vector2f(0.f, 0.f).data(), + draco::Vector2f(1.f, 0.f).data(), draco::Vector2f(1.f, 1.f).data()); + + return mesh_builder.Finalize(); + } + + std::unique_ptr CreateTestPointCloud() const { + draco::PointCloudBuilder pc_builder; + + constexpr int kNumPoints = 100; + constexpr int kNumGenAttCoords0 = 4; + constexpr int kNumGenAttCoords1 = 6; + pc_builder.Start(kNumPoints); + + // Add one position attribute and two generic attributes. + const int32_t pos_att_id = pc_builder.AddAttribute( + draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32); + const int32_t gen_att_id_0 = pc_builder.AddAttribute( + draco::GeometryAttribute::GENERIC, kNumGenAttCoords0, draco::DT_UINT32); + const int32_t gen_att_id_1 = pc_builder.AddAttribute( + draco::GeometryAttribute::GENERIC, kNumGenAttCoords1, draco::DT_UINT8); + + std::vector gen_att_data_0(kNumGenAttCoords0); + std::vector gen_att_data_1(kNumGenAttCoords1); + + // Initialize the attribute values. 
+ for (draco::PointIndex i(0); i < kNumPoints; ++i) { + const float pos_coord = static_cast(i.value()); + pc_builder.SetAttributeValueForPoint( + pos_att_id, i, + draco::Vector3f(pos_coord, -pos_coord, pos_coord).data()); + + for (int j = 0; j < kNumGenAttCoords0; ++j) { + gen_att_data_0[j] = i.value(); + } + pc_builder.SetAttributeValueForPoint(gen_att_id_0, i, + gen_att_data_0.data()); + + for (int j = 0; j < kNumGenAttCoords1; ++j) { + gen_att_data_1[j] = -i.value(); + } + pc_builder.SetAttributeValueForPoint(gen_att_id_1, i, + gen_att_data_1.data()); + } + return pc_builder.Finalize(false); + } + + std::unique_ptr CreateTestPointCloudPosNorm() const { + draco::PointCloudBuilder pc_builder; + + constexpr int kNumPoints = 20; + pc_builder.Start(kNumPoints); + + // Add one position attribute and a normal attribute. + const int32_t pos_att_id = pc_builder.AddAttribute( + draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32); + const int32_t norm_att_id = pc_builder.AddAttribute( + draco::GeometryAttribute::NORMAL, 3, draco::DT_FLOAT32); + + // Initialize the attribute values. + for (draco::PointIndex i(0); i < kNumPoints; ++i) { + const float pos_coord = static_cast(i.value()); + pc_builder.SetAttributeValueForPoint( + pos_att_id, i, + draco::Vector3f(pos_coord, -pos_coord, pos_coord).data()); + + // Pseudo-random normal. + draco::Vector3f norm(pos_coord * 2.f, pos_coord - 2.f, pos_coord * 3.f); + norm.Normalize(); + pc_builder.SetAttributeValueForPoint(norm_att_id, i, norm.data()); + } + + return pc_builder.Finalize(false); + } + + int GetQuantizationBitsFromAttribute(const draco::PointAttribute *att) const { + if (att == nullptr) { + return -1; + } + draco::AttributeQuantizationTransform transform; + if (!transform.InitFromAttribute(*att)) { + return -1; + } + return transform.quantization_bits(); + } + + void VerifyNumQuantizationBits(const draco::EncoderBuffer &buffer, + int pos_quantization, + int tex_coord_0_quantization, + int tex_coord_1_quantization) const { + draco::Decoder decoder; + + // Skip the dequantization for the attributes which will allow us to get + // the number of quantization bits used during encoding. + decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION); + decoder.SetSkipAttributeTransform(draco::GeometryAttribute::TEX_COORD); + + draco::DecoderBuffer in_buffer; + in_buffer.Init(buffer.data(), buffer.size()); + auto mesh = decoder.DecodeMeshFromBuffer(&in_buffer).value(); + ASSERT_NE(mesh, nullptr); + ASSERT_EQ(GetQuantizationBitsFromAttribute(mesh->attribute(0)), + pos_quantization); + ASSERT_EQ(GetQuantizationBitsFromAttribute(mesh->attribute(1)), + tex_coord_0_quantization); + ASSERT_EQ(GetQuantizationBitsFromAttribute(mesh->attribute(2)), + tex_coord_1_quantization); + } + + // Tests that the encoder returns the correct number of encoded points and + // faces for a given mesh or point cloud. 
+ void TestNumberOfEncodedEntries(const std::string &file_name, + int32_t encoding_method) { + std::unique_ptr geometry; + draco::Mesh *mesh = nullptr; + + if (encoding_method == draco::MESH_EDGEBREAKER_ENCODING || + encoding_method == draco::MESH_SEQUENTIAL_ENCODING) { + std::unique_ptr mesh_tmp = + draco::ReadMeshFromTestFile(file_name); + mesh = mesh_tmp.get(); + if (!mesh->DeduplicateAttributeValues()) { + return; + } + mesh->DeduplicatePointIds(); + geometry = std::move(mesh_tmp); + } else { + geometry = draco::ReadPointCloudFromTestFile(file_name); + } + ASSERT_NE(mesh, nullptr); + + draco::Encoder encoder; + encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 14); + encoder.SetAttributeQuantization(draco::GeometryAttribute::TEX_COORD, 12); + encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 10); + + encoder.SetEncodingMethod(encoding_method); + + encoder.SetTrackEncodedProperties(true); + + draco::EncoderBuffer buffer; + if (mesh) { + encoder.EncodeMeshToBuffer(*mesh, &buffer); + } else { + encoder.EncodePointCloudToBuffer(*geometry, &buffer); + } + + // Ensure the logged number of encoded points and faces matches the number + // we get from the decoder. + + draco::DecoderBuffer decoder_buffer; + decoder_buffer.Init(buffer.data(), buffer.size()); + draco::Decoder decoder; + + if (mesh) { + auto maybe_mesh = decoder.DecodeMeshFromBuffer(&decoder_buffer); + ASSERT_TRUE(maybe_mesh.ok()); + auto decoded_mesh = std::move(maybe_mesh).value(); + ASSERT_NE(decoded_mesh, nullptr); + ASSERT_EQ(decoded_mesh->num_points(), encoder.num_encoded_points()); + ASSERT_EQ(decoded_mesh->num_faces(), encoder.num_encoded_faces()); + } else { + auto maybe_pc = decoder.DecodePointCloudFromBuffer(&decoder_buffer); + ASSERT_TRUE(maybe_pc.ok()); + auto decoded_pc = std::move(maybe_pc).value(); + ASSERT_EQ(decoded_pc->num_points(), encoder.num_encoded_points()); + } + } +}; + +TEST_F(EncodeTest, TestExpertEncoderQuantization) { + // This test verifies that the expert encoder can quantize individual + // attributes even if they have the same type. + auto mesh = CreateTestMesh(); + ASSERT_NE(mesh, nullptr); + + draco::ExpertEncoder encoder(*mesh); + encoder.SetAttributeQuantization(0, 16); // Position quantization. + encoder.SetAttributeQuantization(1, 15); // Tex-coord 0 quantization. + encoder.SetAttributeQuantization(2, 14); // Tex-coord 1 quantization. + + draco::EncoderBuffer buffer; + encoder.EncodeToBuffer(&buffer); + VerifyNumQuantizationBits(buffer, 16, 15, 14); +} + +TEST_F(EncodeTest, TestEncoderQuantization) { + // This test verifies that Encoder applies the same quantization to all + // attributes of the same type. + auto mesh = CreateTestMesh(); + ASSERT_NE(mesh, nullptr); + + draco::Encoder encoder; + encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 16); + encoder.SetAttributeQuantization(draco::GeometryAttribute::TEX_COORD, 15); + + draco::EncoderBuffer buffer; + encoder.EncodeMeshToBuffer(*mesh, &buffer); + VerifyNumQuantizationBits(buffer, 16, 15, 15); +} + +TEST_F(EncodeTest, TestLinesObj) { + // This test verifies that Encoder can encode file that contains only line + // segments (that are ignored). 
+ std::unique_ptr mesh( + draco::ReadMeshFromTestFile("test_lines.obj")); + ASSERT_NE(mesh, nullptr); + ASSERT_EQ(mesh->num_faces(), 0); + std::unique_ptr pc( + draco::ReadPointCloudFromTestFile("test_lines.obj")); + ASSERT_NE(pc, nullptr); + + draco::Encoder encoder; + encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 16); + + draco::EncoderBuffer buffer; + ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); +} + +TEST_F(EncodeTest, TestQuantizedInfinity) { + // This test verifies that Encoder fails to encode point cloud when requesting + // quantization of attribute that contains infinity values. + std::unique_ptr pc( + draco::ReadPointCloudFromTestFile("float_inf_point_cloud.ply")); + ASSERT_NE(pc, nullptr); + + { + draco::Encoder encoder; + encoder.SetEncodingMethod(draco::POINT_CLOUD_SEQUENTIAL_ENCODING); + encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 11); + + draco::EncoderBuffer buffer; + ASSERT_FALSE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); + } + + { + draco::Encoder encoder; + encoder.SetEncodingMethod(draco::POINT_CLOUD_KD_TREE_ENCODING); + encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 11); + + draco::EncoderBuffer buffer; + ASSERT_FALSE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); + } +} + +TEST_F(EncodeTest, TestUnquantizedInfinity) { + // This test verifies that Encoder can successfully encode point cloud when + // not requesting quantization of attribute that contains infinity values. + std::unique_ptr pc( + draco::ReadPointCloudFromTestFile("float_inf_point_cloud.ply")); + ASSERT_NE(pc, nullptr); + + // Note that the KD tree encoding method is not applicable to float values. + draco::Encoder encoder; + encoder.SetEncodingMethod(draco::POINT_CLOUD_SEQUENTIAL_ENCODING); + + draco::EncoderBuffer buffer; + ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); +} + +TEST_F(EncodeTest, TestQuantizedAndUnquantizedAttributes) { + // This test verifies that Encoder can successfully encode point cloud with + // two float attribiutes - one quantized and another unquantized. The encoder + // defaults to sequential encoding in this case. + std::unique_ptr pc( + draco::ReadPointCloudFromTestFile("float_two_att_point_cloud.ply")); + ASSERT_NE(pc, nullptr); + + draco::Encoder encoder; + encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 11); + encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 0); + draco::EncoderBuffer buffer; + ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); +} + +TEST_F(EncodeTest, TestKdTreeEncoding) { + // This test verifies that the API can successfully encode a point cloud + // defined by several attributes using the kd tree method. + std::unique_ptr pc = CreateTestPointCloud(); + ASSERT_NE(pc, nullptr); + + draco::EncoderBuffer buffer; + draco::Encoder encoder; + encoder.SetEncodingMethod(draco::POINT_CLOUD_KD_TREE_ENCODING); + // First try it without quantizing positions which should fail. + ASSERT_FALSE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); + + // Now set quantization for the position attribute which should make + // the encoder happy. 
+ encoder.SetAttributeQuantization(draco::GeometryAttribute::POSITION, 16); + ASSERT_TRUE(encoder.EncodePointCloudToBuffer(*pc, &buffer).ok()); +} + +TEST_F(EncodeTest, TestTrackingOfNumberOfEncodedEntries) { + TestNumberOfEncodedEntries("deg_faces.obj", draco::MESH_EDGEBREAKER_ENCODING); + TestNumberOfEncodedEntries("deg_faces.obj", draco::MESH_SEQUENTIAL_ENCODING); + TestNumberOfEncodedEntries("cube_att.obj", draco::MESH_EDGEBREAKER_ENCODING); + TestNumberOfEncodedEntries("test_nm.obj", draco::MESH_EDGEBREAKER_ENCODING); + TestNumberOfEncodedEntries("test_nm.obj", draco::MESH_SEQUENTIAL_ENCODING); + TestNumberOfEncodedEntries("cube_subd.obj", + draco::POINT_CLOUD_KD_TREE_ENCODING); + TestNumberOfEncodedEntries("cube_subd.obj", + draco::POINT_CLOUD_SEQUENTIAL_ENCODING); +} + +TEST_F(EncodeTest, TestTrackingOfNumberOfEncodedEntriesNotSet) { + // Tests that when tracing of encoded properties is disabled, the returned + // number of encoded faces and points is 0. + std::unique_ptr mesh( + draco::ReadMeshFromTestFile("cube_att.obj")); + ASSERT_NE(mesh, nullptr); + + draco::EncoderBuffer buffer; + draco::Encoder encoder; + + ASSERT_TRUE(encoder.EncodeMeshToBuffer(*mesh, &buffer).ok()); + ASSERT_EQ(encoder.num_encoded_points(), 0); + ASSERT_EQ(encoder.num_encoded_faces(), 0); +} + +TEST_F(EncodeTest, TestNoPosQuantizationNormalCoding) { + // Tests that we can encode and decode a file with quantized normals but + // non-quantized positions. + const auto mesh = draco::ReadMeshFromTestFile("test_nm.obj"); + ASSERT_NE(mesh, nullptr); + + // The mesh should have positions and normals. + ASSERT_NE(mesh->GetNamedAttribute(draco::GeometryAttribute::POSITION), + nullptr); + ASSERT_NE(mesh->GetNamedAttribute(draco::GeometryAttribute::NORMAL), nullptr); + + draco::EncoderBuffer buffer; + draco::Encoder encoder; + // No quantization for positions. + encoder.SetAttributeQuantization(draco::GeometryAttribute::NORMAL, 8); + + DRACO_ASSERT_OK(encoder.EncodeMeshToBuffer(*mesh, &buffer)); + + draco::Decoder decoder; + + draco::DecoderBuffer in_buffer; + in_buffer.Init(buffer.data(), buffer.size()); + const auto decoded_mesh = decoder.DecodeMeshFromBuffer(&in_buffer).value(); + ASSERT_NE(decoded_mesh, nullptr); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/entropy/ans.h b/contrib/draco/src/draco/compression/entropy/ans.h new file mode 100644 index 000000000..c71d58975 --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/ans.h @@ -0,0 +1,527 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ENTROPY_ANS_H_ +#define DRACO_COMPRESSION_ENTROPY_ANS_H_ +// An implementation of Asymmetric Numeral Systems (rANS). +// See http://arxiv.org/abs/1311.2540v2 for more information on rANS. +// This file is based off libvpx's ans.h. 
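// Illustrative round trip through the binary coder defined below (hypothetical
// snippet, not part of the original header; p0 / 256 is the probability of the
// encoded symbol being zero):
//
//   uint8_t buf[64];
//   draco::AnsCoder coder;
//   draco::ans_write_init(&coder, buf);
//   draco::uabs_write(&coder, /*val=*/1, /*p0=*/192);
//   const int size = draco::ans_write_end(&coder);
//
//   draco::AnsDecoder decoder;
//   draco::ans_read_init(&decoder, buf, size);  // Returns 0 on success.
//   const int val = draco::uabs_read(&decoder, /*p0=*/192);  // val == 1.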
+ +#include + +#define DRACO_ANS_DIVIDE_BY_MULTIPLY 1 +#if DRACO_ANS_DIVIDE_BY_MULTIPLY +#include "draco/core/divide.h" +#endif +#include "draco/core/macros.h" + +namespace draco { + +#if DRACO_ANS_DIVIDE_BY_MULTIPLY + +#define DRACO_ANS_DIVREM(quotient, remainder, dividend, divisor) \ + do { \ + quotient = fastdiv(dividend, divisor); \ + remainder = dividend - quotient * divisor; \ + } while (0) +#define DRACO_ANS_DIV(dividend, divisor) fastdiv(dividend, divisor) +#else +#define DRACO_ANS_DIVREM(quotient, remainder, dividend, divisor) \ + do { \ + quotient = dividend / divisor; \ + remainder = dividend % divisor; \ + } while (0) +#define DRACO_ANS_DIV(dividend, divisor) ((dividend) / (divisor)) +#endif + +struct AnsCoder { + AnsCoder() : buf(nullptr), buf_offset(0), state(0) {} + uint8_t *buf; + int buf_offset; + uint32_t state; +}; + +struct AnsDecoder { + AnsDecoder() : buf(nullptr), buf_offset(0), state(0) {} + const uint8_t *buf; + int buf_offset; + uint32_t state; +}; + +typedef uint8_t AnsP8; +#define DRACO_ANS_P8_PRECISION 256u +#define DRACO_ANS_L_BASE (4096u) +#define DRACO_ANS_IO_BASE 256 + +static uint32_t mem_get_le16(const void *vmem) { + uint32_t val; + const uint8_t *mem = (const uint8_t *)vmem; + + val = mem[1] << 8; + val |= mem[0]; + return val; +} + +static uint32_t mem_get_le24(const void *vmem) { + uint32_t val; + const uint8_t *mem = (const uint8_t *)vmem; + + val = mem[2] << 16; + val |= mem[1] << 8; + val |= mem[0]; + return val; +} + +static inline uint32_t mem_get_le32(const void *vmem) { + uint32_t val; + const uint8_t *mem = (const uint8_t *)vmem; + + val = mem[3] << 24; + val |= mem[2] << 16; + val |= mem[1] << 8; + val |= mem[0]; + return val; +} + +static inline void mem_put_le16(void *vmem, uint32_t val) { + uint8_t *mem = reinterpret_cast(vmem); + + mem[0] = (val >> 0) & 0xff; + mem[1] = (val >> 8) & 0xff; +} + +static inline void mem_put_le24(void *vmem, uint32_t val) { + uint8_t *mem = reinterpret_cast(vmem); + + mem[0] = (val >> 0) & 0xff; + mem[1] = (val >> 8) & 0xff; + mem[2] = (val >> 16) & 0xff; +} + +static inline void mem_put_le32(void *vmem, uint32_t val) { + uint8_t *mem = reinterpret_cast(vmem); + + mem[0] = (val >> 0) & 0xff; + mem[1] = (val >> 8) & 0xff; + mem[2] = (val >> 16) & 0xff; + mem[3] = (val >> 24) & 0xff; +} + +static inline void ans_write_init(struct AnsCoder *const ans, + uint8_t *const buf) { + ans->buf = buf; + ans->buf_offset = 0; + ans->state = DRACO_ANS_L_BASE; +} + +static inline int ans_write_end(struct AnsCoder *const ans) { + uint32_t state; + DRACO_DCHECK_GE(ans->state, DRACO_ANS_L_BASE); + DRACO_DCHECK_LT(ans->state, DRACO_ANS_L_BASE * DRACO_ANS_IO_BASE); + state = ans->state - DRACO_ANS_L_BASE; + if (state < (1 << 6)) { + ans->buf[ans->buf_offset] = (0x00 << 6) + state; + return ans->buf_offset + 1; + } else if (state < (1 << 14)) { + mem_put_le16(ans->buf + ans->buf_offset, (0x01 << 14) + state); + return ans->buf_offset + 2; + } else if (state < (1 << 22)) { + mem_put_le24(ans->buf + ans->buf_offset, (0x02 << 22) + state); + return ans->buf_offset + 3; + } else { + DRACO_DCHECK(0 && "State is too large to be serialized"); + return ans->buf_offset; + } +} + +// rABS with descending spread. +// p or p0 takes the place of l_s from the paper. +// DRACO_ANS_P8_PRECISION is m. +static inline void rabs_desc_write(struct AnsCoder *ans, int val, AnsP8 p0) { + const AnsP8 p = DRACO_ANS_P8_PRECISION - p0; + const unsigned l_s = val ? 
p : p0; + unsigned quot, rem; + if (ans->state >= + DRACO_ANS_L_BASE / DRACO_ANS_P8_PRECISION * DRACO_ANS_IO_BASE * l_s) { + ans->buf[ans->buf_offset++] = ans->state % DRACO_ANS_IO_BASE; + ans->state /= DRACO_ANS_IO_BASE; + } + DRACO_ANS_DIVREM(quot, rem, ans->state, l_s); + ans->state = quot * DRACO_ANS_P8_PRECISION + rem + (val ? 0 : p); +} + +#define DRACO_ANS_IMPL1 0 +#define UNPREDICTABLE(x) x +static inline int rabs_desc_read(struct AnsDecoder *ans, AnsP8 p0) { + int val; +#if DRACO_ANS_IMPL1 + unsigned l_s; +#else + unsigned quot, rem, x, xn; +#endif + const AnsP8 p = DRACO_ANS_P8_PRECISION - p0; + if (ans->state < DRACO_ANS_L_BASE && ans->buf_offset > 0) { + ans->state = ans->state * DRACO_ANS_IO_BASE + ans->buf[--ans->buf_offset]; + } +#if DRACO_ANS_IMPL1 + val = ans->state % DRACO_ANS_P8_PRECISION < p; + l_s = val ? p : p0; + ans->state = (ans->state / DRACO_ANS_P8_PRECISION) * l_s + + ans->state % DRACO_ANS_P8_PRECISION - (!val * p); +#else + x = ans->state; + quot = x / DRACO_ANS_P8_PRECISION; + rem = x % DRACO_ANS_P8_PRECISION; + xn = quot * p; + val = rem < p; + if (UNPREDICTABLE(val)) { + ans->state = xn + rem; + } else { + // ans->state = quot * p0 + rem - p; + ans->state = x - xn - p; + } +#endif + return val; +} + +// rABS with ascending spread. +// p or p0 takes the place of l_s from the paper. +// DRACO_ANS_P8_PRECISION is m. +static inline void rabs_asc_write(struct AnsCoder *ans, int val, AnsP8 p0) { + const AnsP8 p = DRACO_ANS_P8_PRECISION - p0; + const unsigned l_s = val ? p : p0; + unsigned quot, rem; + if (ans->state >= + DRACO_ANS_L_BASE / DRACO_ANS_P8_PRECISION * DRACO_ANS_IO_BASE * l_s) { + ans->buf[ans->buf_offset++] = ans->state % DRACO_ANS_IO_BASE; + ans->state /= DRACO_ANS_IO_BASE; + } + DRACO_ANS_DIVREM(quot, rem, ans->state, l_s); + ans->state = quot * DRACO_ANS_P8_PRECISION + rem + (val ? p0 : 0); +} + +static inline int rabs_asc_read(struct AnsDecoder *ans, AnsP8 p0) { + int val; +#if DRACO_ANS_IMPL1 + unsigned l_s; +#else + unsigned quot, rem, x, xn; +#endif + const AnsP8 p = DRACO_ANS_P8_PRECISION - p0; + if (ans->state < DRACO_ANS_L_BASE) { + ans->state = ans->state * DRACO_ANS_IO_BASE + ans->buf[--ans->buf_offset]; + } +#if DRACO_ANS_IMPL1 + val = ans->state % DRACO_ANS_P8_PRECISION < p; + l_s = val ? p : p0; + ans->state = (ans->state / DRACO_ANS_P8_PRECISION) * l_s + + ans->state % DRACO_ANS_P8_PRECISION - (!val * p); +#else + x = ans->state; + quot = x / DRACO_ANS_P8_PRECISION; + rem = x % DRACO_ANS_P8_PRECISION; + xn = quot * p; + val = rem >= p0; + if (UNPREDICTABLE(val)) { + ans->state = xn + rem - p0; + } else { + // ans->state = quot * p0 + rem - p0; + ans->state = x - xn; + } +#endif + return val; +} + +#define rabs_read rabs_desc_read +#define rabs_write rabs_desc_write + +// uABS with normalization. +static inline void uabs_write(struct AnsCoder *ans, int val, AnsP8 p0) { + AnsP8 p = DRACO_ANS_P8_PRECISION - p0; + const unsigned l_s = val ? 
p : p0; + while (ans->state >= + DRACO_ANS_L_BASE / DRACO_ANS_P8_PRECISION * DRACO_ANS_IO_BASE * l_s) { + ans->buf[ans->buf_offset++] = ans->state % DRACO_ANS_IO_BASE; + ans->state /= DRACO_ANS_IO_BASE; + } + if (!val) { + ans->state = DRACO_ANS_DIV(ans->state * DRACO_ANS_P8_PRECISION, p0); + } else { + ans->state = + DRACO_ANS_DIV((ans->state + 1) * DRACO_ANS_P8_PRECISION + p - 1, p) - 1; + } +} + +static inline int uabs_read(struct AnsDecoder *ans, AnsP8 p0) { + AnsP8 p = DRACO_ANS_P8_PRECISION - p0; + int s; + // unsigned int xp1; + unsigned xp, sp; + unsigned state = ans->state; + while (state < DRACO_ANS_L_BASE && ans->buf_offset > 0) { + state = state * DRACO_ANS_IO_BASE + ans->buf[--ans->buf_offset]; + } + sp = state * p; + // xp1 = (sp + p) / DRACO_ANS_P8_PRECISION; + xp = sp / DRACO_ANS_P8_PRECISION; + // s = xp1 - xp; + s = (sp & 0xFF) >= p0; + if (UNPREDICTABLE(s)) { + ans->state = xp; + } else { + ans->state = state - xp; + } + return s; +} + +static inline int uabs_read_bit(struct AnsDecoder *ans) { + int s; + unsigned state = ans->state; + while (state < DRACO_ANS_L_BASE && ans->buf_offset > 0) { + state = state * DRACO_ANS_IO_BASE + ans->buf[--ans->buf_offset]; + } + s = static_cast(state & 1); + ans->state = state >> 1; + return s; +} + +static inline int ans_read_init(struct AnsDecoder *const ans, + const uint8_t *const buf, int offset) { + unsigned x; + if (offset < 1) { + return 1; + } + ans->buf = buf; + x = buf[offset - 1] >> 6; + if (x == 0) { + ans->buf_offset = offset - 1; + ans->state = buf[offset - 1] & 0x3F; + } else if (x == 1) { + if (offset < 2) { + return 1; + } + ans->buf_offset = offset - 2; + ans->state = mem_get_le16(buf + offset - 2) & 0x3FFF; + } else if (x == 2) { + if (offset < 3) { + return 1; + } + ans->buf_offset = offset - 3; + ans->state = mem_get_le24(buf + offset - 3) & 0x3FFFFF; + } else { + return 1; + } + ans->state += DRACO_ANS_L_BASE; + if (ans->state >= DRACO_ANS_L_BASE * DRACO_ANS_IO_BASE) { + return 1; + } + return 0; +} + +static inline int ans_read_end(struct AnsDecoder *const ans) { + return ans->state == DRACO_ANS_L_BASE; +} + +static inline int ans_reader_has_error(const struct AnsDecoder *const ans) { + return ans->state < DRACO_ANS_L_BASE && ans->buf_offset == 0; +} + +struct rans_sym { + uint32_t prob; + uint32_t cum_prob; // not-inclusive. +}; + +// Class for performing rANS encoding using a desired number of precision bits. +// The max number of precision bits is currently 19. The actual number of +// symbols in the input alphabet should be (much) smaller than that, otherwise +// the compression rate may suffer. +template +class RAnsEncoder { + public: + RAnsEncoder() {} + + // Provides the input buffer where the data is going to be stored. + inline void write_init(uint8_t *const buf) { + ans_.buf = buf; + ans_.buf_offset = 0; + ans_.state = l_rans_base; + } + + // Needs to be called after all symbols are encoded. 
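+  // The final state (minus l_rans_base) is appended to the buffer in one to
+  // four bytes; the top two bits of the last written byte record how many
+  // bytes were used (0x0 -> 1, 0x1 -> 2, 0x2 -> 3, 0x3 -> 4), which is what
+  // the decoder's read_init() inspects to recover the state.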
+ inline int write_end() { + uint32_t state; + DRACO_DCHECK_GE(ans_.state, l_rans_base); + DRACO_DCHECK_LT(ans_.state, l_rans_base * DRACO_ANS_IO_BASE); + state = ans_.state - l_rans_base; + if (state < (1 << 6)) { + ans_.buf[ans_.buf_offset] = (0x00 << 6) + state; + return ans_.buf_offset + 1; + } else if (state < (1 << 14)) { + mem_put_le16(ans_.buf + ans_.buf_offset, (0x01 << 14) + state); + return ans_.buf_offset + 2; + } else if (state < (1 << 22)) { + mem_put_le24(ans_.buf + ans_.buf_offset, (0x02 << 22) + state); + return ans_.buf_offset + 3; + } else if (state < (1 << 30)) { + mem_put_le32(ans_.buf + ans_.buf_offset, (0x03u << 30u) + state); + return ans_.buf_offset + 4; + } else { + DRACO_DCHECK(0 && "State is too large to be serialized"); + return ans_.buf_offset; + } + } + + // rANS with normalization. + // sym->prob takes the place of l_s from the paper. + // rans_precision is m. + inline void rans_write(const struct rans_sym *const sym) { + const uint32_t p = sym->prob; + while (ans_.state >= l_rans_base / rans_precision * DRACO_ANS_IO_BASE * p) { + ans_.buf[ans_.buf_offset++] = ans_.state % DRACO_ANS_IO_BASE; + ans_.state /= DRACO_ANS_IO_BASE; + } + // TODO(ostava): The division and multiplication should be optimized. + ans_.state = + (ans_.state / p) * rans_precision + ans_.state % p + sym->cum_prob; + } + + private: + static constexpr int rans_precision = 1 << rans_precision_bits_t; + static constexpr int l_rans_base = rans_precision * 4; + AnsCoder ans_; +}; + +struct rans_dec_sym { + uint32_t val; + uint32_t prob; + uint32_t cum_prob; // not-inclusive. +}; + +// Class for performing rANS decoding using a desired number of precision bits. +// The number of precision bits needs to be the same as with the RAnsEncoder +// that was used to encode the input data. +template +class RAnsDecoder { + public: + RAnsDecoder() {} + + // Initializes the decoder from the input buffer. The |offset| specifies the + // number of bytes encoded by the encoder. A non zero return value is an + // error. + inline int read_init(const uint8_t *const buf, int offset) { + unsigned x; + if (offset < 1) { + return 1; + } + ans_.buf = buf; + x = buf[offset - 1] >> 6; + if (x == 0) { + ans_.buf_offset = offset - 1; + ans_.state = buf[offset - 1] & 0x3F; + } else if (x == 1) { + if (offset < 2) { + return 1; + } + ans_.buf_offset = offset - 2; + ans_.state = mem_get_le16(buf + offset - 2) & 0x3FFF; + } else if (x == 2) { + if (offset < 3) { + return 1; + } + ans_.buf_offset = offset - 3; + ans_.state = mem_get_le24(buf + offset - 3) & 0x3FFFFF; + } else if (x == 3) { + ans_.buf_offset = offset - 4; + ans_.state = mem_get_le32(buf + offset - 4) & 0x3FFFFFFF; + } else { + return 1; + } + ans_.state += l_rans_base; + if (ans_.state >= l_rans_base * DRACO_ANS_IO_BASE) { + return 1; + } + return 0; + } + + inline int read_end() { return ans_.state == l_rans_base; } + + inline int reader_has_error() { + return ans_.state < l_rans_base && ans_.buf_offset == 0; + } + + inline int rans_read() { + unsigned rem; + unsigned quo; + struct rans_dec_sym sym; + while (ans_.state < l_rans_base && ans_.buf_offset > 0) { + ans_.state = ans_.state * DRACO_ANS_IO_BASE + ans_.buf[--ans_.buf_offset]; + } + // |rans_precision| is a power of two compile time constant, and the below + // division and modulo are going to be optimized by the compiler. 
+ quo = ans_.state / rans_precision; + rem = ans_.state % rans_precision; + fetch_sym(&sym, rem); + ans_.state = quo * sym.prob + rem - sym.cum_prob; + return sym.val; + } + + // Construct a lookup table with |rans_precision| number of entries. + // Returns false if the table couldn't be built (because of wrong input data). + inline bool rans_build_look_up_table(const uint32_t token_probs[], + uint32_t num_symbols) { + lut_table_.resize(rans_precision); + probability_table_.resize(num_symbols); + uint32_t cum_prob = 0; + uint32_t act_prob = 0; + for (uint32_t i = 0; i < num_symbols; ++i) { + probability_table_[i].prob = token_probs[i]; + probability_table_[i].cum_prob = cum_prob; + cum_prob += token_probs[i]; + if (cum_prob > rans_precision) { + return false; + } + for (uint32_t j = act_prob; j < cum_prob; ++j) { + lut_table_[j] = i; + } + act_prob = cum_prob; + } + if (cum_prob != rans_precision) { + return false; + } + return true; + } + + private: + inline void fetch_sym(struct rans_dec_sym *out, uint32_t rem) { + uint32_t symbol = lut_table_[rem]; + out->val = symbol; + out->prob = probability_table_[symbol].prob; + out->cum_prob = probability_table_[symbol].cum_prob; + } + + static constexpr int rans_precision = 1 << rans_precision_bits_t; + static constexpr int l_rans_base = rans_precision * 4; + std::vector lut_table_; + std::vector probability_table_; + AnsDecoder ans_; +}; + +#undef DRACO_ANS_DIVREM +#undef DRACO_ANS_P8_PRECISION +#undef DRACO_ANS_L_BASE +#undef DRACO_ANS_IO_BASE + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENTROPY_ANS_H_ diff --git a/contrib/draco/src/draco/compression/entropy/rans_symbol_coding.h b/contrib/draco/src/draco/compression/entropy/rans_symbol_coding.h new file mode 100644 index 000000000..cd4271193 --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/rans_symbol_coding.h @@ -0,0 +1,53 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// File providing shared functionality for RAnsSymbolEncoder and +// RAnsSymbolDecoder (see rans_symbol_encoder.h / rans_symbol_decoder.h). +#ifndef DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_CODING_H_ +#define DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_CODING_H_ + +#include "draco/compression/entropy/ans.h" + +namespace draco { + +// Computes the desired precision of the rANS method for the specified number of +// unique symbols the input data (defined by their bit_length). +constexpr int ComputeRAnsUnclampedPrecision(int symbols_bit_length) { + return (3 * symbols_bit_length) / 2; +} + +// Computes the desired precision clamped to guarantee a valid functionality of +// our rANS library (which is between 12 to 20 bits). +constexpr int ComputeRAnsPrecisionFromUniqueSymbolsBitLength( + int symbols_bit_length) { + return ComputeRAnsUnclampedPrecision(symbols_bit_length) < 12 ? 12 + : ComputeRAnsUnclampedPrecision(symbols_bit_length) > 20 + ? 
20 + : ComputeRAnsUnclampedPrecision(symbols_bit_length); +} + +// Compute approximate frequency table size needed for storing the provided +// symbols. +static inline int64_t ApproximateRAnsFrequencyTableBits( + int32_t max_value, int num_unique_symbols) { + // Approximate number of bits for storing zero frequency entries using the + // run length encoding (with max length of 64). + const int64_t table_zero_frequency_bits = + 8 * (num_unique_symbols + (max_value - num_unique_symbols) / 64); + return 8 * num_unique_symbols + table_zero_frequency_bits; +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_CODING_H_ diff --git a/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h b/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h new file mode 100644 index 000000000..10cdc6781 --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/rans_symbol_decoder.h @@ -0,0 +1,164 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_DECODER_H_ +#define DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_DECODER_H_ + +#include "draco/compression/config/compression_shared.h" +#include "draco/compression/entropy/rans_symbol_coding.h" +#include "draco/core/decoder_buffer.h" +#include "draco/core/varint_decoding.h" +#include "draco/draco_features.h" + +namespace draco { + +// A helper class for decoding symbols using the rANS algorithm (see ans.h). +// The class can be used to decode the probability table and the data encoded +// by the RAnsSymbolEncoder. |unique_symbols_bit_length_t| must be the same as +// the one used for the corresponding RAnsSymbolEncoder. +template +class RAnsSymbolDecoder { + public: + RAnsSymbolDecoder() : num_symbols_(0) {} + + // Initialize the decoder and decode the probability table. + bool Create(DecoderBuffer *buffer); + + uint32_t num_symbols() const { return num_symbols_; } + + // Starts decoding from the buffer. The buffer will be advanced past the + // encoded data after this call. + bool StartDecoding(DecoderBuffer *buffer); + uint32_t DecodeSymbol() { return ans_.rans_read(); } + void EndDecoding(); + + private: + static constexpr int rans_precision_bits_ = + ComputeRAnsPrecisionFromUniqueSymbolsBitLength( + unique_symbols_bit_length_t); + static constexpr int rans_precision_ = 1 << rans_precision_bits_; + + std::vector probability_table_; + uint32_t num_symbols_; + RAnsDecoder ans_; +}; + +template +bool RAnsSymbolDecoder::Create( + DecoderBuffer *buffer) { + // Check that the DecoderBuffer version is set. + if (buffer->bitstream_version() == 0) { + return false; + } + // Decode the number of alphabet symbols. 
+#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) { + if (!buffer->Decode(&num_symbols_)) { + return false; + } + + } else +#endif + { + if (!DecodeVarint(&num_symbols_, buffer)) { + return false; + } + } + probability_table_.resize(num_symbols_); + if (num_symbols_ == 0) { + return true; + } + // Decode the table. + for (uint32_t i = 0; i < num_symbols_; ++i) { + uint8_t prob_data = 0; + // Decode the first byte and extract the number of extra bytes we need to + // get, or the offset to the next symbol with non-zero probability. + if (!buffer->Decode(&prob_data)) { + return false; + } + // Token is stored in the first two bits of the first byte. Values 0-2 are + // used to indicate the number of extra bytes, and value 3 is a special + // symbol used to denote run-length coding of zero probability entries. + // See rans_symbol_encoder.h for more details. + const int token = prob_data & 3; + if (token == 3) { + const uint32_t offset = prob_data >> 2; + if (i + offset >= num_symbols_) { + return false; + } + // Set zero probability for all symbols in the specified range. + for (uint32_t j = 0; j < offset + 1; ++j) { + probability_table_[i + j] = 0; + } + i += offset; + } else { + const int extra_bytes = token; + uint32_t prob = prob_data >> 2; + for (int b = 0; b < extra_bytes; ++b) { + uint8_t eb; + if (!buffer->Decode(&eb)) { + return false; + } + // Shift 8 bits for each extra byte and subtract 2 for the two first + // bits. + prob |= static_cast(eb) << (8 * (b + 1) - 2); + } + probability_table_[i] = prob; + } + } + if (!ans_.rans_build_look_up_table(&probability_table_[0], num_symbols_)) { + return false; + } + return true; +} + +template +bool RAnsSymbolDecoder::StartDecoding( + DecoderBuffer *buffer) { + uint64_t bytes_encoded; + // Decode the number of bytes encoded by the encoder. +#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED + if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 0)) { + if (!buffer->Decode(&bytes_encoded)) { + return false; + } + + } else +#endif + { + if (!DecodeVarint(&bytes_encoded, buffer)) { + return false; + } + } + if (bytes_encoded > static_cast(buffer->remaining_size())) { + return false; + } + const uint8_t *const data_head = + reinterpret_cast(buffer->data_head()); + // Advance the buffer past the rANS data. + buffer->Advance(bytes_encoded); + if (ans_.read_init(data_head, static_cast(bytes_encoded)) != 0) { + return false; + } + return true; +} + +template +void RAnsSymbolDecoder::EndDecoding() { + ans_.read_end(); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_DECODER_H_ diff --git a/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h b/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h new file mode 100644 index 000000000..4e07ec871 --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/rans_symbol_encoder.h @@ -0,0 +1,290 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_ENCODER_H_ +#define DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_ENCODER_H_ + +#include +#include +#include + +#include "draco/compression/entropy/ans.h" +#include "draco/compression/entropy/rans_symbol_coding.h" +#include "draco/core/encoder_buffer.h" +#include "draco/core/varint_encoding.h" + +namespace draco { + +// A helper class for encoding symbols using the rANS algorithm (see ans.h). +// The class can be used to initialize and encode probability table needed by +// rANS, and to perform encoding of symbols into the provided EncoderBuffer. +template +class RAnsSymbolEncoder { + public: + RAnsSymbolEncoder() + : num_symbols_(0), num_expected_bits_(0), buffer_offset_(0) {} + + // Creates a probability table needed by the rANS library and encode it into + // the provided buffer. + bool Create(const uint64_t *frequencies, int num_symbols, + EncoderBuffer *buffer); + + void StartEncoding(EncoderBuffer *buffer); + void EncodeSymbol(uint32_t symbol) { + ans_.rans_write(&probability_table_[symbol]); + } + void EndEncoding(EncoderBuffer *buffer); + + // rANS requires to encode the input symbols in the reverse order. + static constexpr bool needs_reverse_encoding() { return true; } + + private: + // Functor used for sorting symbol ids according to their probabilities. + // The functor sorts symbol indices that index an underlying map between + // symbol ids and their probabilities. We don't sort the probability table + // directly, because that would require an additional indirection during the + // EncodeSymbol() function. + struct ProbabilityLess { + explicit ProbabilityLess(const std::vector *probs) + : probabilities(probs) {} + bool operator()(int i, int j) const { + return probabilities->at(i).prob < probabilities->at(j).prob; + } + const std::vector *probabilities; + }; + + // Encodes the probability table into the output buffer. + bool EncodeTable(EncoderBuffer *buffer); + + static constexpr int rans_precision_bits_ = + ComputeRAnsPrecisionFromUniqueSymbolsBitLength( + unique_symbols_bit_length_t); + static constexpr int rans_precision_ = 1 << rans_precision_bits_; + + std::vector probability_table_; + // The number of symbols in the input alphabet. + uint32_t num_symbols_; + // Expected number of bits that is needed to encode the input. + uint64_t num_expected_bits_; + + RAnsEncoder ans_; + // Initial offset of the encoder buffer before any ans data was encoded. + uint64_t buffer_offset_; +}; + +template +bool RAnsSymbolEncoder::Create( + const uint64_t *frequencies, int num_symbols, EncoderBuffer *buffer) { + // Compute the total of the input frequencies. + uint64_t total_freq = 0; + int max_valid_symbol = 0; + for (int i = 0; i < num_symbols; ++i) { + total_freq += frequencies[i]; + if (frequencies[i] > 0) { + max_valid_symbol = i; + } + } + num_symbols = max_valid_symbol + 1; + num_symbols_ = num_symbols; + probability_table_.resize(num_symbols); + const double total_freq_d = static_cast(total_freq); + const double rans_precision_d = static_cast(rans_precision_); + // Compute probabilities by rescaling the normalized frequencies into interval + // [1, rans_precision - 1]. The total probability needs to be equal to + // rans_precision. + int total_rans_prob = 0; + for (int i = 0; i < num_symbols; ++i) { + const uint64_t freq = frequencies[i]; + + // Normalized probability. 
+ const double prob = static_cast(freq) / total_freq_d; + + // RAns probability in range of [1, rans_precision - 1]. + uint32_t rans_prob = static_cast(prob * rans_precision_d + 0.5f); + if (rans_prob == 0 && freq > 0) { + rans_prob = 1; + } + probability_table_[i].prob = rans_prob; + total_rans_prob += rans_prob; + } + // Because of rounding errors, the total precision may not be exactly accurate + // and we may need to adjust the entries a little bit. + if (total_rans_prob != rans_precision_) { + std::vector sorted_probabilities(num_symbols); + for (int i = 0; i < num_symbols; ++i) { + sorted_probabilities[i] = i; + } + std::sort(sorted_probabilities.begin(), sorted_probabilities.end(), + ProbabilityLess(&probability_table_)); + if (total_rans_prob < rans_precision_) { + // This happens rather infrequently, just add the extra needed precision + // to the most frequent symbol. + probability_table_[sorted_probabilities.back()].prob += + rans_precision_ - total_rans_prob; + } else { + // We have over-allocated the precision, which is quite common. + // Rescale the probabilities of all symbols. + int32_t error = total_rans_prob - rans_precision_; + while (error > 0) { + const double act_total_prob_d = static_cast(total_rans_prob); + const double act_rel_error_d = rans_precision_d / act_total_prob_d; + for (int j = num_symbols - 1; j > 0; --j) { + int symbol_id = sorted_probabilities[j]; + if (probability_table_[symbol_id].prob <= 1) { + if (j == num_symbols - 1) { + return false; // Most frequent symbol would be empty. + } + break; + } + const int32_t new_prob = static_cast( + floor(act_rel_error_d * + static_cast(probability_table_[symbol_id].prob))); + int32_t fix = probability_table_[symbol_id].prob - new_prob; + if (fix == 0u) { + fix = 1; + } + if (fix >= static_cast(probability_table_[symbol_id].prob)) { + fix = probability_table_[symbol_id].prob - 1; + } + if (fix > error) { + fix = error; + } + probability_table_[symbol_id].prob -= fix; + total_rans_prob -= fix; + error -= fix; + if (total_rans_prob == rans_precision_) { + break; + } + } + } + } + } + + // Compute the cumulative probability (cdf). + uint32_t total_prob = 0; + for (int i = 0; i < num_symbols; ++i) { + probability_table_[i].cum_prob = total_prob; + total_prob += probability_table_[i].prob; + } + if (total_prob != rans_precision_) { + return false; + } + + // Estimate the number of bits needed to encode the input. + // From Shannon entropy the total number of bits N is: + // N = -sum{i : all_symbols}(F(i) * log2(P(i))) + // where P(i) is the normalized probability of symbol i and F(i) is the + // symbol's frequency in the input data. + double num_bits = 0; + for (int i = 0; i < num_symbols; ++i) { + if (probability_table_[i].prob == 0) { + continue; + } + const double norm_prob = + static_cast(probability_table_[i].prob) / rans_precision_d; + num_bits += static_cast(frequencies[i]) * log2(norm_prob); + } + num_expected_bits_ = static_cast(ceil(-num_bits)); + if (!EncodeTable(buffer)) { + return false; + } + return true; +} + +template +bool RAnsSymbolEncoder::EncodeTable( + EncoderBuffer *buffer) { + EncodeVarint(num_symbols_, buffer); + // Use varint encoding for the probabilities (first two bits represent the + // number of bytes used - 1). 
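+  // For example, prob = 1000 needs one extra byte: the first byte is
+  // ((1000 << 2) | 1) truncated to 8 bits = 0xA1 and the extra byte is
+  // 1000 >> 6 = 0x0F, so the decoder rebuilds (0xA1 >> 2) | (0x0F << 6)
+  // = 40 + 960 = 1000. A zero probability instead stores token 3 together
+  // with a 6-bit run length of zero-probability symbols (handled below).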
+ for (uint32_t i = 0; i < num_symbols_; ++i) { + const uint32_t prob = probability_table_[i].prob; + int num_extra_bytes = 0; + if (prob >= (1 << 6)) { + num_extra_bytes++; + if (prob >= (1 << 14)) { + num_extra_bytes++; + if (prob >= (1 << 22)) { + // The maximum number of precision bits is 20 so we should not really + // get to this point. + return false; + } + } + } + if (prob == 0) { + // When the probability of the symbol is 0, set the first two bits to 1 + // (unique identifier) and use the remaining 6 bits to store the offset + // to the next symbol with non-zero probability. + uint32_t offset = 0; + for (; offset < (1 << 6) - 1; ++offset) { + // Note: we don't have to check whether the next symbol id is larger + // than num_symbols_ because we know that the last symbol always has + // non-zero probability. + const uint32_t next_prob = probability_table_[i + offset + 1].prob; + if (next_prob > 0) { + break; + } + } + buffer->Encode(static_cast((offset << 2) | 3)); + i += offset; + } else { + // Encode the first byte (including the number of extra bytes). + buffer->Encode(static_cast((prob << 2) | (num_extra_bytes & 3))); + // Encode the extra bytes. + for (int b = 0; b < num_extra_bytes; ++b) { + buffer->Encode(static_cast(prob >> (8 * (b + 1) - 2))); + } + } + } + return true; +} + +template +void RAnsSymbolEncoder::StartEncoding( + EncoderBuffer *buffer) { + // Allocate extra storage just in case. + const uint64_t required_bits = 2 * num_expected_bits_ + 32; + + buffer_offset_ = buffer->size(); + const int64_t required_bytes = (required_bits + 7) / 8; + buffer->Resize(buffer_offset_ + required_bytes + sizeof(buffer_offset_)); + uint8_t *const data = + reinterpret_cast(const_cast(buffer->data())); + ans_.write_init(data + buffer_offset_); +} + +template +void RAnsSymbolEncoder::EndEncoding( + EncoderBuffer *buffer) { + char *const src = const_cast(buffer->data()) + buffer_offset_; + + // TODO(fgalligan): Look into changing this to uint32_t as write_end() + // returns an int. + const uint64_t bytes_written = static_cast(ans_.write_end()); + EncoderBuffer var_size_buffer; + EncodeVarint(bytes_written, &var_size_buffer); + const uint32_t size_len = static_cast(var_size_buffer.size()); + char *const dst = src + size_len; + memmove(dst, src, bytes_written); + + // Store the size of the encoded data. + memcpy(src, var_size_buffer.data(), size_len); + + // Resize the buffer to match the number of encoded bytes. + buffer->Resize(buffer_offset_ + bytes_written + size_len); +} + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENTROPY_RANS_SYMBOL_ENCODER_H_ diff --git a/contrib/draco/src/draco/compression/entropy/shannon_entropy.cc b/contrib/draco/src/draco/compression/entropy/shannon_entropy.cc new file mode 100644 index 000000000..137eafe5f --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/shannon_entropy.cc @@ -0,0 +1,147 @@ +#include "draco/compression/entropy/shannon_entropy.h" + +#include +#include + +#include "draco/compression/entropy/rans_symbol_coding.h" + +namespace draco { + +int64_t ComputeShannonEntropy(const uint32_t *symbols, int num_symbols, + int max_value, int *out_num_unique_symbols) { + // First find frequency of all unique symbols in the input array. 
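+  // Overall, the value returned below is the total bit count
+  //   -sum_i(f_i * log2(f_i / num_symbols))
+  // where f_i is the frequency of symbol i. For example, 4 input symbols
+  // split evenly over 2 unique values give -(2 * log2(0.5) + 2 * log2(0.5))
+  // = 4 bits, i.e. 1 bit per symbol.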
+ int num_unique_symbols = 0; + std::vector symbol_frequencies(max_value + 1, 0); + for (int i = 0; i < num_symbols; ++i) { + ++symbol_frequencies[symbols[i]]; + } + double total_bits = 0; + double num_symbols_d = num_symbols; + for (int i = 0; i < max_value + 1; ++i) { + if (symbol_frequencies[i] > 0) { + ++num_unique_symbols; + // Compute Shannon entropy for the symbol. + // We don't want to use std::log2 here for Android build. + total_bits += + symbol_frequencies[i] * + log2(static_cast(symbol_frequencies[i]) / num_symbols_d); + } + } + if (out_num_unique_symbols) { + *out_num_unique_symbols = num_unique_symbols; + } + // Entropy is always negative. + return static_cast(-total_bits); +} + +double ComputeBinaryShannonEntropy(uint32_t num_values, + uint32_t num_true_values) { + if (num_values == 0) { + return 0; + } + + // We can exit early if the data set has 0 entropy. + if (num_true_values == 0 || num_values == num_true_values) { + return 0; + } + const double true_freq = + static_cast(num_true_values) / static_cast(num_values); + const double false_freq = 1.0 - true_freq; + return -(true_freq * std::log2(true_freq) + + false_freq * std::log2(false_freq)); +} + +ShannonEntropyTracker::ShannonEntropyTracker() {} + +ShannonEntropyTracker::EntropyData ShannonEntropyTracker::Peek( + const uint32_t *symbols, int num_symbols) { + return UpdateSymbols(symbols, num_symbols, false); +} + +ShannonEntropyTracker::EntropyData ShannonEntropyTracker::Push( + const uint32_t *symbols, int num_symbols) { + return UpdateSymbols(symbols, num_symbols, true); +} + +ShannonEntropyTracker::EntropyData ShannonEntropyTracker::UpdateSymbols( + const uint32_t *symbols, int num_symbols, bool push_changes) { + EntropyData ret_data = entropy_data_; + ret_data.num_values += num_symbols; + for (int i = 0; i < num_symbols; ++i) { + const uint32_t symbol = symbols[i]; + if (frequencies_.size() <= symbol) { + frequencies_.resize(symbol + 1, 0); + } + + // Update the entropy of the stream. Note that entropy of |N| values + // represented by |S| unique symbols is defined as: + // + // entropy = -sum_over_S(symbol_frequency / N * log2(symbol_frequency / N)) + // + // To avoid the need to recompute the entire sum when new values are added, + // we can instead update a so called entropy norm that is defined as: + // + // entropy_norm = sum_over_S(symbol_frequency * log2(symbol_frequency)) + // + // In this case, all we need to do is update entries on the symbols where + // the frequency actually changed. + // + // Note that entropy_norm and entropy can be easily transformed to the + // actual entropy as: + // + // entropy = log2(N) - entropy_norm / N + // + double old_symbol_entropy_norm = 0; + int &frequency = frequencies_[symbol]; + if (frequency > 1) { + old_symbol_entropy_norm = frequency * std::log2(frequency); + } else if (frequency == 0) { + ret_data.num_unique_symbols++; + if (symbol > static_cast(ret_data.max_symbol)) { + ret_data.max_symbol = symbol; + } + } + frequency++; + const double new_symbol_entropy_norm = frequency * std::log2(frequency); + + // Update the final entropy. + ret_data.entropy_norm += new_symbol_entropy_norm - old_symbol_entropy_norm; + } + if (push_changes) { + // Update entropy data of the stream. + entropy_data_ = ret_data; + } else { + // We are only peeking so do not update the stream. + // Revert changes in the frequency table. 
+ for (int i = 0; i < num_symbols; ++i) { + const uint32_t symbol = symbols[i]; + frequencies_[symbol]--; + } + } + return ret_data; +} + +int64_t ShannonEntropyTracker::GetNumberOfDataBits( + const EntropyData &entropy_data) { + if (entropy_data.num_values < 2) { + return 0; + } + // We need to compute the number of bits required to represent the stream + // using the entropy norm. Note that: + // + // entropy = log2(num_values) - entropy_norm / num_values + // + // and number of bits required for the entropy is: num_values * entropy + // + return static_cast( + ceil(entropy_data.num_values * std::log2(entropy_data.num_values) - + entropy_data.entropy_norm)); +} + +int64_t ShannonEntropyTracker::GetNumberOfRAnsTableBits( + const EntropyData &entropy_data) { + return ApproximateRAnsFrequencyTableBits(entropy_data.max_symbol + 1, + entropy_data.num_unique_symbols); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/entropy/shannon_entropy.h b/contrib/draco/src/draco/compression/entropy/shannon_entropy.h new file mode 100644 index 000000000..85165f4cb --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/shannon_entropy.h @@ -0,0 +1,110 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef DRACO_COMPRESSION_ENTROPY_SHANNON_ENTROPY_H_ +#define DRACO_COMPRESSION_ENTROPY_SHANNON_ENTROPY_H_ + +#include + +#include + +namespace draco { + +// Computes an approximate Shannon entropy of symbols stored in the provided +// input array |symbols|. The entropy corresponds to the number of bits that is +// required to represent/store all the symbols using an optimal entropy coding +// algorithm. See for example "A mathematical theory of communication" by +// Shannon'48 (http://ieeexplore.ieee.org/document/6773024/). +// +// |max_value| is a required input that define the maximum value in the input +// |symbols| array. +// +// |out_num_unique_symbols| is an optional output argument that stores the +// number of unique symbols contained within the |symbols| array. +// TODO(ostava): This should be renamed or the return value should be changed to +// return the actual entropy and not the number of bits needed to represent the +// input symbols. +int64_t ComputeShannonEntropy(const uint32_t *symbols, int num_symbols, + int max_value, int *out_num_unique_symbols); + +// Computes the Shannon entropy of |num_values| Boolean entries, where +// |num_true_values| are set to true. +// Returns entropy between 0-1. +double ComputeBinaryShannonEntropy(uint32_t num_values, + uint32_t num_true_values); + +// Class that can be used to keep track of the Shannon entropy on streamed data. +// As new symbols are pushed to the tracker, the entropy is automatically +// recomputed. The class also support recomputing the entropy without actually +// pushing the symbols to the tracker through the Peek() method. 
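+//
+// Example usage (a sketch based only on the interface declared below):
+//
+//   ShannonEntropyTracker tracker;
+//   const uint32_t symbols[] = {1, 5, 1, 100, 2, 1};
+//   tracker.Push(symbols, 6);
+//   const int64_t data_bits = tracker.GetNumberOfDataBits();
+//   const int64_t table_bits = tracker.GetNumberOfRAnsTableBits();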
+class ShannonEntropyTracker { + public: + ShannonEntropyTracker(); + + // Struct for holding entropy data about the symbols added to the tracker. + // It can be used to compute the number of bits needed to store the data using + // the method: + // ShannonEntropyTracker::GetNumberOfDataBits(entropy_data); + // or to compute the approximate size of the frequency table needed by the + // rans coding using method: + // ShannonEntropyTracker::GetNumberOfRAnsTableBits(entropy_data); + struct EntropyData { + double entropy_norm; + int num_values; + int max_symbol; + int num_unique_symbols; + EntropyData() + : entropy_norm(0.0), + num_values(0), + max_symbol(0), + num_unique_symbols(0) {} + }; + + // Adds new symbols to the tracker and recomputes the entropy accordingly. + EntropyData Push(const uint32_t *symbols, int num_symbols); + + // Returns new entropy data for the tracker as if |symbols| were added to the + // tracker without actually changing the status of the tracker. + EntropyData Peek(const uint32_t *symbols, int num_symbols); + + // Gets the number of bits needed for encoding symbols added to the tracker. + int64_t GetNumberOfDataBits() const { + return GetNumberOfDataBits(entropy_data_); + } + + // Gets the number of bits needed for encoding frequency table using the rans + // encoder. + int64_t GetNumberOfRAnsTableBits() const { + return GetNumberOfRAnsTableBits(entropy_data_); + } + + // Gets the number of bits needed for encoding given |entropy_data|. + static int64_t GetNumberOfDataBits(const EntropyData &entropy_data); + + // Gets the number of bits needed for encoding frequency table using the rans + // encoder for the given |entropy_data|. + static int64_t GetNumberOfRAnsTableBits(const EntropyData &entropy_data); + + private: + EntropyData UpdateSymbols(const uint32_t *symbols, int num_symbols, + bool push_changes); + + std::vector frequencies_; + + EntropyData entropy_data_; +}; + +} // namespace draco + +#endif // DRACO_COMPRESSION_ENTROPY_SHANNON_ENTROPY_H_ diff --git a/contrib/draco/src/draco/compression/entropy/shannon_entropy_test.cc b/contrib/draco/src/draco/compression/entropy/shannon_entropy_test.cc new file mode 100644 index 000000000..732c7d2fb --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/shannon_entropy_test.cc @@ -0,0 +1,58 @@ +#include "draco/compression/entropy/shannon_entropy.h" + +#include "draco/core/draco_test_base.h" + +namespace { + +TEST(ShannonEntropyTest, TestBinaryEntropy) { + // Test verifies that computing binary entropy works as expected. + ASSERT_EQ(draco::ComputeBinaryShannonEntropy(0, 0), 0); + ASSERT_EQ(draco::ComputeBinaryShannonEntropy(10, 0), 0); + ASSERT_EQ(draco::ComputeBinaryShannonEntropy(10, 10), 0); + ASSERT_NEAR(draco::ComputeBinaryShannonEntropy(10, 5), 1.0, 1e-4); +} + +TEST(ShannonEntropyTest, TestStreamEntropy) { + // Test verifies that the entropy of streamed data is computed correctly. + const std::vector symbols = {1, 5, 1, 100, 2, 1}; + + draco::ShannonEntropyTracker entropy_tracker; + + // Nothing added, 0 entropy. + ASSERT_EQ(entropy_tracker.GetNumberOfDataBits(), 0); + + // Try to push symbols one by one. + uint32_t max_symbol = 0; + for (int i = 0; i < symbols.size(); ++i) { + if (symbols[i] > max_symbol) { + max_symbol = symbols[i]; + } + const auto entropy_data = entropy_tracker.Push(&symbols[i], 1); + + const int64_t stream_entropy_bits = entropy_tracker.GetNumberOfDataBits(); + // Ensure the returned entropy_data is in sync with the stream. 
+ ASSERT_EQ(draco::ShannonEntropyTracker::GetNumberOfDataBits(entropy_data), + stream_entropy_bits); + + // Make sure the entropy is approximately the same as the one we compute + // directly from all symbols. + const int64_t expected_entropy_bits = draco::ComputeShannonEntropy( + symbols.data(), i + 1, max_symbol, nullptr); + + // For now hardcoded tolerance of 2 bits. + ASSERT_NEAR(expected_entropy_bits, stream_entropy_bits, 2); + } + + // Compare it also to the case when we add all symbols in one call. + draco::ShannonEntropyTracker entropy_tracker_2; + entropy_tracker_2.Push(symbols.data(), symbols.size()); + const int64_t stream_2_entropy_bits = entropy_tracker_2.GetNumberOfDataBits(); + ASSERT_EQ(entropy_tracker.GetNumberOfDataBits(), stream_2_entropy_bits); + + // Ensure that peeking does not change the entropy. + entropy_tracker_2.Peek(symbols.data(), 1); + + ASSERT_EQ(stream_2_entropy_bits, entropy_tracker_2.GetNumberOfDataBits()); +} + +} // namespace diff --git a/contrib/draco/src/draco/compression/entropy/symbol_coding_test.cc b/contrib/draco/src/draco/compression/entropy/symbol_coding_test.cc new file mode 100644 index 000000000..ba7166bbe --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/symbol_coding_test.cc @@ -0,0 +1,170 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/config/compression_shared.h" +#include "draco/compression/entropy/symbol_decoding.h" +#include "draco/compression/entropy/symbol_encoding.h" +#include "draco/core/bit_utils.h" +#include "draco/core/decoder_buffer.h" +#include "draco/core/draco_test_base.h" +#include "draco/core/encoder_buffer.h" + +namespace draco { + +class SymbolCodingTest : public ::testing::Test { + protected: + SymbolCodingTest() : bitstream_version_(kDracoMeshBitstreamVersion) {} + + template + void TestConvertToSymbolAndBack(SignedIntTypeT x) { + typedef typename std::make_unsigned::type Symbol; + Symbol symbol = ConvertSignedIntToSymbol(x); + SignedIntTypeT y = ConvertSymbolToSignedInt(symbol); + ASSERT_EQ(x, y); + } + + uint16_t bitstream_version_; +}; + +TEST_F(SymbolCodingTest, TestLargeNumbers) { + // This test verifies that SymbolCoding successfully encodes an array of large + // numbers. + const uint32_t in[] = {12345678, 1223333, 111, 5}; + const int num_values = sizeof(in) / sizeof(uint32_t); + EncoderBuffer eb; + ASSERT_TRUE(EncodeSymbols(in, num_values, 1, nullptr, &eb)); + + std::vector out; + out.resize(num_values); + DecoderBuffer db; + db.Init(eb.data(), eb.size()); + db.set_bitstream_version(bitstream_version_); + ASSERT_TRUE(DecodeSymbols(num_values, 1, &db, &out[0])); + for (int i = 0; i < num_values; ++i) { + EXPECT_EQ(in[i], out[i]); + } +} + +TEST_F(SymbolCodingTest, TestManyNumbers) { + // This test verifies that SymbolCoding successfully encodes an array of + // several numbers that repeat many times. + + // Value/frequency pairs. 
+ const std::pair in[] = { + {12, 1500}, {1025, 31000}, {7, 1}, {9, 5}, {0, 6432}}; + + const int num_pairs = sizeof(in) / sizeof(std::pair); + + std::vector in_values; + for (int i = 0; i < num_pairs; ++i) { + in_values.insert(in_values.end(), in[i].second, in[i].first); + } + for (int method = 0; method < NUM_SYMBOL_CODING_METHODS; ++method) { + // Test the encoding using all available symbol coding methods. + Options options; + SetSymbolEncodingMethod(&options, static_cast(method)); + + EncoderBuffer eb; + ASSERT_TRUE( + EncodeSymbols(in_values.data(), in_values.size(), 1, &options, &eb)); + std::vector out_values; + out_values.resize(in_values.size()); + DecoderBuffer db; + db.Init(eb.data(), eb.size()); + db.set_bitstream_version(bitstream_version_); + ASSERT_TRUE(DecodeSymbols(in_values.size(), 1, &db, &out_values[0])); + for (uint32_t i = 0; i < in_values.size(); ++i) { + ASSERT_EQ(in_values[i], out_values[i]); + } + } +} + +TEST_F(SymbolCodingTest, TestEmpty) { + // This test verifies that SymbolCoding successfully encodes an empty array. + EncoderBuffer eb; + ASSERT_TRUE(EncodeSymbols(nullptr, 0, 1, nullptr, &eb)); + DecoderBuffer db; + db.Init(eb.data(), eb.size()); + db.set_bitstream_version(bitstream_version_); + ASSERT_TRUE(DecodeSymbols(0, 1, &db, nullptr)); +} + +TEST_F(SymbolCodingTest, TestOneSymbol) { + // This test verifies that SymbolCoding successfully encodes an a single + // symbol. + EncoderBuffer eb; + const std::vector in(1200, 0); + ASSERT_TRUE(EncodeSymbols(in.data(), in.size(), 1, nullptr, &eb)); + + std::vector out(in.size()); + DecoderBuffer db; + db.Init(eb.data(), eb.size()); + db.set_bitstream_version(bitstream_version_); + ASSERT_TRUE(DecodeSymbols(in.size(), 1, &db, &out[0])); + for (uint32_t i = 0; i < in.size(); ++i) { + ASSERT_EQ(in[i], out[i]); + } +} + +TEST_F(SymbolCodingTest, TestBitLengths) { + // This test verifies that SymbolCoding successfully encodes symbols of + // various bit lengths + EncoderBuffer eb; + std::vector in; + constexpr int bit_lengths = 18; + for (int i = 0; i < bit_lengths; ++i) { + in.push_back(1 << i); + } + std::vector out(in.size()); + for (int i = 0; i < bit_lengths; ++i) { + eb.Clear(); + ASSERT_TRUE(EncodeSymbols(in.data(), i + 1, 1, nullptr, &eb)); + DecoderBuffer db; + db.Init(eb.data(), eb.size()); + db.set_bitstream_version(bitstream_version_); + ASSERT_TRUE(DecodeSymbols(i + 1, 1, &db, &out[0])); + for (int j = 0; j < i + 1; ++j) { + ASSERT_EQ(in[j], out[j]); + } + } +} + +TEST_F(SymbolCodingTest, TestLargeNumberCondition) { + // This test verifies that SymbolCoding successfully encodes large symbols + // that are on the boundary between raw scheme and tagged scheme (18 bits). 
+ EncoderBuffer eb; + constexpr int num_symbols = 1000000; + const std::vector in(num_symbols, 1 << 18); + ASSERT_TRUE(EncodeSymbols(in.data(), in.size(), 1, nullptr, &eb)); + + std::vector out(in.size()); + DecoderBuffer db; + db.Init(eb.data(), eb.size()); + db.set_bitstream_version(bitstream_version_); + ASSERT_TRUE(DecodeSymbols(in.size(), 1, &db, &out[0])); + for (uint32_t i = 0; i < in.size(); ++i) { + ASSERT_EQ(in[i], out[i]); + } +} + +TEST_F(SymbolCodingTest, TestConversionFullRange) { + TestConvertToSymbolAndBack(static_cast(-128)); + TestConvertToSymbolAndBack(static_cast(-127)); + TestConvertToSymbolAndBack(static_cast(-1)); + TestConvertToSymbolAndBack(static_cast(0)); + TestConvertToSymbolAndBack(static_cast(1)); + TestConvertToSymbolAndBack(static_cast(127)); +} + +} // namespace draco diff --git a/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc b/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc new file mode 100644 index 000000000..93d29971c --- /dev/null +++ b/contrib/draco/src/draco/compression/entropy/symbol_decoding.cc @@ -0,0 +1,181 @@ +// Copyright 2016 The Draco Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#include "draco/compression/entropy/symbol_decoding.h" + +#include +#include + +#include "draco/compression/entropy/rans_symbol_decoder.h" + +namespace draco { + +template